Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 2 additions & 2 deletions .jsdoc.js
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
// Copyright 2025 Google LLC
// Copyright 2026 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
Expand Down Expand Up @@ -40,7 +40,7 @@ module.exports = {
includePattern: '\\.js$'
},
templates: {
copyright: 'Copyright 2025 Google LLC',
copyright: 'Copyright 2026 Google LLC',
includeDate: false,
sourceFiles: false,
systemName: '@google-cloud/bigquery-storage',
Expand Down
25 changes: 25 additions & 0 deletions protos/google/cloud/bigquery/storage/v1/arrow.proto
Original file line number Diff line number Diff line change
Expand Up @@ -58,7 +58,32 @@ message ArrowSerializationOptions {
ZSTD = 2;
}

// The precision of the timestamp value in the Arrow record batch. This
// precision will **only** be applied to the column(s) with the
// `TIMESTAMP_PICOS` type.
//
// NOTE(review): comment previously said "Avro message" — copy-paste from
// avro.proto; this enum serializes to Arrow, per the value comments below.
// NOTE(review): values 1-3 lack the `PICOS_TIMESTAMP_PRECISION_` prefix used
// by the zero value; renaming now would break generated code and JSON, so
// they are kept as-is.
enum PicosTimestampPrecision {
// Unspecified timestamp precision. The default precision is microseconds.
PICOS_TIMESTAMP_PRECISION_UNSPECIFIED = 0;

// Timestamp values returned by Read API will be truncated to microsecond
// level precision. The value will be encoded as Arrow TIMESTAMP type in a
// 64 bit integer.
TIMESTAMP_PRECISION_MICROS = 1;

// Timestamp values returned by Read API will be truncated to nanosecond
// level precision. The value will be encoded as Arrow TIMESTAMP type in a
// 64 bit integer.
TIMESTAMP_PRECISION_NANOS = 2;

// Read API will return full precision picosecond value. The value will be
// encoded as a string which conforms to ISO 8601 format.
TIMESTAMP_PRECISION_PICOS = 3;
}

// The compression codec to use for Arrow buffers in serialized record
// batches.
CompressionCodec buffer_compression = 2;

// Optional. Set timestamp precision option. If not set, the default precision
// is microseconds.
PicosTimestampPrecision picos_timestamp_precision = 3;
}
25 changes: 25 additions & 0 deletions protos/google/cloud/bigquery/storage/v1/avro.proto
Original file line number Diff line number Diff line change
Expand Up @@ -42,6 +42,27 @@ message AvroRows {

// Contains options specific to Avro Serialization.
message AvroSerializationOptions {
// The precision of the timestamp value in the Avro message. This precision
// will **only** be applied to the column(s) with the `TIMESTAMP_PICOS` type.
//
// NOTE(review): values 1-3 lack the `PICOS_TIMESTAMP_PRECISION_` prefix used
// by the zero value; renaming now would break generated code and JSON, so
// they are kept as-is.
enum PicosTimestampPrecision {
// Unspecified timestamp precision. The default precision is microseconds.
PICOS_TIMESTAMP_PRECISION_UNSPECIFIED = 0;

// Timestamp values returned by Read API will be truncated to microsecond
// level precision. The value will be encoded as Avro TIMESTAMP type in a
// 64 bit integer.
TIMESTAMP_PRECISION_MICROS = 1;

// Timestamp values returned by Read API will be truncated to nanosecond
// level precision. The value will be encoded as Avro TIMESTAMP type in a
// 64 bit integer.
TIMESTAMP_PRECISION_NANOS = 2;

// Read API will return full precision picosecond value. The value will be
// encoded as a string which conforms to ISO 8601 format.
TIMESTAMP_PRECISION_PICOS = 3;
}

// Enable displayName attribute in Avro schema.
//
// The Avro specification requires field names to be alphanumeric. By
Expand All @@ -53,4 +74,8 @@ message AvroSerializationOptions {
// value and populates a "displayName" attribute for every avro field with the
// original column name.
bool enable_display_name_attribute = 1;

// Optional. Set timestamp precision option. If not set, the default precision
// is microseconds.
PicosTimestampPrecision picos_timestamp_precision = 2;
}
19 changes: 8 additions & 11 deletions protos/google/cloud/bigquery/storage/v1/storage.proto
Original file line number Diff line number Diff line change
Expand Up @@ -78,9 +78,9 @@ service BigQueryRead {
}

// Reads rows from the stream in the format prescribed by the ReadSession.
// Each response contains one or more table rows, up to a maximum of 100 MiB
// Each response contains one or more table rows, up to a maximum of 128 MB
// per response; read requests which attempt to read individual rows larger
// than 100 MiB will fail.
// than 128 MB will fail.
//
// Each request also returns a set of stream statistics reflecting the current
// state of the stream.
Expand Down Expand Up @@ -423,8 +423,6 @@ message CreateWriteStreamRequest {
// Requests larger than this return an error, typically `INVALID_ARGUMENT`.
message AppendRowsRequest {
// Arrow schema and data.
// Arrow format is an experimental feature only selected for allowlisted
// customers.
message ArrowData {
// Optional. Arrow Schema used to serialize the data.
ArrowSchema writer_schema = 1;
Expand All @@ -436,8 +434,8 @@ message AppendRowsRequest {
// ProtoData contains the data rows and schema when constructing append
// requests.
message ProtoData {
// The protocol buffer schema used to serialize the data. Provide this value
// whenever:
// Optional. The protocol buffer schema used to serialize the data. Provide
// this value whenever:
//
// * You send the first request of an RPC connection.
//
Expand All @@ -446,7 +444,7 @@ message AppendRowsRequest {
// * You specify a new destination table.
ProtoSchema writer_schema = 1;

// Serialized row data in protobuf message format.
// Required. Serialized row data in protobuf message format.
// Currently, the backend expects the serialized rows to adhere to
// proto2 semantics when appending rows, particularly with respect to
// how default values are encoded.
Expand Down Expand Up @@ -522,8 +520,7 @@ message AppendRowsRequest {
// Rows in proto format.
ProtoData proto_rows = 4;

// Rows in arrow format. This is an experimental feature only selected for
// allowlisted customers.
// Rows in arrow format.
ArrowData arrow_rows = 5;
}

Expand Down Expand Up @@ -553,8 +550,8 @@ message AppendRowsRequest {

// Optional. Default missing value interpretation for all columns in the
// table. When a value is specified on an `AppendRowsRequest`, it is applied
// to all requests on the connection from that point forward, until a
// subsequent `AppendRowsRequest` sets it to a different value.
// to all requests from that point forward, until a subsequent
// `AppendRowsRequest` sets it to a different value.
// `missing_value_interpretation` can override
// `default_missing_value_interpretation`. For example, if you want to write
// `NULL` instead of using default values for some columns, you can set
Expand Down
6 changes: 3 additions & 3 deletions protos/google/cloud/bigquery/storage/v1/stream.proto
Original file line number Diff line number Diff line change
Expand Up @@ -328,8 +328,8 @@ message WriteStream {
// Immutable. Mode of the stream.
WriteMode write_mode = 7 [(google.api.field_behavior) = IMMUTABLE];

// Immutable. The geographic location where the stream's dataset resides. See
// https://cloud.google.com/bigquery/docs/locations for supported
// Output only. The geographic location where the stream's dataset resides.
// See https://cloud.google.com/bigquery/docs/locations for supported
// locations.
string location = 8 [(google.api.field_behavior) = IMMUTABLE];
string location = 8 [(google.api.field_behavior) = OUTPUT_ONLY];
}
11 changes: 11 additions & 0 deletions protos/google/cloud/bigquery/storage/v1/table.proto
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,7 @@ syntax = "proto3";
package google.cloud.bigquery.storage.v1;

import "google/api/field_behavior.proto";
import "google/protobuf/wrappers.proto";

option csharp_namespace = "Google.Cloud.BigQuery.Storage.V1";
option go_package = "cloud.google.com/go/bigquery/storage/apiv1/storagepb;storagepb";
Expand Down Expand Up @@ -178,6 +179,16 @@ message TableFieldSchema {
// (https://cloud.google.com/bigquery/docs/default-values) for this field.
string default_value_expression = 10 [(google.api.field_behavior) = OPTIONAL];

// Optional. Precision (maximum number of total digits in base 10) for seconds
// of TIMESTAMP type.
//
// Possible values include:
//
// * 6 (Default, for TIMESTAMP type with microsecond precision)
// * 12 (For TIMESTAMP type with picosecond precision)
google.protobuf.Int64Value timestamp_precision = 27
[(google.api.field_behavior) = OPTIONAL];

// Optional. The subtype of the RANGE, if the type of this field is RANGE. If
// the type is RANGE, this field is required. Possible values for the field
// element type of a RANGE include:
Expand Down
Loading
Loading