 from google.cloud.bigquery_v2 import types as gapic_types
 
 
+class Compression(object):
+    """The compression type to use for exported files. The default value is
+    :attr:`NONE`.
+
+    :attr:`DEFLATE` and :attr:`SNAPPY` are only supported for Avro.
+    """
+
+    GZIP = "GZIP"
+    """Specifies GZIP format."""
+
+    DEFLATE = "DEFLATE"
+    """Specifies DEFLATE format."""
+
+    SNAPPY = "SNAPPY"
+    """Specifies SNAPPY format."""
+
+    NONE = "NONE"
+    """Specifies no compression."""
+
+
+class CreateDisposition(object):
+    """Specifies whether the job is allowed to create new tables. The default
+    value is :attr:`CREATE_IF_NEEDED`.
+
+    Creation, truncation and append actions occur as one atomic update
+    upon job completion.
+    """
+
+    CREATE_IF_NEEDED = "CREATE_IF_NEEDED"
+    """If the table does not exist, BigQuery creates the table."""
+
+    CREATE_NEVER = "CREATE_NEVER"
+    """The table must already exist. If it does not, a 'notFound' error is
+    returned in the job result."""
+
+
+class DestinationFormat(object):
+    """The exported file format. The default value is :attr:`CSV`.
+
+    Tables with nested or repeated fields cannot be exported as CSV.
+    """
+
+    CSV = "CSV"
+    """Specifies CSV format."""
+
+    NEWLINE_DELIMITED_JSON = "NEWLINE_DELIMITED_JSON"
+    """Specifies newline delimited JSON format."""
+
+    AVRO = "AVRO"
+    """Specifies Avro format."""
+
+
+class Encoding(object):
+    """The character encoding of the data. The default is :attr:`UTF_8`.
+
+    BigQuery decodes the data after the raw, binary data has been
+    split using the values of the quote and fieldDelimiter properties.
+    """
+
+    UTF_8 = "UTF-8"
+    """Specifies UTF-8 encoding."""
+
+    ISO_8859_1 = "ISO-8859-1"
+    """Specifies ISO-8859-1 encoding."""
+
+
+class QueryPriority(object):
+    """Specifies a priority for the query. The default value is
+    :attr:`INTERACTIVE`.
+    """
+
+    INTERACTIVE = "INTERACTIVE"
+    """Specifies interactive priority."""
+
+    BATCH = "BATCH"
+    """Specifies batch priority."""
+
+
+class SchemaUpdateOption(object):
+    """Specifies an update to the destination table schema as a side effect
+    of a load job.
+    """
+
+    ALLOW_FIELD_ADDITION = "ALLOW_FIELD_ADDITION"
+    """Allow adding a nullable field to the schema."""
+
+    ALLOW_FIELD_RELAXATION = "ALLOW_FIELD_RELAXATION"
+    """Allow relaxing a required field in the original schema to nullable."""
+
+
+class SourceFormat(object):
+    """The format of the data files. The default value is :attr:`CSV`.
+
+    Note that the set of allowed values for loading data is different
+    from the set used for external data sources (see
+    :class:`~google.cloud.bigquery.external_config.ExternalSourceFormat`).
+    """
+
+    CSV = "CSV"
+    """Specifies CSV format."""
+
+    DATASTORE_BACKUP = "DATASTORE_BACKUP"
+    """Specifies Cloud Datastore backup format."""
+
+    NEWLINE_DELIMITED_JSON = "NEWLINE_DELIMITED_JSON"
+    """Specifies newline delimited JSON format."""
+
+    AVRO = "AVRO"
+    """Specifies Avro format."""
+
+    PARQUET = "PARQUET"
+    """Specifies Parquet format."""
+
+    ORC = "ORC"
+    """Specifies ORC format."""
+
+
 _SQL_SCALAR_TYPES = frozenset(
     (
         "INT64",
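The export-side classes in this hunk (Compression and DestinationFormat) are plain string constants meant to be assigned onto an extract job's configuration. A minimal sketch of that usage, assuming these classes end up re-exported from the top-level google.cloud.bigquery namespace (as the released library does), with placeholder project, table, and bucket names:

    from google.cloud import bigquery

    client = bigquery.Client()

    # Export a table to Cloud Storage as Snappy-compressed Avro. Note that
    # DEFLATE and SNAPPY are only valid with the AVRO destination format.
    job_config = bigquery.ExtractJobConfig()
    job_config.destination_format = bigquery.DestinationFormat.AVRO
    job_config.compression = bigquery.Compression.SNAPPY

    extract_job = client.extract_table(
        "my-project.my_dataset.my_table",          # placeholder table ID
        "gs://my-bucket/exports/my_table-*.avro",  # placeholder GCS URI
        job_config=job_config,
    )
    extract_job.result()  # block until the export job completes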
@@ -92,3 +210,24 @@ class SqlTypeNames(str, enum.Enum): |
     DATE = "DATE"
     TIME = "TIME"
     DATETIME = "DATETIME"
+
+
+class WriteDisposition(object):
+    """Specifies the action that occurs if the destination table already
+    exists.
+
+    The default value is :attr:`WRITE_APPEND`.
+
+    Each action is atomic and only occurs if BigQuery is able to complete
+    the job successfully. Creation, truncation and append actions occur as one
+    atomic update upon job completion.
+    """
+
+    WRITE_APPEND = "WRITE_APPEND"
+    """If the table already exists, BigQuery appends the data to the table."""
+
+    WRITE_TRUNCATE = "WRITE_TRUNCATE"
+    """If the table already exists, BigQuery overwrites the table data."""
+
+    WRITE_EMPTY = "WRITE_EMPTY"
+    """If the table already exists and contains data, a 'duplicate' error is
+    returned in the job result."""
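The load-side classes compose the same way on a LoadJobConfig. A sketch of a CSV load that exercises SourceFormat, Encoding, CreateDisposition, WriteDisposition, and SchemaUpdateOption, again with placeholder URIs and table IDs:

    from google.cloud import bigquery

    client = bigquery.Client()

    job_config = bigquery.LoadJobConfig()
    job_config.source_format = bigquery.SourceFormat.CSV
    job_config.encoding = bigquery.Encoding.ISO_8859_1
    job_config.skip_leading_rows = 1  # skip the CSV header row
    job_config.autodetect = True      # infer the schema from the CSV
    job_config.create_disposition = bigquery.CreateDisposition.CREATE_IF_NEEDED
    job_config.write_disposition = bigquery.WriteDisposition.WRITE_APPEND
    # Schema updates only apply when appending to (or truncating a
    # partition of) an existing table, hence the WRITE_APPEND pairing.
    job_config.schema_update_options = [
        bigquery.SchemaUpdateOption.ALLOW_FIELD_ADDITION,
    ]

    load_job = client.load_table_from_uri(
        "gs://my-bucket/data/*.csv",       # placeholder source URI
        "my-project.my_dataset.my_table",  # placeholder destination table
        job_config=job_config,
    )
    load_job.result()  # raises if the load job failed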
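Finally, QueryPriority attaches to a QueryJobConfig rather than a load or extract job. A batch-priority sketch (the query itself is just an illustration against a public dataset):

    from google.cloud import bigquery

    client = bigquery.Client()

    # BATCH queries wait in a queue until idle resources are available,
    # instead of starting immediately the way INTERACTIVE queries do.
    job_config = bigquery.QueryJobConfig()
    job_config.priority = bigquery.QueryPriority.BATCH

    query_job = client.query(
        "SELECT corpus, COUNT(*) AS n "
        "FROM `bigquery-public-data.samples.shakespeare` "
        "GROUP BY corpus",
        job_config=job_config,
    )
    for row in query_job.result():  # blocks until the batch job finishes
        print(row.corpus, row.n)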