@@ -40,7 +40,6 @@
 import pyarrow

 import bigframes.core
-from bigframes.core import expression
 import bigframes.core.compile
 import bigframes.core.guid
 import bigframes.core.identifiers
@@ -91,7 +90,6 @@ def to_sql(
         self,
         array_value: bigframes.core.ArrayValue,
         offset_column: Optional[str] = None,
-        col_id_overrides: Mapping[str, str] = {},
         ordered: bool = False,
         enable_cache: bool = True,
     ) -> str:
@@ -105,7 +103,6 @@ def execute(
         array_value: bigframes.core.ArrayValue,
         *,
         ordered: bool = True,
-        col_id_overrides: Mapping[str, str] = {},
         use_explicit_destination: Optional[bool] = False,
         get_size_bytes: bool = False,
         page_size: Optional[int] = None,
@@ -119,7 +116,6 @@ def execute(
     def export_gbq(
         self,
         array_value: bigframes.core.ArrayValue,
-        col_id_overrides: Mapping[str, str],
         destination: bigquery.TableReference,
         if_exists: Literal["fail", "replace", "append"] = "fail",
         cluster_cols: Sequence[str] = [],
@@ -132,7 +128,6 @@ def export_gbq(
     def export_gcs(
         self,
         array_value: bigframes.core.ArrayValue,
-        col_id_overrides: Mapping[str, str],
         uri: str,
         format: Literal["json", "csv", "parquet"],
         export_options: Mapping[str, Union[bool, str]],
@@ -220,29 +215,23 @@ def to_sql(
         self,
         array_value: bigframes.core.ArrayValue,
         offset_column: Optional[str] = None,
-        col_id_overrides: Mapping[str, str] = {},
         ordered: bool = False,
         enable_cache: bool = True,
     ) -> str:
         if offset_column:
             array_value, internal_offset_col = array_value.promote_offsets()
-            col_id_overrides = dict(col_id_overrides)
-            col_id_overrides[internal_offset_col] = offset_column
         node = (
             self.replace_cached_subtrees(array_value.node)
             if enable_cache
             else array_value.node
         )
-        if col_id_overrides:
-            node = override_ids(node, col_id_overrides)
         return self.compiler.compile(node, ordered=ordered)

     def execute(
         self,
         array_value: bigframes.core.ArrayValue,
         *,
         ordered: bool = True,
-        col_id_overrides: Mapping[str, str] = {},
         use_explicit_destination: Optional[bool] = False,
         get_size_bytes: bool = False,
         page_size: Optional[int] = None,
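Note: the deleted branch in to_sql used col_id_overrides to rename the promoted offsets column to the caller-supplied name. A minimal sketch of that copy-then-override dict pattern, with plain-string stand-ins for the real bigframes column ids (the names here are hypothetical):

from typing import Mapping

def apply_offset_override(
    col_id_overrides: Mapping[str, str],
    internal_offset_col: str,
    offset_column: str,
) -> dict[str, str]:
    # Copy before mutating so the caller's mapping is untouched, mirroring
    # the removed `col_id_overrides = dict(col_id_overrides)` line.
    overrides = dict(col_id_overrides)
    overrides[internal_offset_col] = offset_column
    return overrides

assert apply_offset_override({}, "offsets_1", "row_num") == {"offsets_1": "row_num"}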
@@ -254,15 +243,12 @@ def execute(
         if bigframes.options.compute.enable_multi_query_execution:
             self._simplify_with_caching(array_value)

-        sql = self.to_sql(
-            array_value, ordered=ordered, col_id_overrides=col_id_overrides
-        )
-        adjusted_schema = array_value.schema.rename(col_id_overrides)
+        sql = self.to_sql(array_value, ordered=ordered)
         job_config = bigquery.QueryJobConfig()
         # Use explicit destination to avoid 10GB limit of temporary table
         if use_explicit_destination:
             destination_table = self.storage_manager.create_temp_table(
-                adjusted_schema.to_bigquery(), cluster_cols=[]
+                array_value.schema.to_bigquery(), cluster_cols=[]
             )
             job_config.destination = destination_table
         # TODO(swast): plumb through the api_name of the user-facing api that
@@ -293,12 +279,12 @@ def iterator_supplier():
         )
         # Runs strict validations to ensure internal type predictions and ibis are completely in sync
         # Do not execute these validations outside of testing suite.
-        if "PYTEST_CURRENT_TEST" in os.environ and len(col_id_overrides) == 0:
+        if "PYTEST_CURRENT_TEST" in os.environ:
            self._validate_result_schema(array_value, iterator.schema)

        return ExecuteResult(
            arrow_batches=iterator_supplier,
-            schema=adjusted_schema,
+            schema=array_value.schema,
            query_job=query_job,
            total_bytes=size_bytes,
            total_rows=iterator.total_rows,
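Note: the validation gate can now run under pytest unconditionally, since the result schema comes straight from array_value.schema rather than a renamed copy that could drift from the executed SQL. A minimal sketch of such a strict schema check, assuming pyarrow (already imported in this module) and hypothetical expected/actual schemas:

import os
import pyarrow as pa

def validate_result_schema(expected: pa.Schema, actual: pa.Schema) -> None:
    # Only meaningful inside the test suite, mirroring the PYTEST_CURRENT_TEST gate.
    if "PYTEST_CURRENT_TEST" not in os.environ:
        return
    if not expected.equals(actual):
        raise ValueError(f"Internal schema drift: {expected} != {actual}")

validate_result_schema(
    pa.schema([("id", pa.int64())]),
    pa.schema([("id", pa.int64())]),
)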
@@ -307,7 +293,6 @@ def iterator_supplier():
     def export_gbq(
         self,
         array_value: bigframes.core.ArrayValue,
-        col_id_overrides: Mapping[str, str],
         destination: bigquery.TableReference,
         if_exists: Literal["fail", "replace", "append"] = "fail",
         cluster_cols: Sequence[str] = [],
@@ -323,7 +308,7 @@ def export_gbq(
             "replace": bigquery.WriteDisposition.WRITE_TRUNCATE,
             "append": bigquery.WriteDisposition.WRITE_APPEND,
         }
-        sql = self.to_sql(array_value, ordered=False, col_id_overrides=col_id_overrides)
+        sql = self.to_sql(array_value, ordered=False)
         job_config = bigquery.QueryJobConfig(
             write_disposition=dispositions[if_exists],
             destination=destination,
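Note: the dispositions mapping follows the standard google-cloud-bigquery write modes. A hedged usage sketch; the client setup, table id, and query are placeholders, not values from this PR:

from google.cloud import bigquery

client = bigquery.Client()  # placeholder project/credentials
job_config = bigquery.QueryJobConfig(
    destination="my-project.my_dataset.my_table",  # hypothetical table id
    # WRITE_EMPTY fails if the table already has data, matching if_exists="fail".
    write_disposition=bigquery.WriteDisposition.WRITE_EMPTY,
)
client.query("SELECT 1 AS x", job_config=job_config).result()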
@@ -340,15 +325,13 @@ def export_gbq(
     def export_gcs(
         self,
         array_value: bigframes.core.ArrayValue,
-        col_id_overrides: Mapping[str, str],
         uri: str,
         format: Literal["json", "csv", "parquet"],
         export_options: Mapping[str, Union[bool, str]],
     ):
         query_job = self.execute(
             array_value,
             ordered=False,
-            col_id_overrides=col_id_overrides,
             use_explicit_destination=True,
         ).query_job
         result_table = query_job.destination
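Note: export_gcs materializes the query into an explicit destination table (use_explicit_destination=True) and then exports that table to the URI. A hedged sketch of what the extract step could look like with the standard client API; the helper name and format mapping are assumptions, not this module's code:

from google.cloud import bigquery

def extract_to_gcs(
    client: bigquery.Client,
    table: bigquery.TableReference,
    uri: str,
    format: str,
) -> None:
    # Map the user-facing format names onto BigQuery's destination formats.
    formats = {
        "json": bigquery.DestinationFormat.NEWLINE_DELIMITED_JSON,
        "csv": bigquery.DestinationFormat.CSV,
        "parquet": bigquery.DestinationFormat.PARQUET,
    }
    job_config = bigquery.ExtractJobConfig(destination_format=formats[format])
    # e.g. uri = "gs://my-bucket/out/*.parquet" (wildcard allows sharded output)
    client.extract_table(table, uri, job_config=job_config).result()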
@@ -678,18 +661,3 @@ def generate_head_plan(node: nodes.BigFrameNode, n: int):

 def generate_row_count_plan(node: nodes.BigFrameNode):
     return nodes.RowCountNode(node)
-
-
-def override_ids(
-    node: nodes.BigFrameNode, col_id_overrides: Mapping[str, str]
-) -> nodes.SelectionNode:
-    output_ids = [col_id_overrides.get(id, id) for id in node.schema.names]
-    return nodes.SelectionNode(
-        node,
-        tuple(
-            nodes.AliasedRef(
-                expression.DerefOp(old_id), bigframes.core.identifiers.ColumnId(out_id)
-            )
-            for old_id, out_id in zip(node.ids, output_ids)
-        ),
-    )
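Note: the deleted override_ids helper rewrote output names by wrapping the plan in a selection node that aliased every column. A self-contained sketch of the same rename-by-projection idea, using plain strings in place of the bigframes node and identifier types:

from typing import Mapping, Sequence

def override_ids(names: Sequence[str], overrides: Mapping[str, str]) -> list[tuple[str, str]]:
    # For each existing column, emit (old_name, new_name), defaulting to the
    # old name when no override is given -- the same .get(id, id) pattern as
    # the removed helper.
    output_ids = [overrides.get(name, name) for name in names]
    return list(zip(names, output_ids))

assert override_ids(["col_0", "col_1"], {"col_0": "id"}) == [
    ("col_0", "id"),
    ("col_1", "col_1"),
]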