@@ -98,12 +98,14 @@ def load_scalars_table(
     data_path: str = "scalars.jsonl",
     source_format=enums.SourceFormat.NEWLINE_DELIMITED_JSON,
     schema_source="scalars_schema.json",
+    timestamp_target_precision=None,
 ) -> str:
     schema = bigquery_client.schema_from_json(DATA_DIR / schema_source)
     table_id = data_path.replace(".", "_") + hex(random.randrange(1000000))
     job_config = bigquery.LoadJobConfig()
     job_config.schema = schema
     job_config.source_format = source_format
+    job_config.timestamp_target_precision = timestamp_target_precision
     full_table_id = f"{project_id}.{dataset_id}.{table_id}"
     with open(DATA_DIR / data_path, "rb") as data_file:
         job = bigquery_client.load_table_from_file(
@@ -169,6 +171,23 @@ def scalars_table_csv(
     bigquery_client.delete_table(full_table_id, not_found_ok=True)
 
 
+@pytest.fixture(scope="session")
+def scalars_table_pico(
+    bigquery_client: bigquery.Client, project_id: str, dataset_id: str
+):
+    full_table_id = load_scalars_table(
+        bigquery_client,
+        project_id,
+        dataset_id,
+        data_path="pico.csv",
+        source_format=enums.SourceFormat.CSV,
+        schema_source="pico_schema.json",
+        timestamp_target_precision=[12],
+    )
+    yield full_table_id
+    bigquery_client.delete_table(full_table_id, not_found_ok=True)
+
+
 @pytest.fixture
 def test_table_name(request, replace_non_anum=re.compile(r"[^a-zA-Z0-9_]").sub):
     return replace_non_anum("_", request.node.name)
0 commit comments