@@ -2744,7 +2744,8 @@ class ReadFromBigQuery(PTransform):
2744 2744      :data:`True` for most scenarios in order to catch errors as early as
2745 2745      possible (pipeline construction instead of pipeline execution). It
2746 2746      should be :data:`False` if the table is created during pipeline
2747      -    execution by a previous step.
     2747 +    execution by a previous step. Set this to :data:`False`
     2748 +    if the BigQuery export method is slow due to checking file existence.
2748 2749      coder (~apache_beam.coders.coders.Coder): The coder for the table
2749 2750      rows. If :data:`None`, then the default coder is
2750 2751      _JsonToDictCoder, which will interpret every row as a JSON
@@ -3033,7 +3034,8 @@ class ReadAllFromBigQuery(PTransform):
3033 3034      bucket where the extracted table should be written as a string. If
3034 3035      :data:`None`, then the temp_location parameter is used.
3035 3036      validate (bool): If :data:`True`, various checks will be done when source
3036      -    gets initialized (e.g., is table present?).
     3037 +    gets initialized (e.g., is table present?). Set this to :data:`False`
     3038 +    if the BigQuery export method is slow due to checking file existence.
3037 3039      kms_key (str): Experimental. Optional Cloud KMS key name for use when
3038 3040      creating new temporary tables.
3039 3041      """
@@ -3078,6 +3080,7 @@ def expand(self, pcoll):
3078 3080          _BigQueryReadSplit(
3079 3081              options=pcoll.pipeline.options,
3080 3082              gcs_location=self.gcs_location,
     3083 +            validate=self.validate,
3081 3084              bigquery_job_labels=self.bigquery_job_labels,
3082 3085              job_name=job_name,
3083 3086              step_name=step_name,
0 commit comments