@@ -1213,7 +1213,12 @@ def _read_parquet_init(
     fs: s3fs.S3FileSystem = _utils.get_fs(session=boto3_session, s3_additional_kwargs=s3_additional_kwargs)
     cpus: int = _utils.ensure_cpu_count(use_threads=use_threads)
     data: pyarrow.parquet.ParquetDataset = pyarrow.parquet.ParquetDataset(
-        path_or_paths=path_or_paths, filesystem=fs, metadata_nthreads=cpus, filters=filters, read_dictionary=categories, validate_schema=validate_schema
+        path_or_paths=path_or_paths,
+        filesystem=fs,
+        metadata_nthreads=cpus,
+        filters=filters,
+        read_dictionary=categories,
+        validate_schema=validate_schema,
     )
     return data
 
@@ -1228,7 +1233,7 @@ def read_parquet(
     use_threads: bool = True,
     boto3_session: Optional[boto3.Session] = None,
     s3_additional_kwargs: Optional[Dict[str, str]] = None,
-    validate_schema: bool = True
+    validate_schema: bool = True,
 ) -> Union[pd.DataFrame, Iterator[pd.DataFrame]]:
12331238 """Read Apache Parquet file(s) from from a received S3 prefix or list of S3 objects paths.
 
@@ -1312,7 +1317,7 @@ def read_parquet(
         use_threads=use_threads,
         boto3_session=boto3_session,
         s3_additional_kwargs=s3_additional_kwargs,
-        validate_schema=validate_schema
+        validate_schema=validate_schema,
     )
     if chunked is False:
         return _read_parquet(data=data, columns=columns, categories=categories, use_threads=use_threads)
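For reference, a minimal sketch of how the `validate_schema` flag shown in these hunks is used from the public API (the bucket/prefix below is hypothetical, and the usual `import awswrangler as wr` alias is assumed):

```python
import awswrangler as wr

# validate_schema is forwarded through _read_parquet_init to
# pyarrow.parquet.ParquetDataset, which checks that every Parquet file
# under the prefix shares a consistent schema before any data is read.
df = wr.s3.read_parquet(
    path="s3://my-bucket/my-prefix/",  # hypothetical path
    validate_schema=True,
)
```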