@@ -323,19 +323,6 @@ def _read_parquet(
     s3_additional_kwargs: Optional[Dict[str, Any]],
     arrow_kwargs: Dict[str, Any],
 ) -> Union[pd.DataFrame, Iterator[pd.DataFrame]]:
-    if config.distributed:
-        dataset = read_datasource(
-            datasource=ParquetDatasource(),  # type: ignore
-            parallelism=parallelism,
-            use_threads=use_threads,
-            paths=paths,
-            schema=schema,
-            columns=columns,
-            coerce_int96_timestamp_unit=coerce_int96_timestamp_unit,
-            path_root=path_root,
-        )
-        return _to_modin(dataset=dataset, to_pandas_kwargs=arrow_kwargs)
-
     if chunked:
         return _read_parquet_chunked(
             boto3_session=boto3_session,
@@ -350,6 +337,19 @@ def _read_parquet(
             version_ids=version_ids,
         )
 
+    if config.distributed:
+        dataset = read_datasource(
+            datasource=ParquetDatasource(),  # type: ignore
+            parallelism=parallelism,
+            use_threads=use_threads,
+            paths=paths,
+            schema=schema,
+            columns=columns,
+            coerce_int96_timestamp_unit=coerce_int96_timestamp_unit,
+            path_root=path_root,
+        )
+        return _to_modin(dataset=dataset, to_pandas_kwargs=arrow_kwargs)
+
     executor = _get_executor(use_threads=use_threads)
     tables = executor.map(
         _read_parquet_file,
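
In effect, this commit reorders the early returns in _read_parquet: the chunked branch is now checked before the config.distributed branch, so a chunked read returns an iterator of DataFrames even when distributed mode is enabled. Below is a minimal sketch of the resulting dispatch order; the signature and stub bodies are illustrative placeholders, only the branch ordering reflects the diff:

from typing import Any, Iterator, List, Union

import pandas as pd


def _read_parquet_sketch(
    paths: List[str],
    chunked: Union[bool, int] = False,
    distributed: bool = False,
    **kwargs: Any,
) -> Union[pd.DataFrame, Iterator[pd.DataFrame]]:
    # Order of checks after this commit:
    if chunked:
        # 1. Chunked reads win: return an iterator of DataFrames,
        #    even when distributed mode is enabled.
        return iter([pd.DataFrame()])  # placeholder for _read_parquet_chunked(...)
    if distributed:
        # 2. Distributed path: build a dataset via read_datasource(
        #    ParquetDatasource(), ...) and convert it with _to_modin(...).
        return pd.DataFrame()  # placeholder for _to_modin(dataset, ...)
    # 3. Default local path: map _read_parquet_file over the paths with an
    #    executor and concatenate the resulting tables.
    return pd.DataFrame()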