# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.

 | 18 | +"""IO read functions using global context."""  | 
 | 19 | + | 
 | 20 | +import pathlib  | 
 | 21 | + | 
 | 22 | +from datafusion.dataframe import DataFrame  | 
 | 23 | +from datafusion.expr import Expr  | 
 | 24 | +import pyarrow  | 
 | 25 | +from ._internal import SessionContext as SessionContextInternal  | 
 | 26 | + | 
 | 27 | + | 
def read_parquet(
    path: str | pathlib.Path,
    table_partition_cols: list[tuple[str, str]] | None = None,
    parquet_pruning: bool = True,
    file_extension: str = ".parquet",
    skip_metadata: bool = True,
    schema: pyarrow.Schema | None = None,
    file_sort_order: list[list[Expr]] | None = None,
) -> DataFrame:
    """Read a Parquet source into a :py:class:`~datafusion.dataframe.DataFrame`.

    Args:
        path: Path to the Parquet file.
        table_partition_cols: Partition columns.
        parquet_pruning: Whether the parquet reader should use the predicate
            to prune row groups.
        file_extension: File extension; only files with this extension are
            selected for data input.
        skip_metadata: Whether the parquet reader should skip any metadata
            that may be in the file schema. This can help avoid schema
            conflicts due to metadata.
        schema: An optional schema representing the parquet files. If None,
            the parquet reader will try to infer it based on data in the
            file.
        file_sort_order: Sort order for the file.

    Returns:
        DataFrame representation of the read Parquet files.
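    Example (illustrative sketch; ``data.parquet`` is a hypothetical local
    file, read through the global session context)::

        df = read_parquet("data.parquet")
        df.show()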
    """
    if table_partition_cols is None:
        table_partition_cols = []
    return DataFrame(
        SessionContextInternal._global_ctx().read_parquet(
            str(path),
            table_partition_cols,
            parquet_pruning,
            file_extension,
            skip_metadata,
            schema,
            file_sort_order,
        )
    )


def read_json(
    path: str | pathlib.Path,
    schema: pyarrow.Schema | None = None,
    schema_infer_max_records: int = 1000,
    file_extension: str = ".json",
    table_partition_cols: list[tuple[str, str]] | None = None,
    file_compression_type: str | None = None,
) -> DataFrame:
    """Read a line-delimited JSON data source.

    Args:
        path: Path to the JSON file.
        schema: The data source schema.
        schema_infer_max_records: Maximum number of rows to read from JSON
            files for schema inference if needed.
        file_extension: File extension; only files with this extension are
            selected for data input.
        table_partition_cols: Partition columns.
        file_compression_type: File compression type.

    Returns:
        DataFrame representation of the read JSON files.
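    Example (illustrative sketch; ``records.json`` is a hypothetical
    newline-delimited JSON file)::

        df = read_json("records.json")
        df.show()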
    """
    if table_partition_cols is None:
        table_partition_cols = []
    return DataFrame(
        SessionContextInternal._global_ctx().read_json(
            str(path),
            schema,
            schema_infer_max_records,
            file_extension,
            table_partition_cols,
            file_compression_type,
        )
    )


def read_csv(
    path: str | pathlib.Path | list[str] | list[pathlib.Path],
    schema: pyarrow.Schema | None = None,
    has_header: bool = True,
    delimiter: str = ",",
    schema_infer_max_records: int = 1000,
    file_extension: str = ".csv",
    table_partition_cols: list[tuple[str, str]] | None = None,
    file_compression_type: str | None = None,
) -> DataFrame:
    """Read a CSV data source.

    Args:
        path: Path to the CSV file, or a list of such paths.
        schema: An optional schema representing the CSV files. If None, the
            CSV reader will try to infer it based on data in the file.
        has_header: Whether the CSV file has a header. If schema inference
            is run on a file with no headers, default column names are
            created.
        delimiter: An optional column delimiter.
        schema_infer_max_records: Maximum number of rows to read from CSV
            files for schema inference if needed.
        file_extension: File extension; only files with this extension are
            selected for data input.
        table_partition_cols: Partition columns.
        file_compression_type: File compression type.

    Returns:
        DataFrame representation of the read CSV files.
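    Example (illustrative sketch; ``data.csv`` is a hypothetical local file)::

        df = read_csv("data.csv", has_header=True, delimiter=",")
        df.show()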
    """
    if table_partition_cols is None:
        table_partition_cols = []

    path = [str(p) for p in path] if isinstance(path, list) else str(path)

    return DataFrame(
        SessionContextInternal._global_ctx().read_csv(
            path,
            schema,
            has_header,
            delimiter,
            schema_infer_max_records,
            file_extension,
            table_partition_cols,
            file_compression_type,
        )
    )


def read_avro(
    path: str | pathlib.Path,
    schema: pyarrow.Schema | None = None,
    file_partition_cols: list[tuple[str, str]] | None = None,
    file_extension: str = ".avro",
) -> DataFrame:
    """Create a :py:class:`DataFrame` for reading an Avro data source.

    Args:
        path: Path to the Avro file.
        schema: The data source schema.
        file_partition_cols: Partition columns.
        file_extension: File extension to select.

    Returns:
        DataFrame representation of the read Avro file.
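    Example (illustrative sketch; ``data.avro`` is a hypothetical local file)::

        df = read_avro("data.avro")
        df.show()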
    """
    if file_partition_cols is None:
        file_partition_cols = []
    return DataFrame(
        SessionContextInternal._global_ctx().read_avro(
            str(path), schema, file_partition_cols, file_extension
        )
    )