
Commit 17c184a

Enabled ruff rule ICN001

1 parent b3c33a1

2 files changed: 22 additions and 24 deletions

pyproject.toml

Lines changed: 1 addition & 1 deletion
```diff
@@ -102,7 +102,7 @@ ignore = [
     # "EM102",
     "ERA001",
     # "SIM108",
-    "ICN001",
+    # "ICN001",
     "ANN001",
     "ANN202",
     "PTH",
```

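The toggle above works because ICN001 (ruff's `unconventional-import-alias` rule) was previously suppressed via the `ignore` list; commenting it out re-enables the check, which requires well-known packages to be imported under their conventional alias. A minimal illustration of what the rule flags (my sketch, not code from this commit):

```python
# Flagged by ICN001 once the rule is enabled:
import pandas             # expected: import pandas as pd
import pyarrow as arrow   # expected alias is pa, not arrow

# Accepted: the conventional aliases from ruff's import-convention table.
import pandas as pd
import polars as pl
import pyarrow as pa
```
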
python/datafusion/context.py

Lines changed: 21 additions & 23 deletions
```diff
@@ -40,9 +40,9 @@
 if TYPE_CHECKING:
     import pathlib

-    import pandas
-    import polars
-    import pyarrow
+    import pandas as pd
+    import polars as pl
+    import pyarrow as pa

     from datafusion.plan import ExecutionPlan, LogicalPlan

```
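Because these imports sit under `TYPE_CHECKING`, they exist only for type checkers and are never executed at runtime; the aliases can still appear in signatures when annotations are postponed (`from __future__ import annotations`). A self-contained sketch of the pattern, with a hypothetical helper rather than code from this file:

```python
from __future__ import annotations

from typing import TYPE_CHECKING

if TYPE_CHECKING:
    # Type-checking-only imports; ICN001 applies here too,
    # so the conventional aliases are used.
    import pandas as pd
    import pyarrow as pa


def to_arrow(data: pd.DataFrame) -> pa.Table:
    """Convert a pandas DataFrame to an Arrow table."""
    import pyarrow as pa  # real import, deferred to the call site

    return pa.Table.from_pandas(data)
```
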
```diff
@@ -537,7 +537,7 @@ def register_listing_table(
         path: str | pathlib.Path,
         table_partition_cols: list[tuple[str, str]] | None = None,
         file_extension: str = ".parquet",
-        schema: pyarrow.Schema | None = None,
+        schema: pa.Schema | None = None,
         file_sort_order: list[list[Expr | SortExpr]] | None = None,
     ) -> None:
         """Register multiple files as a single table.
```
```diff
@@ -606,14 +606,14 @@ def sql_with_options(self, query: str, options: SQLOptions) -> DataFrame:

     def create_dataframe(
         self,
-        partitions: list[list[pyarrow.RecordBatch]],
+        partitions: list[list[pa.RecordBatch]],
         name: str | None = None,
-        schema: pyarrow.Schema | None = None,
+        schema: pa.Schema | None = None,
     ) -> DataFrame:
         """Create and return a dataframe using the provided partitions.

         Args:
-            partitions: :py:class:`pyarrow.RecordBatch` partitions to register.
+            partitions: :py:class:`pa.RecordBatch` partitions to register.
             name: Resultant dataframe name.
             schema: Schema for the partitions.

```
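Under the new annotation, `create_dataframe` expects a list of partitions, each itself a list of `pa.RecordBatch`. A usage sketch, assuming only the public `SessionContext` API visible in this diff:

```python
import pyarrow as pa
from datafusion import SessionContext

ctx = SessionContext()
batch = pa.RecordBatch.from_arrays([pa.array([1, 2, 3])], names=["a"])
# One partition holding a single record batch.
df = ctx.create_dataframe([[batch]], name="t")
```
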
```diff
@@ -684,16 +684,14 @@ def from_arrow(
         return DataFrame(self.ctx.from_arrow(data, name))

     @deprecated("Use ``from_arrow`` instead.")
-    def from_arrow_table(
-        self, data: pyarrow.Table, name: str | None = None
-    ) -> DataFrame:
+    def from_arrow_table(self, data: pa.Table, name: str | None = None) -> DataFrame:
         """Create a :py:class:`~datafusion.dataframe.DataFrame` from an Arrow table.

         This is an alias for :py:func:`from_arrow`.
         """
         return self.from_arrow(data, name)

-    def from_pandas(self, data: pandas.DataFrame, name: str | None = None) -> DataFrame:
+    def from_pandas(self, data: pd.DataFrame, name: str | None = None) -> DataFrame:
         """Create a :py:class:`~datafusion.dataframe.DataFrame` from a Pandas DataFrame.

         Args:
```
```diff
@@ -705,7 +703,7 @@ def from_pandas(self, data: pandas.DataFrame, name: str | None = None) -> DataFr
         """
         return DataFrame(self.ctx.from_pandas(data, name))

-    def from_polars(self, data: polars.DataFrame, name: str | None = None) -> DataFrame:
+    def from_polars(self, data: pl.DataFrame, name: str | None = None) -> DataFrame:
         """Create a :py:class:`~datafusion.dataframe.DataFrame` from a Polars DataFrame.

         Args:
```
```diff
@@ -755,7 +753,7 @@ def register_table_provider(
         self.ctx.register_table_provider(name, provider)

     def register_record_batches(
-        self, name: str, partitions: list[list[pyarrow.RecordBatch]]
+        self, name: str, partitions: list[list[pa.RecordBatch]]
     ) -> None:
         """Register record batches as a table.

```
```diff
@@ -776,7 +774,7 @@ def register_parquet(
         parquet_pruning: bool = True,
         file_extension: str = ".parquet",
         skip_metadata: bool = True,
-        schema: pyarrow.Schema | None = None,
+        schema: pa.Schema | None = None,
         file_sort_order: list[list[SortExpr]] | None = None,
     ) -> None:
         """Register a Parquet file as a table.
```
```diff
@@ -817,7 +815,7 @@ def register_csv(
         self,
         name: str,
         path: str | pathlib.Path | list[str | pathlib.Path],
-        schema: pyarrow.Schema | None = None,
+        schema: pa.Schema | None = None,
         has_header: bool = True,
         delimiter: str = ",",
         schema_infer_max_records: int = 1000,
```
```diff
@@ -863,7 +861,7 @@ def register_json(
         self,
         name: str,
         path: str | pathlib.Path,
-        schema: pyarrow.Schema | None = None,
+        schema: pa.Schema | None = None,
         schema_infer_max_records: int = 1000,
         file_extension: str = ".json",
         table_partition_cols: list[tuple[str, str]] | None = None,
```
```diff
@@ -901,7 +899,7 @@ def register_avro(
         self,
         name: str,
         path: str | pathlib.Path,
-        schema: pyarrow.Schema | None = None,
+        schema: pa.Schema | None = None,
         file_extension: str = ".avro",
         table_partition_cols: list[tuple[str, str]] | None = None,
     ) -> None:
```
```diff
@@ -923,8 +921,8 @@ def register_avro(
             name, str(path), schema, file_extension, table_partition_cols
         )

-    def register_dataset(self, name: str, dataset: pyarrow.dataset.Dataset) -> None:
-        """Register a :py:class:`pyarrow.dataset.Dataset` as a table.
+    def register_dataset(self, name: str, dataset: pa.dataset.Dataset) -> None:
+        """Register a :py:class:`pa.dataset.Dataset` as a table.

         Args:
             name: Name of the table to register.
```
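`register_dataset` takes a `pyarrow.dataset.Dataset`, now annotated through the `pa` alias. A short sketch, assuming `ds.dataset()` wrapping an in-memory table (it also accepts file paths), with a made-up table name:

```python
import pyarrow as pa
import pyarrow.dataset as ds
from datafusion import SessionContext

ctx = SessionContext()
# Wrap an in-memory table as a dataset and expose it to SQL.
dataset = ds.dataset(pa.table({"a": [1, 2, 3]}))
ctx.register_dataset("my_table", dataset)  # "my_table" is a hypothetical name
df = ctx.sql("SELECT a FROM my_table")
```
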
```diff
@@ -975,7 +973,7 @@ def session_id(self) -> str:
     def read_json(
         self,
         path: str | pathlib.Path,
-        schema: pyarrow.Schema | None = None,
+        schema: pa.Schema | None = None,
         schema_infer_max_records: int = 1000,
         file_extension: str = ".json",
         table_partition_cols: list[tuple[str, str]] | None = None,
```
```diff
@@ -1012,7 +1010,7 @@ def read_json(
     def read_csv(
         self,
         path: str | pathlib.Path | list[str] | list[pathlib.Path],
-        schema: pyarrow.Schema | None = None,
+        schema: pa.Schema | None = None,
         has_header: bool = True,
         delimiter: str = ",",
         schema_infer_max_records: int = 1000,
```
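All of these `schema` parameters keep their behavior; only the annotation changed to `pa.Schema`. For example, supplying an explicit schema to `read_csv` to bypass inference might look like this (file name hypothetical):

```python
import pyarrow as pa
from datafusion import SessionContext

ctx = SessionContext()
# An explicit schema skips inference over schema_infer_max_records rows.
schema = pa.schema([("a", pa.int64()), ("b", pa.string())])
df = ctx.read_csv("data.csv", schema=schema, has_header=True)
```
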
```diff
@@ -1065,7 +1063,7 @@ def read_parquet(
         parquet_pruning: bool = True,
         file_extension: str = ".parquet",
         skip_metadata: bool = True,
-        schema: pyarrow.Schema | None = None,
+        schema: pa.Schema | None = None,
         file_sort_order: list[list[Expr | SortExpr]] | None = None,
     ) -> DataFrame:
         """Read a Parquet source into a :py:class:`~datafusion.dataframe.Dataframe`.
```
```diff
@@ -1110,7 +1108,7 @@ def read_parquet(
     def read_avro(
         self,
         path: str | pathlib.Path,
-        schema: pyarrow.Schema | None = None,
+        schema: pa.Schema | None = None,
         file_partition_cols: list[tuple[str, str]] | None = None,
         file_extension: str = ".avro",
     ) -> DataFrame:
```
