Commit fa0b363

Ruff UP006: don't use typing module for dict and list typehints

1 parent a3a271c commit fa0b363

23 files changed: +92 -92 lines changed
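
For context: Ruff's UP006 rule (non-pep585-annotation) flags typing.List, typing.Dict, and typing.Tuple in annotations and rewrites them to the builtin generics standardized by PEP 585, which is exactly what this commit applies across the experimental Spark API. A minimal before/after sketch, separate from this commit's code:

    # Before: deprecated typing aliases (what UP006 flags)
    # from typing import Dict, List
    # def summarize(scores: Dict[str, List[int]]) -> List[str]: ...

    # After: PEP 585 builtin generics; valid at runtime on Python 3.9+,
    # and on older versions in annotation positions when the module does
    # `from __future__ import annotations`
    def summarize(scores: dict[str, list[int]]) -> list[str]:
        return [f"{name}: {sum(vals)}" for name, vals in scores.items()]

    print(summarize({"alice": [1, 2, 3]}))  # ['alice: 6']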

duckdb/experimental/spark/conf.py

Lines changed: 3 additions & 3 deletions

@@ -12,20 +12,20 @@ def contains(self, key: str) -> bool:
     def get(self, key: str, defaultValue: Optional[str] = None) -> Optional[str]:
         raise ContributionsAcceptedError
 
-    def getAll(self) -> List[Tuple[str, str]]:
+    def getAll(self) -> list[tuple[str, str]]:
         raise ContributionsAcceptedError
 
     def set(self, key: str, value: str) -> "SparkConf":
         raise ContributionsAcceptedError
 
-    def setAll(self, pairs: List[Tuple[str, str]]) -> "SparkConf":
+    def setAll(self, pairs: list[tuple[str, str]]) -> "SparkConf":
         raise ContributionsAcceptedError
 
     def setAppName(self, value: str) -> "SparkConf":
         raise ContributionsAcceptedError
 
     def setExecutorEnv(
-        self, key: Optional[str] = None, value: Optional[str] = None, pairs: Optional[List[Tuple[str, str]]] = None
+        self, key: Optional[str] = None, value: Optional[str] = None, pairs: Optional[list[tuple[str, str]]] = None
     ) -> "SparkConf":
         raise ContributionsAcceptedError
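
Note that UP006 only touches the PEP 585 generics: Optional in the setExecutorEnv signature stays as-is, since rewriting Optional/Union to the PEP 604 pipe syntax is a separate Ruff rule (UP007, with UP045 covering Optional in newer Ruff releases). The mixed style above is valid; a small sketch:

    from typing import Optional

    # UP006 output: builtin generics nested inside typing.Optional
    def set_pairs(pairs: Optional[list[tuple[str, str]]] = None) -> None:
        for key, value in pairs or []:
            print(key, value)

    # What UP007/UP045 would additionally produce (Python 3.10+ syntax):
    # def set_pairs(pairs: list[tuple[str, str]] | None = None) -> None: ...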

duckdb/experimental/spark/errors/exceptions/base.py

Lines changed: 3 additions & 3 deletions

@@ -13,7 +13,7 @@ def __init__(
         # The error class, decides the message format, must be one of the valid options listed in 'error_classes.py'
         error_class: Optional[str] = None,
         # The dictionary listing the arguments specified in the message (or the error_class)
-        message_parameters: Optional[Dict[str, str]] = None,
+        message_parameters: Optional[dict[str, str]] = None,
     ):
         # `message` vs `error_class` & `message_parameters` are mutually exclusive.
         assert (message is not None and (error_class is None and message_parameters is None)) or (
@@ -24,7 +24,7 @@ def __init__(
 
         if message is None:
             self.message = self.error_reader.get_error_message(
-                cast(str, error_class), cast(Dict[str, str], message_parameters)
+                cast(str, error_class), cast(dict[str, str], message_parameters)
             )
         else:
             self.message = message
@@ -45,7 +45,7 @@ def getErrorClass(self) -> Optional[str]:
         """
         return self.error_class
 
-    def getMessageParameters(self) -> Optional[Dict[str, str]]:
+    def getMessageParameters(self) -> Optional[dict[str, str]]:
         """
         Returns a message parameters as a dictionary.
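
The message_parameters dict is applied to a message template looked up by error_class. A rough, hypothetical sketch of that pattern (the real reader is ErrorClassesReader in errors/utils.py; the table and class name below are illustrative):

    # Hypothetical template table; not the real ERROR_CLASSES_MAP
    ERROR_TEMPLATES: dict[str, str] = {
        "COLUMN_NOT_FOUND": "Column '{name}' does not exist in the schema",
    }

    def get_error_message(error_class: str, message_parameters: dict[str, str]) -> str:
        # Fill the placeholders in the template with the supplied parameters
        return ERROR_TEMPLATES[error_class].format(**message_parameters)

    print(get_error_message("COLUMN_NOT_FOUND", {"name": "age"}))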

duckdb/experimental/spark/errors/utils.py

Lines changed: 1 addition & 1 deletion

@@ -29,7 +29,7 @@ class ErrorClassesReader:
     def __init__(self) -> None:
         self.error_info_map = ERROR_CLASSES_MAP
 
-    def get_error_message(self, error_class: str, message_parameters: Dict[str, str]) -> str:
+    def get_error_message(self, error_class: str, message_parameters: dict[str, str]) -> str:
         """
         Returns the completed error message by applying message parameters to the message template.
         """

duckdb/experimental/spark/sql/_typing.py

Lines changed: 1 addition & 1 deletion

@@ -57,7 +57,7 @@
     float,
 )
 
-RowLike = TypeVar("RowLike", List[Any], Tuple[Any, ...], types.Row)
+RowLike = TypeVar("RowLike", list[Any], tuple[Any, ...], types.Row)
 
 SQLBatchedUDFType = Literal[100]
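
One detail worth noting about this hunk: TypeVar constraints (like the cast targets in base.py above) are evaluated at runtime, not lazily the way annotations are, so from __future__ import annotations does not help there; subscripting the builtins in those positions requires Python 3.9+. A sketch of the distinction, with illustrative names:

    from __future__ import annotations  # defers evaluation of annotations only

    from typing import Any, TypeVar, cast

    # Annotation position: lazily evaluated, fine even before Python 3.9
    def first_row(rows: list[tuple[Any, ...]]) -> tuple[Any, ...]:
        return rows[0]

    # Runtime positions: these subscript list/tuple/dict at import time,
    # so they need Python 3.9+ regardless of the future import
    RowLike = TypeVar("RowLike", list[Any], tuple[Any, ...])
    params = cast(dict[str, str], {"key": "value"})

    print(first_row([(1, 2)]), params)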

duckdb/experimental/spark/sql/catalog.py

Lines changed: 4 additions & 4 deletions

@@ -36,7 +36,7 @@ class Catalog:
     def __init__(self, session: SparkSession):
         self._session = session
 
-    def listDatabases(self) -> List[Database]:
+    def listDatabases(self) -> list[Database]:
         res = self._session.conn.sql('select database_name from duckdb_databases()').fetchall()
 
         def transform_to_database(x) -> Database:
@@ -45,7 +45,7 @@ def transform_to_database(x) -> Database:
         databases = [transform_to_database(x) for x in res]
         return databases
 
-    def listTables(self) -> List[Table]:
+    def listTables(self) -> list[Table]:
         res = self._session.conn.sql('select table_name, database_name, sql, temporary from duckdb_tables()').fetchall()
 
         def transform_to_table(x) -> Table:
@@ -54,7 +54,7 @@ def transform_to_table(x) -> Table:
         tables = [transform_to_table(x) for x in res]
         return tables
 
-    def listColumns(self, tableName: str, dbName: Optional[str] = None) -> List[Column]:
+    def listColumns(self, tableName: str, dbName: Optional[str] = None) -> list[Column]:
         query = f"""
         select column_name, data_type, is_nullable from duckdb_columns() where table_name = '{tableName}'
         """
@@ -68,7 +68,7 @@ def transform_to_column(x) -> Column:
         columns = [transform_to_column(x) for x in res]
         return columns
 
-    def listFunctions(self, dbName: Optional[str] = None) -> List[Function]:
+    def listFunctions(self, dbName: Optional[str] = None) -> list[Function]:
         raise NotImplementedError
 
     def setCurrentDatabase(self, dbName: str) -> None:
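
These list* methods all follow one pattern: run a query against a DuckDB metadata function, fetch plain tuples, and map each tuple into a typed record before returning a list. The raw half of that pattern, runnable with just the duckdb package:

    import duckdb

    con = duckdb.connect()
    rows = con.sql("select database_name from duckdb_databases()").fetchall()

    # fetchall() returns plain tuples; the Catalog methods wrap each one
    # in a record type (Database, Table, ...) before returning a list
    names: list[str] = [row[0] for row in rows]
    print(names)  # e.g. ['memory', 'system', 'temp']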

duckdb/experimental/spark/sql/column.py

Lines changed: 2 additions & 2 deletions

@@ -234,10 +234,10 @@ def cast(self, dataType: Union[DataType, str]) -> "Column":
     def isin(self, *cols: Any) -> "Column":
         if len(cols) == 1 and isinstance(cols[0], (list, set)):
             # Only one argument supplied, it's a list
-            cols = cast(Tuple, cols[0])
+            cols = cast(tuple, cols[0])
 
         cols = cast(
-            Tuple,
+            tuple,
             [_get_expr(c) for c in cols],
         )
         return Column(self.expr.isin(*cols))
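
A reminder of why cast(Tuple, ...) could become cast(tuple, ...) so freely: typing.cast performs no conversion at runtime; it simply returns its second argument and informs the type checker, so any object usable as a type expression works as the target. A small sketch:

    from typing import cast

    values = [1, 2, 3]
    same = cast(tuple, values)   # no conversion: still the original list
    assert same is values

    converted = tuple(values)    # an actual conversion calls the builtin
    assert converted == (1, 2, 3)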

duckdb/experimental/spark/sql/dataframe.py

Lines changed: 15 additions & 15 deletions

@@ -143,7 +143,7 @@ def withColumn(self, columnName: str, col: Column) -> "DataFrame":
         rel = self.relation.select(*cols)
         return DataFrame(rel, self.session)
 
-    def withColumns(self, *colsMap: Dict[str, Column]) -> "DataFrame":
+    def withColumns(self, *colsMap: dict[str, Column]) -> "DataFrame":
         """
         Returns a new :class:`DataFrame` by adding multiple columns or replacing the
         existing columns that have the same names.
@@ -218,7 +218,7 @@ def withColumns(self, *colsMap: Dict[str, Column]) -> "DataFrame":
         rel = self.relation.select(*cols)
         return DataFrame(rel, self.session)
 
-    def withColumnsRenamed(self, colsMap: Dict[str, str]) -> "DataFrame":
+    def withColumnsRenamed(self, colsMap: dict[str, str]) -> "DataFrame":
         """
         Returns a new :class:`DataFrame` by renaming multiple columns.
         This is a no-op if the schema doesn't contain the given column names.
@@ -356,7 +356,7 @@ def transform(
         return result
 
     def sort(
-        self, *cols: Union[str, Column, List[Union[str, Column]]], **kwargs: Any
+        self, *cols: Union[str, Column, list[Union[str, Column]]], **kwargs: Any
     ) -> "DataFrame":
         """Returns a new :class:`DataFrame` sorted by the specified column(s).
 
@@ -487,15 +487,15 @@ def sort(
 
     orderBy = sort
 
-    def head(self, n: Optional[int] = None) -> Union[Optional[Row], List[Row]]:
+    def head(self, n: Optional[int] = None) -> Union[Optional[Row], list[Row]]:
         if n is None:
             rs = self.head(1)
             return rs[0] if rs else None
         return self.take(n)
 
     first = head
 
-    def take(self, num: int) -> List[Row]:
+    def take(self, num: int) -> list[Row]:
         return self.limit(num).collect()
 
     def filter(self, condition: "ColumnOrName") -> "DataFrame":
@@ -579,7 +579,7 @@ def select(self, *cols) -> "DataFrame":
         return DataFrame(rel, self.session)
 
     @property
-    def columns(self) -> List[str]:
+    def columns(self) -> list[str]:
         """Returns all column names as a list.
 
         Examples
@@ -589,20 +589,20 @@ def columns(self) -> List[str]:
         """
         return [f.name for f in self.schema.fields]
 
-    def _ipython_key_completions_(self) -> List[str]:
+    def _ipython_key_completions_(self) -> list[str]:
         # Provides tab-completion for column names in PySpark DataFrame
         # when accessed in bracket notation, e.g. df['<TAB>]
         return self.columns
 
-    def __dir__(self) -> List[str]:
+    def __dir__(self) -> list[str]:
         out = set(super().__dir__())
         out.update(c for c in self.columns if c.isidentifier() and not iskeyword(c))
         return sorted(out)
 
     def join(
         self,
         other: "DataFrame",
-        on: Optional[Union[str, List[str], Column, List[Column]]] = None,
+        on: Optional[Union[str, list[str], Column, list[Column]]] = None,
         how: Optional[str] = None,
     ) -> "DataFrame":
         """Joins with another :class:`DataFrame`, using the given join expression.
@@ -704,7 +704,7 @@ def join(
             assert isinstance(
                 on[0], Expression
             ), "on should be Column or list of Column"
-            on = reduce(lambda x, y: x.__and__(y), cast(List[Expression], on))
+            on = reduce(lambda x, y: x.__and__(y), cast(list[Expression], on))
 
 
         if on is None and how is None:
@@ -893,11 +893,11 @@ def __getitem__(self, item: Union[int, str]) -> Column:
         ...
 
     @overload
-    def __getitem__(self, item: Union[Column, List, Tuple]) -> "DataFrame":
+    def __getitem__(self, item: Union[Column, list, tuple]) -> "DataFrame":
         ...
 
     def __getitem__(
-        self, item: Union[int, str, Column, List, Tuple]
+        self, item: Union[int, str, Column, list, tuple]
     ) -> Union[Column, "DataFrame"]:
         """Returns the column as a :class:`Column`.
 
@@ -942,7 +942,7 @@ def groupBy(self, *cols: "ColumnOrName") -> "GroupedData":
         ...
 
     @overload
-    def groupBy(self, __cols: Union[List[Column], List[str]]) -> "GroupedData":
+    def groupBy(self, __cols: Union[list[Column], list[str]]) -> "GroupedData":
         ...
 
     def groupBy(self, *cols: "ColumnOrName") -> "GroupedData":  # type: ignore[misc]
@@ -1259,7 +1259,7 @@ def exceptAll(self, other: "DataFrame") -> "DataFrame":
         """
         return DataFrame(self.relation.except_(other.relation), self.session)
 
-    def dropDuplicates(self, subset: Optional[List[str]] = None) -> "DataFrame":
+    def dropDuplicates(self, subset: Optional[list[str]] = None) -> "DataFrame":
         """Return a new :class:`DataFrame` with duplicate rows removed,
         optionally only considering certain columns.
 
@@ -1391,7 +1391,7 @@ def toDF(self, *cols) -> "DataFrame":
         new_rel = self.relation.project(*projections)
         return DataFrame(new_rel, self.session)
 
-    def collect(self) -> List[Row]:
+    def collect(self) -> list[Row]:
         columns = self.relation.columns
         result = self.relation.fetchall()
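
The join hunk folds a list of column expressions into a single conjunction with reduce. The same pattern in isolation, using a toy expression class (illustrative, not the real Expression type):

    from functools import reduce

    class Expr:
        # Toy stand-in for a column expression that supports "&"
        def __init__(self, text: str) -> None:
            self.text = text

        def __and__(self, other: "Expr") -> "Expr":
            return Expr(f"({self.text} AND {other.text})")

    conditions: list[Expr] = [Expr("a.id = b.id"), Expr("a.ts = b.ts")]
    combined = reduce(lambda x, y: x & y, conditions)
    print(combined.text)  # (a.id = b.id AND a.ts = b.ts)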

duckdb/experimental/spark/sql/functions.py

Lines changed: 1 addition & 1 deletion

@@ -111,7 +111,7 @@ def struct(*cols: Column) -> Column:
 
 
 def array(
-    *cols: Union["ColumnOrName", Union[List["ColumnOrName"], Tuple["ColumnOrName", ...]]]
+    *cols: Union["ColumnOrName", Union[list["ColumnOrName"], tuple["ColumnOrName", ...]]]
 ) -> Column:
     """Creates a new array column.
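
The array signature accepts either separate columns or a single list/tuple of them, the same call convention Column.isin handles above. The usual normalization for that union of call styles looks like this (illustrative helper, not the commit's code):

    from typing import Any

    def normalize_cols(*cols: Any) -> list[Any]:
        # Accept f(a, b, c) as well as f([a, b, c]) or f((a, b, c))
        if len(cols) == 1 and isinstance(cols[0], (list, tuple)):
            return list(cols[0])
        return list(cols)

    assert normalize_cols("a", "b") == ["a", "b"]
    assert normalize_cols(["a", "b"]) == ["a", "b"]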

duckdb/experimental/spark/sql/group.py

Lines changed: 3 additions & 3 deletions

@@ -177,7 +177,7 @@ def avg(self, *cols: str) -> DataFrame:
         if len(columns) == 0:
             schema = self._df.schema
             # Take only the numeric types of the relation
-            columns: List[str] = [x.name for x in schema.fields if isinstance(x.dataType, NumericType)]
+            columns: list[str] = [x.name for x in schema.fields if isinstance(x.dataType, NumericType)]
         return _api_internal(self, "avg", *columns)
 
     @df_varargs_api
@@ -312,10 +312,10 @@ def agg(self, *exprs: Column) -> DataFrame:
         ...
 
     @overload
-    def agg(self, __exprs: Dict[str, str]) -> DataFrame:
+    def agg(self, __exprs: dict[str, str]) -> DataFrame:
         ...
 
-    def agg(self, *exprs: Union[Column, Dict[str, str]]) -> DataFrame:
+    def agg(self, *exprs: Union[Column, dict[str, str]]) -> DataFrame:
         """Compute aggregates and returns the result as a :class:`DataFrame`.
 
         The available aggregate functions can be:
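
In the dict overload of agg, keys are column names and values are aggregate function names, mirroring PySpark. A usage sketch, assuming the experimental session builder and createDataFrame behave like their PySpark counterparts:

    from duckdb.experimental.spark.sql import SparkSession

    spark = SparkSession.builder.getOrCreate()
    df = spark.createDataFrame([("a", 1), ("a", 3), ("b", 2)], ["key", "value"])

    # dict form: {column name: aggregate function name}
    df.groupBy("key").agg({"value": "max"}).show()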

duckdb/experimental/spark/sql/readwriter.py

Lines changed: 4 additions & 4 deletions

@@ -26,7 +26,7 @@ def parquet(
         self,
         path: str,
         mode: Optional[str] = None,
-        partitionBy: Union[str, List[str], None] = None,
+        partitionBy: Union[str, list[str], None] = None,
         compression: Optional[str] = None,
     ) -> None:
         relation = self.dataframe.relation
@@ -94,7 +94,7 @@ def __init__(self, session: "SparkSession"):
 
     def load(
         self,
-        path: Optional[Union[str, List[str]]] = None,
+        path: Optional[Union[str, list[str]]] = None,
         format: Optional[str] = None,
         schema: Optional[Union[StructType, str]] = None,
         **options: OptionalPrimitiveType,
@@ -131,7 +131,7 @@ def load(
 
     def csv(
         self,
-        path: Union[str, List[str]],
+        path: Union[str, list[str]],
         schema: Optional[Union[StructType, str]] = None,
         sep: Optional[str] = None,
         encoding: Optional[str] = None,
@@ -263,7 +263,7 @@ def parquet(self, *paths: str, **options: "OptionalPrimitiveType") -> "DataFrame
 
     def json(
         self,
-        path: Union[str, List[str]],
+        path: Union[str, list[str]],
         schema: Optional[Union[StructType, str]] = None,
         primitivesAsString: Optional[Union[bool, str]] = None,
         prefersDecimal: Optional[Union[bool, str]] = None,
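
Several of these readers take path as Union[str, list[str]]; the standard way to service both shapes is a single isinstance check (illustrative helper, not the commit's code):

    from typing import Union

    def normalize_paths(path: Union[str, list[str]]) -> list[str]:
        # Wrap a bare string; pass a list through unchanged
        return [path] if isinstance(path, str) else path

    assert normalize_paths("data.csv") == ["data.csv"]
    assert normalize_paths(["a.csv", "b.csv"]) == ["a.csv", "b.csv"]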
