 from .group import GroupedData
 from .session import SparkSession

-from .functions import _to_column_expr, col, lit
+from duckdb.experimental.spark.sql import functions as spark_sql_functions


class DataFrame:  # noqa: D101
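Binding the whole `functions` module under a namespace, instead of importing `col`/`lit`/`_to_column_expr` by name, defers each lookup to call time; that is the usual way to break an import cycle between `dataframe.py` and `functions.py` (the cycle itself is assumed here, not shown in the hunk). A minimal sketch of the difference:

# Name import: `col` must already exist when this module first loads,
# which fails if functions.py in turn imports names from this module.
# from .functions import col

# Module import: only the module object is bound at load time;
# `spark_sql_functions.col` is resolved lazily at call time.
from duckdb.experimental.spark.sql import functions as spark_sql_functions

id_column = spark_sql_functions.col("id")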
@@ -438,7 +438,7 @@ def sort(self, *cols: Union[str, Column, list[Union[str, Column]]], **kwargs: Any
        for c in cols:
            _c = c
            if isinstance(c, str):
-                _c = col(c)
+                _c = spark_sql_functions.col(c)
            elif isinstance(c, int) and not isinstance(c, bool):
                # ordinal is 1-based
                if c > 0:
@@ -466,7 +466,7 @@ def sort(self, *cols: Union[str, Column, list[Union[str, Column]]], **kwargs: Any
            message_parameters={"arg_name": "ascending", "arg_type": type(ascending).__name__},
        )

-        columns = [_to_column_expr(c) for c in columns]
+        columns = [spark_sql_functions._to_column_expr(c) for c in columns]
        rel = self.relation.sort(*columns)
        return DataFrame(rel, self.session)

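Taken together, the two `sort` hunks route integer ordinals through `spark_sql_functions.col` just like strings. A hedged usage sketch, assuming the PySpark-style session builder and ordinal semantics (only the positive branch is visible above; descending-by-negative-ordinal is an assumption carried over from PySpark):

from duckdb.experimental.spark.sql import SparkSession

spark = SparkSession.builder.getOrCreate()
df = spark.createDataFrame([(2, "b"), (1, "a")], ["id", "name"])

df.sort("id")      # by name, as before
df.sort(df.name)   # by Column, as before
df.sort(1)         # 1-based ordinal: first column ("id"), ascending
df.sort(-2)        # assumed PySpark behavior: second column, descending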
@@ -678,7 +678,7 @@ def join(
        if on is not None and not all(isinstance(x, str) for x in on):
            assert isinstance(on, list)
            # Get (or create) the Expressions from the list of Columns
-            on = [_to_column_expr(x) for x in on]
+            on = [spark_sql_functions._to_column_expr(x) for x in on]

            # & all the Expressions together to form one Expression
            assert isinstance(on[0], Expression), "on should be Column or list of Column"
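When `on` is a list of Columns, each element is converted to an Expression and the results are ANDed together, so the list form and a single compound condition should behave identically. Illustrative sketch (table contents and column names are made up; `spark` as in the sort sketch above):

orders = spark.createDataFrame([(1, 10, "eu")], ["order_id", "customer_id", "region"])
customers = spark.createDataFrame([(10, "eu")], ["id", "region"])

# These two joins are intended to be equivalent:
joined = orders.join(
    customers,
    on=[orders.customer_id == customers.id, orders.region == customers.region],
)
joined = orders.join(
    customers,
    on=(orders.customer_id == customers.id) & (orders.region == customers.region),
)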
@@ -882,7 +882,7 @@ def __getitem__(self, item: Union[int, str, Column, list, tuple]) -> Union[Column, "DataFrame"]:
        elif isinstance(item, (list, tuple)):
            return self.select(*item)
        elif isinstance(item, int):
-            return col(self._schema[item].name)
+            return spark_sql_functions.col(self._schema[item].name)
        else:
            msg = f"Unexpected item type: {type(item)}"
            raise TypeError(msg)
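The `int` branch makes positional indexing resolve a 0-based index against the schema and return a Column (note the contrast with sort's 1-based ordinals). Illustrative sketch, reusing the `df` from the sort example above:

first = df[0]             # Column for the first schema field
df.select(df[0], df[1])   # positional selection
df[["id", "name"]]        # list/tuple still delegates to select()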
@@ -904,7 +904,7 @@ def __getattr__(self, name: str) -> Column:
    def groupBy(self, *cols: "ColumnOrName") -> "GroupedData": ...

    @overload
-    def groupBy(self, __cols: Union[list[Column], list[str]]) -> "GroupedData": ...
+    def groupBy(self, __cols: Union[list[Column], list[str]]) -> "GroupedData": ...  # noqa: PYI063

    def groupBy(self, *cols: "ColumnOrName") -> "GroupedData":  # type: ignore[misc]
        """Groups the :class:`DataFrame` using the specified columns,
@@ -1094,7 +1094,7 @@ def unionByName(self, other: "DataFrame", allowMissingColumns: bool = False) -> "DataFrame":
                if col in other.relation.columns:
                    cols.append(col)
                else:
-                    cols.append(lit(None))
+                    cols.append(spark_sql_functions.lit(None))
            other = other.select(*cols)
        else:
            other = other.select(*self.relation.columns)
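In the `allowMissingColumns=True` path shown here, columns that `self` has but `other` lacks are padded on `other`'s side with `lit(None)` before the union; the hunk does not show the symmetric direction, so this sketch only exercises the visible case (data is illustrative, session builder assumed as above):

from duckdb.experimental.spark.sql import SparkSession

spark = SparkSession.builder.getOrCreate()
df1 = spark.createDataFrame([(1, "a")], ["id", "name"])
df2 = spark.createDataFrame([(2,)], ["id"])

# Rows coming from df2 get NULL for the missing "name" column.
df1.unionByName(df2, allowMissingColumns=True).show()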