@@ -171,9 +171,7 @@ def _collect_to_arrow(self) -> pa.Table:
                 else:  # pragma: no cover
                     raise
         else:
-            # NOTE: See https://github.com/narwhals-dev/narwhals/pull/2051#discussion_r1969224309
-            to_arrow: Incomplete = self._native_frame.toArrow
-            return to_arrow()
+            return self._native_frame.toArrow()
 
     def _iter_columns(self) -> Iterator[Column]:
         for col in self.columns:
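Context for the hunk above: `DataFrame.toArrow` is the conversion this branch now calls directly instead of going through an `Incomplete`-typed alias. A minimal sketch of the native call, assuming PySpark 4.0+ (where `toArrow` is available) and a local session:

    from pyspark.sql import SparkSession

    spark = SparkSession.builder.getOrCreate()
    df = spark.createDataFrame([(1, "a"), (2, "b")], ["id", "val"])
    table = df.toArrow()  # a pyarrow.Table with columns "id" and "val"
    print(table.schema)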
@@ -229,7 +227,7 @@ def collect(
         raise ValueError(msg)  # pragma: no cover
 
     def simple_select(self: Self, *column_names: str) -> Self:
-        return self._from_native_frame(self._native_frame.select(*column_names))  # pyright: ignore[reportArgumentType]
+        return self._from_native_frame(self._native_frame.select(*column_names))
 
     def aggregate(
         self: Self,
@@ -238,7 +236,7 @@ def aggregate(
         new_columns = evaluate_exprs(self, *exprs)
 
         new_columns_list = [col.alias(col_name) for col_name, col in new_columns]
-        return self._from_native_frame(self._native_frame.agg(*new_columns_list))  # pyright: ignore[reportArgumentType]
+        return self._from_native_frame(self._native_frame.agg(*new_columns_list))
 
     def select(
         self: Self,
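`aggregate` evaluates each expression to a native aliased column and hands the whole batch to `DataFrame.agg`, which without a preceding `groupBy` reduces the frame to a single row. A minimal sketch over hypothetical toy data:

    from pyspark.sql import SparkSession, functions as F

    spark = SparkSession.builder.getOrCreate()
    df = spark.createDataFrame([(1, 10), (1, 20), (2, 30)], ["g", "v"])
    new_columns_list = [F.sum("v").alias("v_sum"), F.max("v").alias("v_max")]
    df.agg(*new_columns_list).show()  # one row: v_sum=60, v_max=30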
@@ -253,17 +251,17 @@ def select(
             return self._from_native_frame(spark_df)
 
         new_columns_list = [col.alias(col_name) for (col_name, col) in new_columns]
-        return self._from_native_frame(self._native_frame.select(*new_columns_list))  # pyright: ignore[reportArgumentType]
+        return self._from_native_frame(self._native_frame.select(*new_columns_list))
 
     def with_columns(self: Self, *exprs: SparkLikeExpr) -> Self:
         new_columns = evaluate_exprs(self, *exprs)
-        return self._from_native_frame(self._native_frame.withColumns(dict(new_columns)))  # pyright: ignore[reportArgumentType]
+        return self._from_native_frame(self._native_frame.withColumns(dict(new_columns)))
 
     def filter(self: Self, predicate: SparkLikeExpr) -> Self:
         # `[0]` is safe as the predicate's expression only returns a single column
         condition = predicate._call(self)[0]
-        spark_df = self._native_frame.where(condition)  # pyright: ignore[reportArgumentType]
-        return self._from_native_frame(spark_df)  # pyright: ignore[reportArgumentType]
+        spark_df = self._native_frame.where(condition)
+        return self._from_native_frame(spark_df)
 
     @property
     def schema(self: Self) -> dict[str, DType]:
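The `filter` path above boils down to passing a single boolean `Column` to `DataFrame.where` (an alias of the native `filter`). A minimal sketch of the underlying call, with a hypothetical predicate:

    from pyspark.sql import SparkSession, functions as F

    spark = SparkSession.builder.getOrCreate()
    df = spark.createDataFrame([(1,), (2,), (3,)], ["a"])
    condition = F.col("a") > 1  # a single boolean Column, like the predicate here
    df.where(condition).show()  # keeps the rows with a=2 and a=3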
@@ -272,8 +270,7 @@ def schema(self: Self) -> dict[str, DType]:
             field.name: native_to_narwhals_dtype(
                 dtype=field.dataType,
                 version=self._version,
-                # NOTE: Unclear if this is an unsafe hash (https://github.com/narwhals-dev/narwhals/pull/2051#discussion_r1970074662)
-                spark_types=self._native_dtypes,  # pyright: ignore[reportArgumentType]
+                spark_types=self._native_dtypes,
             )
             for field in self._native_frame.schema
         }
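The comprehension above walks the native schema: `DataFrame.schema` is a `StructType`, iterable over `StructField`s whose `name` and `dataType` feed `native_to_narwhals_dtype`. For example:

    from pyspark.sql import SparkSession

    spark = SparkSession.builder.getOrCreate()
    df = spark.createDataFrame([(1, "a")], ["id", "val"])
    for field in df.schema:  # StructFields
        print(field.name, field.dataType)  # e.g. id LongType(), val StringType()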
@@ -286,10 +283,10 @@ def drop(self: Self, columns: list[str], strict: bool) -> Self:  # noqa: FBT001
         columns_to_drop = parse_columns_to_drop(
             compliant_frame=self, columns=columns, strict=strict
         )
-        return self._from_native_frame(self._native_frame.drop(*columns_to_drop))  # pyright: ignore[reportArgumentType]
+        return self._from_native_frame(self._native_frame.drop(*columns_to_drop))
 
     def head(self: Self, n: int) -> Self:
-        return self._from_native_frame(self._native_frame.limit(num=n))  # pyright: ignore[reportArgumentType]
+        return self._from_native_frame(self._native_frame.limit(num=n))
 
     def group_by(self: Self, *keys: str, drop_null_keys: bool) -> SparkLikeLazyGroupBy:
         from narwhals._spark_like.group_by import SparkLikeLazyGroupBy
@@ -319,18 +316,18 @@ def sort(
         )
 
         sort_cols = [sort_f(col) for col, sort_f in zip(by, sort_funcs)]
-        return self._from_native_frame(self._native_frame.sort(*sort_cols))  # pyright: ignore[reportArgumentType]
+        return self._from_native_frame(self._native_frame.sort(*sort_cols))
 
     def drop_nulls(self: Self, subset: list[str] | None) -> Self:
-        return self._from_native_frame(self._native_frame.dropna(subset=subset))  # pyright: ignore[reportArgumentType]
+        return self._from_native_frame(self._native_frame.dropna(subset=subset))
 
     def rename(self: Self, mapping: dict[str, str]) -> Self:
         rename_mapping = {
             colname: mapping.get(colname, colname) for colname in self.columns
         }
         return self._from_native_frame(
             self._native_frame.select(
-                [self._F.col(old).alias(new) for old, new in rename_mapping.items()]  # pyright: ignore[reportArgumentType]
+                [self._F.col(old).alias(new) for old, new in rename_mapping.items()]
             )
         )
 
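`rename` above is expressed as a plain projection: every column is selected and aliased to its (possibly unchanged) target name, renaming several columns in one pass. Roughly, against the native frame:

    from pyspark.sql import SparkSession, functions as F

    spark = SparkSession.builder.getOrCreate()
    df = spark.createDataFrame([(1, "a")], ["id", "val"])
    mapping = {"id": "key"}  # "val" is absent, so it keeps its name
    rename_mapping = {c: mapping.get(c, c) for c in df.columns}
    df.select([F.col(old).alias(new) for old, new in rename_mapping.items()]).show()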
@@ -344,7 +341,7 @@ def unique(
             msg = "`LazyFrame.unique` with PySpark backend only supports `keep='any'`."
             raise ValueError(msg)
         check_column_exists(self.columns, subset)
-        return self._from_native_frame(self._native_frame.dropDuplicates(subset=subset))  # pyright: ignore[reportArgumentType]
+        return self._from_native_frame(self._native_frame.dropDuplicates(subset=subset))
 
     def join(
         self: Self,
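`dropDuplicates(subset=...)` keeps one arbitrary row per key, which is why only `keep='any'` can be supported here: there is no native way to pin down which duplicate survives. For example:

    from pyspark.sql import SparkSession

    spark = SparkSession.builder.getOrCreate()
    df = spark.createDataFrame([(1, "x"), (1, "y"), (2, "z")], ["a", "b"])
    df.dropDuplicates(subset=["a"]).show()  # one row per "a"; which "b" wins is unspecified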
@@ -388,7 +385,7 @@ def join(
             ]
         )
         return self._from_native_frame(
-            self_native.join(other_native, on=left_on, how=how).select(col_order)  # pyright: ignore[reportArgumentType]
+            self_native.join(other_native, on=left_on, how=how).select(col_order)
         )
 
     def explode(self: Self, columns: list[str]) -> Self:
@@ -424,7 +421,7 @@ def explode(self: Self, columns: list[str]) -> Self:
                         else self._F.explode_outer(col_name).alias(col_name)
                         for col_name in column_names
                     ]
-                ),  # pyright: ignore[reportArgumentType]
+                )
             )
         elif self._implementation.is_sqlframe():
             # Not every sqlframe dialect supports `explode_outer` function
@@ -445,14 +442,14 @@ def null_condition(col_name: str) -> Column:
                         for col_name in column_names
                     ]
                 ).union(
-                    native_frame.filter(null_condition(columns[0])).select(  # pyright: ignore[reportArgumentType]
+                    native_frame.filter(null_condition(columns[0])).select(
                         *[
                             self._F.col(col_name).alias(col_name)
                             if col_name != columns[0]
                             else self._F.lit(None).alias(col_name)
                             for col_name in column_names
                         ]
-                    )  # pyright: ignore[reportArgumentType]
+                    )
                 ),
             )
         else:  # pragma: no cover
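The sqlframe branch above emulates `explode_outer` by hand: rows with a non-empty array are exploded, rows with a null or empty array are kept once with the column set to NULL, and the two halves are unioned (the NullType column from `lit(None)` coerces to the exploded column's type). A standalone sketch of the same trick in plain PySpark, with hypothetical toy data:

    from pyspark.sql import SparkSession, functions as F

    spark = SparkSession.builder.getOrCreate()
    df = spark.createDataFrame([(1, [10, 20]), (2, None), (3, [])], ["id", "arr"])
    null_condition = F.isnull("arr") | (F.size("arr") == 0)
    exploded = df.filter(~null_condition).select("id", F.explode("arr").alias("arr"))
    kept = df.filter(null_condition).select("id", F.lit(None).alias("arr"))
    exploded.union(kept).show()  # same rows explode_outer would produce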
@@ -487,4 +484,4 @@ def unpivot(
         )
         if index is None:
             unpivoted_native_frame = unpivoted_native_frame.drop(*ids)
-        return self._from_native_frame(unpivoted_native_frame)  # pyright: ignore[reportArgumentType]
+        return self._from_native_frame(unpivoted_native_frame)
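`unpivot` here leans on the native `DataFrame.unpivot` (PySpark 3.4+); when no `index` is passed, the helper id columns are dropped again afterwards, as in the hunk above. A minimal sketch of the native melt over hypothetical columns:

    from pyspark.sql import SparkSession

    spark = SparkSession.builder.getOrCreate()
    df = spark.createDataFrame([(1, 10, 20)], ["id", "a", "b"])
    df.unpivot(
        ids=["id"], values=["a", "b"],
        variableColumnName="variable", valueColumnName="value",
    ).show()  # two rows: (1, "a", 10) and (1, "b", 20)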