Commit e64efb6

chore(typing): Resolve sqlframe>=3.24.0 errors (#2187)

1 parent: 01830c1
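Context for the diff below: with sqlframe>=3.24.0, pyright began reporting `reportArgumentType` on calls that pass frames and columns across the `_spark_like` wrapper's annotations (sqlframe and PySpark objects share these code paths). The commit resolves this with line-scoped suppressions rather than blanket `# type: ignore` comments, so every other pyright rule stays active on those lines. A minimal sketch of this kind of mismatch and suppression; every name here is an illustrative stand-in, not narwhals' or sqlframe's real API:

# Illustrative sketch only: stand-in types, not narwhals' real API.
from __future__ import annotations


class PySparkFrame:
    """Stand-in for pyspark.sql.DataFrame."""

    def select(self, *cols: str) -> PySparkFrame:
        return self


class SQLFrame:
    """Stand-in for a sqlframe DataFrame with its own type stubs."""

    def select(self, *cols: str) -> SQLFrame:
        return self


def from_native_frame(native: PySparkFrame) -> str:
    # The annotation accepts only the PySpark type.
    return type(native).__name__


frame = SQLFrame()
# Without the trailing comment, pyright reports something like:
#   Argument of type "SQLFrame" cannot be assigned to parameter
#   "native" of type "PySparkFrame" (reportArgumentType)
# The bracketed form silences only that rule, only on this line.
print(from_native_frame(frame.select("a")))  # pyright: ignore[reportArgumentType]

The same targeted pattern repeats throughout both files below.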

File tree

2 files changed: 18 additions & 18 deletions


narwhals/_spark_like/dataframe.py

Lines changed: 17 additions & 17 deletions
@@ -250,7 +250,7 @@ def collect(
         raise ValueError(msg)  # pragma: no cover
 
     def simple_select(self: Self, *column_names: str) -> Self:
-        return self._from_native_frame(self._native_frame.select(*column_names))
+        return self._from_native_frame(self._native_frame.select(*column_names))  # pyright: ignore[reportArgumentType]
 
     def aggregate(
         self: Self,
@@ -259,7 +259,7 @@ def aggregate(
         new_columns = evaluate_exprs(self, *exprs)
 
         new_columns_list = [col.alias(col_name) for col_name, col in new_columns]
-        return self._from_native_frame(self._native_frame.agg(*new_columns_list))
+        return self._from_native_frame(self._native_frame.agg(*new_columns_list))  # pyright: ignore[reportArgumentType]
 
     def select(
         self: Self,
@@ -274,17 +274,17 @@ def select(
             return self._from_native_frame(spark_df)
 
         new_columns_list = [col.alias(col_name) for (col_name, col) in new_columns]
-        return self._from_native_frame(self._native_frame.select(*new_columns_list))
+        return self._from_native_frame(self._native_frame.select(*new_columns_list))  # pyright: ignore[reportArgumentType]
 
     def with_columns(self: Self, *exprs: SparkLikeExpr) -> Self:
         new_columns = evaluate_exprs(self, *exprs)
-        return self._from_native_frame(self._native_frame.withColumns(dict(new_columns)))
+        return self._from_native_frame(self._native_frame.withColumns(dict(new_columns)))  # pyright: ignore[reportArgumentType]
 
     def filter(self: Self, predicate: SparkLikeExpr) -> Self:
         # `[0]` is safe as the predicate's expression only returns a single column
         condition = predicate._call(self)[0]
-        spark_df = self._native_frame.where(condition)
-        return self._from_native_frame(spark_df)
+        spark_df = self._native_frame.where(condition)  # pyright: ignore[reportArgumentType]
+        return self._from_native_frame(spark_df)  # pyright: ignore[reportArgumentType]
 
     @property
     def schema(self: Self) -> dict[str, DType]:
@@ -307,10 +307,10 @@ def drop(self: Self, columns: list[str], strict: bool) -> Self:  # noqa: FBT001
         columns_to_drop = parse_columns_to_drop(
             compliant_frame=self, columns=columns, strict=strict
         )
-        return self._from_native_frame(self._native_frame.drop(*columns_to_drop))
+        return self._from_native_frame(self._native_frame.drop(*columns_to_drop))  # pyright: ignore[reportArgumentType]
 
     def head(self: Self, n: int) -> Self:
-        return self._from_native_frame(self._native_frame.limit(num=n))
+        return self._from_native_frame(self._native_frame.limit(num=n))  # pyright: ignore[reportArgumentType]
 
     def group_by(self: Self, *keys: str, drop_null_keys: bool) -> SparkLikeLazyGroupBy:
         from narwhals._spark_like.group_by import SparkLikeLazyGroupBy
@@ -340,18 +340,18 @@ def sort(
         )
 
         sort_cols = [sort_f(col) for col, sort_f in zip(by, sort_funcs)]
-        return self._from_native_frame(self._native_frame.sort(*sort_cols))
+        return self._from_native_frame(self._native_frame.sort(*sort_cols))  # pyright: ignore[reportArgumentType]
 
     def drop_nulls(self: Self, subset: list[str] | None) -> Self:
-        return self._from_native_frame(self._native_frame.dropna(subset=subset))
+        return self._from_native_frame(self._native_frame.dropna(subset=subset))  # pyright: ignore[reportArgumentType]
 
     def rename(self: Self, mapping: dict[str, str]) -> Self:
         rename_mapping = {
             colname: mapping.get(colname, colname) for colname in self.columns
         }
         return self._from_native_frame(
             self._native_frame.select(
-                [self._F.col(old).alias(new) for old, new in rename_mapping.items()]
+                [self._F.col(old).alias(new) for old, new in rename_mapping.items()]  # pyright: ignore[reportArgumentType]
             )
         )
 
@@ -365,7 +365,7 @@ def unique(
             msg = "`LazyFrame.unique` with PySpark backend only supports `keep='any'`."
             raise ValueError(msg)
         check_column_exists(self.columns, subset)
-        return self._from_native_frame(self._native_frame.dropDuplicates(subset=subset))
+        return self._from_native_frame(self._native_frame.dropDuplicates(subset=subset))  # pyright: ignore[reportArgumentType]
 
     def join(
         self: Self,
@@ -409,7 +409,7 @@ def join(
             ]
         )
         return self._from_native_frame(
-            self_native.join(other_native, on=left_on, how=how).select(col_order)
+            self_native.join(other_native, on=left_on, how=how).select(col_order)  # pyright: ignore[reportArgumentType]
         )
 
     def explode(self: Self, columns: list[str]) -> Self:
@@ -445,7 +445,7 @@ def explode(self: Self, columns: list[str]) -> Self:
                         else self._F.explode_outer(col_name).alias(col_name)
                         for col_name in column_names
                     ]
-                ),
+                ),  # pyright: ignore[reportArgumentType]
             )
         elif self._implementation.is_sqlframe():
             # Not every sqlframe dialect supports `explode_outer` function
@@ -466,14 +466,14 @@ def null_condition(col_name: str) -> Column:
                         for col_name in column_names
                     ]
                 ).union(
-                    native_frame.filter(null_condition(columns[0])).select(
+                    native_frame.filter(null_condition(columns[0])).select(  # pyright: ignore[reportArgumentType]
                         *[
                             self._F.col(col_name).alias(col_name)
                             if col_name != columns[0]
                             else self._F.lit(None).alias(col_name)
                             for col_name in column_names
                         ]
-                    )
+                    )  # pyright: ignore[reportArgumentType]
                 ),
             )
         else:  # pragma: no cover
@@ -508,4 +508,4 @@ def unpivot(
         )
         if index is None:
            unpivoted_native_frame = unpivoted_native_frame.drop(*ids)
-        return self._from_native_frame(unpivoted_native_frame)
+        return self._from_native_frame(unpivoted_native_frame)  # pyright: ignore[reportArgumentType]
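A side note on the `explode` context above: when a sqlframe dialect lacks `explode_outer`, the surrounding code emulates it by exploding only the rows whose array column is populated and unioning back the null/empty rows with a NULL literal in that column. A rough PySpark sketch of the same idea, with invented data and column names:

from pyspark.sql import SparkSession
from pyspark.sql import functions as F

spark = SparkSession.builder.getOrCreate()
df = spark.createDataFrame(
    [(1, [10, 20]), (2, None), (3, [])],
    "id INT, vals ARRAY<INT>",
)

null_or_empty = F.col("vals").isNull() | (F.size("vals") == 0)

# Rows with a populated array explode to one output row per element ...
exploded = df.filter(~null_or_empty).select("id", F.explode("vals").alias("vals"))
# ... and null/empty rows are kept once, with NULL in the exploded column.
padded = df.filter(null_or_empty).select("id", F.lit(None).cast("int").alias("vals"))

exploded.union(padded).show()
# id=1 yields two rows (10, 20); id=2 and id=3 yield one row each with vals=NULL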

narwhals/_spark_like/group_by.py

Lines changed: 1 addition & 1 deletion
@@ -50,7 +50,7 @@ def agg(self: Self, *exprs: SparkLikeExpr) -> SparkLikeLazyFrame:
 
         if not agg_columns:
             return self._compliant_frame._from_native_frame(
-                self._compliant_frame._native_frame.select(*self._keys).dropDuplicates()
+                self._compliant_frame._native_frame.select(*self._keys).dropDuplicates()  # pyright: ignore[reportArgumentType]
             )
         return self._compliant_frame._from_native_frame(
             self._compliant_frame._native_frame.groupBy(*self._keys).agg(*agg_columns)
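On the `group_by.py` hunk: grouping with no aggregate expressions reduces to the distinct combinations of the key columns, which the `select(*keys).dropDuplicates()` fast path computes without ever calling `agg`. A small PySpark illustration with invented data:

from pyspark.sql import SparkSession

spark = SparkSession.builder.getOrCreate()
df = spark.createDataFrame(
    [("a", 1), ("a", 2), ("b", 3)],
    ["key", "val"],
)

# group_by("key") with no aggregations is just the distinct key values
# ("a" and "b" here, in no guaranteed order):
df.select("key").dropDuplicates().show()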
