
Commit 45d4ce9

annotation fix
1 parent 9796db7 commit 45d4ce9


6 files changed, +14 −12 lines changed


python/pyarrow-stubs/pyarrow/_ipc.pyi

Lines changed: 2 additions & 4 deletions
@@ -175,7 +175,7 @@ class _ReadPandasMixin:
     def read_pandas(self, **options) -> pd.DataFrame: ...
 
 
-class RecordBatchReader(_Weakrefable):
+class RecordBatchReader(_ReadPandasMixin, _Weakrefable):
     def __iter__(self) -> Self: ...
     def read_next_batch(self) -> RecordBatch: ...
 
@@ -191,7 +191,6 @@ class RecordBatchReader(_Weakrefable):
 
     def read_all(self) -> Table: ...
 
-    read_pandas = _ReadPandasMixin.read_pandas
     def close(self) -> None: ...
 
     def __enter__(self) -> Self: ...
@@ -237,7 +236,7 @@ class RecordBatchWithMetadata(NamedTuple):
     custom_metadata: KeyValueMetadata
 
 
-class _RecordBatchFileReader(_Weakrefable):
+class _RecordBatchFileReader(_ReadPandasMixin, _Weakrefable):
     @property
     def num_record_batches(self) -> int: ...
 
@@ -248,7 +247,6 @@ class _RecordBatchFileReader(_Weakrefable):
 
     def read_all(self) -> Table: ...
 
-    read_pandas = _ReadPandasMixin.read_pandas
     def __enter__(self) -> Self: ...
     def __exit__(self, exc_type, exc_val, exc_tb): ...
     @property
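What the mixin change buys stub consumers, as a minimal sketch (assumes pandas is installed; the round-trip below uses the public pyarrow API):

import pyarrow as pa

# With _ReadPandasMixin among the bases, a type checker resolves
# read_pandas() through ordinary MRO lookup, so the class-level alias
# `read_pandas = _ReadPandasMixin.read_pandas` can be dropped.
table = pa.table({"x": [1, 2, 3]})
reader = pa.RecordBatchReader.from_batches(table.schema, table.to_batches())
df = reader.read_pandas()  # inferred as pd.DataFrame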

python/pyarrow-stubs/pyarrow/compute.pyi

Lines changed: 3 additions & 3 deletions
@@ -654,7 +654,7 @@ def round(
 def round_to_multiple(
     x: _NumericScalarT | _NumericArrayT | list | Expression,
     /,
-    multiple: int = 0,
+    multiple: int | float | NumericScalar = 1.0,
     round_mode: Literal[
         "down",
         "up",
@@ -1521,7 +1521,7 @@ def local_timestamp(
 def random(
     n: int,
     *,
-    initializer: Literal["system"] | int = "system",
+    initializer: Literal["system"] | int | bytes = "system",
     options: RandomOptions | None = None,
     memory_pool: lib.MemoryPool | None = None,
 ) -> lib.DoubleArray: ...
@@ -1533,7 +1533,7 @@ def random(
 def cumulative_sum(
     values: _NumericArrayT | ArrayLike | Expression,
     /,
-    start: lib.Scalar | None = None,
+    start: int | float | lib.Scalar | None = None,
     *,
     skip_nulls: bool = False,
     options: CumulativeSumOptions | None = None,
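A sketch of calls the widened annotations now accept; the float multiple and plain-int start mirror pyarrow's documented defaults, while the bytes initializer is taken from the stub change itself, not verified here:

import pyarrow as pa
import pyarrow.compute as pc

# `multiple` may be a float; the runtime default is 1.0, not 0
pc.round_to_multiple(pa.array([1.1, 2.6]), multiple=0.5)

# `start` may be a plain Python number, not only a lib.Scalar
pc.cumulative_sum(pa.array([1, 2, 3]), start=10)

# `initializer` defaults to "system"; an int seed also works, and the
# stub now additionally permits bytes
pc.random(3, initializer=42)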

python/pyarrow/parquet/core.py

Lines changed: 1 addition & 1 deletion
@@ -1413,7 +1413,7 @@ def __init__(self, path_or_paths, filesystem=None, schema=None, *, filters=None,
         else:
             single_file = path_or_paths
 
-        parquet_format = ds.ParquetFileFormat(**read_options)
+        parquet_format = ds.ParquetFileFormat(**read_options)  # type: ignore[invalid-argument-type]
 
         if single_file is not None:
             fragment = parquet_format.make_fragment(single_file, filesystem)
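For context, a hedged sketch of why the suppression is needed here: read_options is a plain dict assembled from ParquetDataset keyword arguments, so a checker cannot prove its keys match ParquetFileFormat's typed parameters (the key below is illustrative):

import pyarrow.dataset as ds

# **-unpacking a dict[str, Any] cannot be validated against
# ParquetFileFormat's keyword parameters, so the call site is
# suppressed rather than loosening the stub.
read_options = {"coerce_int96_timestamp_unit": "ms"}
parquet_format = ds.ParquetFileFormat(**read_options)  # type: ignore[invalid-argument-type]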

python/pyarrow/tests/parquet/test_dataset.py

Lines changed: 4 additions & 2 deletions
@@ -971,7 +971,7 @@ def _test_write_to_dataset_with_partitions(base_path,
     input_df_cols = input_df.columns.tolist()
     assert partition_by == input_df_cols[-1 * len(partition_by):]
 
-    input_df = input_df[cols]  # type: ignore[assignment]
+    input_df = input_df.loc[:, cols]
     # Partitioned columns become 'categorical' dtypes
     for col in partition_by:
         output_df[col] = output_df[col].astype('category')
@@ -980,6 +980,7 @@ def _test_write_to_dataset_with_partitions(base_path,
     expected_date_type = schema.field('date').type.to_pandas_dtype()
     output_df["date"] = output_df["date"].astype(expected_date_type)
 
+    assert isinstance(input_df, pd.DataFrame)
     tm.assert_frame_equal(output_df, input_df)
 
 
@@ -1027,7 +1028,8 @@ def _test_write_to_dataset_no_partitions(base_path,
     ).read()
     input_df = input_table.to_pandas()
     input_df = input_df.drop_duplicates()
-    input_df = input_df[cols]  # type: ignore[assignment]
+    input_df = input_df[cols]
+    assert isinstance(input_df, pd.DataFrame)
     tm.assert_frame_equal(output_df, input_df)
 
 
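Both fixes work around the same pandas-stubs behavior; a small sketch of the difference, assuming pandas-stubs-style inference:

import pandas as pd

df = pd.DataFrame({"a": [1, 2], "b": [3, 4]})
cols = ["a", "b"]

# .loc[:, list] is annotated to return a DataFrame, so no cast is needed
sub = df.loc[:, cols]

# df[list] is inferred more loosely by some stubs; an isinstance assert
# narrows the type for the checker and documents the intent
sub2 = df[cols]
assert isinstance(sub2, pd.DataFrame)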

python/pyarrow/tests/test_acero.py

Lines changed: 3 additions & 1 deletion
@@ -16,6 +16,7 @@
 # under the License.
 
 import pytest
+from typing import Literal, cast
 
 import pyarrow as pa
 import pyarrow.compute as pc
@@ -268,7 +269,8 @@ def test_order_by():
     table = pa.table({'a': [1, 2, 3, 4], 'b': [1, 3, None, 2]})
     table_source = Declaration("table_source", TableSourceNodeOptions(table))
 
-    ord_opts = OrderByNodeOptions([("b", "ascending")])
+    sort_keys: list[tuple[str, Literal["ascending", "descending"]]] = [("b", "ascending")]
+    ord_opts = OrderByNodeOptions(sort_keys)
     decl = Declaration.from_sequence([table_source, Declaration("order_by", ord_opts)])
     result = decl.to_table()
     expected = pa.table({"a": [1, 4, 2, 3], "b": [1, 2, 3, None]})
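The new annotation exists to defeat list-literal widening; a minimal sketch of the inference problem:

from typing import Literal

SortOrder = Literal["ascending", "descending"]

# Without an annotation, [("b", "ascending")] is inferred as
# list[tuple[str, str]], which fails to match a parameter expecting
# tuples whose second element is the Literal above.
sort_keys: list[tuple[str, SortOrder]] = [("b", "ascending")]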

python/pyarrow/tests/test_fs.py

Lines changed: 1 addition & 1 deletion
@@ -402,7 +402,7 @@ def py_fsspec_s3fs(request, s3_server):
 
     yield dict(
         fs=fs,
-        pathfn=bucket.__add__,
+        pathfn=lambda p: bucket + p,
         allow_move_dir=False,
         allow_append_to_file=True,
     )
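The lambda is behaviorally identical to bucket.__add__ but spells out the str-to-str signature for type checkers:

bucket = "bucket-name/"

# Same result either way; the lambda's signature is explicit
pathfn = lambda p: bucket + p
assert pathfn("key") == bucket.__add__("key") == "bucket-name/key"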
