
Commit e19a519

annotation fix
1 parent 9796db7 commit e19a519

9 files changed (+25, -16 lines)


python/pyarrow-stubs/pyarrow/_compute.pyi

Lines changed: 1 addition & 1 deletion
@@ -391,7 +391,7 @@ class RoundTemporalOptions(FunctionOptions):
 
 
 class RoundToMultipleOptions(FunctionOptions):
-    def __init__(self, multiple: float = 1.0,
+    def __init__(self, multiple: int | float | lib.Scalar = 1.0,
                  round_mode: _RoundMode = "half_to_even") -> None: ...
 
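The wider `multiple` annotation matches what the kernel accepts at runtime: a Python number or an Arrow scalar. A minimal sketch of calls the updated stub now admits:

    import pyarrow as pa
    import pyarrow.compute as pc

    # All three forms were already valid at runtime; the old stub
    # only allowed a plain float.
    pc.round_to_multiple([1.23, 4.56], multiple=2)
    pc.round_to_multiple([1.23, 4.56], multiple=0.5)
    pc.round_to_multiple([1.23, 4.56], multiple=pa.scalar(0.5))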

python/pyarrow-stubs/pyarrow/_ipc.pyi

Lines changed: 2 additions & 4 deletions
@@ -175,7 +175,7 @@ class _ReadPandasMixin:
     def read_pandas(self, **options) -> pd.DataFrame: ...
 
 
-class RecordBatchReader(_Weakrefable):
+class RecordBatchReader(_ReadPandasMixin, _Weakrefable):
     def __iter__(self) -> Self: ...
     def read_next_batch(self) -> RecordBatch: ...
 

@@ -191,7 +191,6 @@ class RecordBatchReader(_Weakrefable):
 
     def read_all(self) -> Table: ...
 
-    read_pandas = _ReadPandasMixin.read_pandas
     def close(self) -> None: ...
 
     def __enter__(self) -> Self: ...

@@ -237,7 +236,7 @@ class RecordBatchWithMetadata(NamedTuple):
     custom_metadata: KeyValueMetadata
 
 
-class _RecordBatchFileReader(_Weakrefable):
+class _RecordBatchFileReader(_ReadPandasMixin, _Weakrefable):
     @property
     def num_record_batches(self) -> int: ...

@@ -248,7 +247,6 @@ class _RecordBatchFileReader(_Weakrefable):
 
     def read_all(self) -> Table: ...
 
-    read_pandas = _ReadPandasMixin.read_pandas
     def __enter__(self) -> Self: ...
     def __exit__(self, exc_type, exc_val, exc_tb): ...
     @property
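Inheriting `_ReadPandasMixin` instead of re-binding `read_pandas` as a class attribute lets type checkers resolve the method through ordinary attribute lookup; the runtime classes are unchanged. A small usage sketch:

    import pyarrow as pa

    batch = pa.RecordBatch.from_pydict({"x": [1, 2, 3]})
    reader = pa.RecordBatchReader.from_batches(batch.schema, [batch])
    df = reader.read_pandas()  # now typed via the mixin, returns pd.DataFrame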

python/pyarrow-stubs/pyarrow/compute.pyi

Lines changed: 3 additions & 3 deletions
@@ -654,7 +654,7 @@ def round(
 def round_to_multiple(
     x: _NumericScalarT | _NumericArrayT | list | Expression,
     /,
-    multiple: int = 0,
+    multiple: int | float | NumericScalar = 1.0,
     round_mode: Literal[
         "down",
         "up",

@@ -1521,7 +1521,7 @@ def local_timestamp(
 def random(
     n: int,
     *,
-    initializer: Literal["system"] | int = "system",
+    initializer: str | int | bytes = "system",
     options: RandomOptions | None = None,
     memory_pool: lib.MemoryPool | None = None,
 ) -> lib.DoubleArray: ...

@@ -1533,7 +1533,7 @@ def random(
 def cumulative_sum(
     values: _NumericArrayT | ArrayLike | Expression,
     /,
-    start: lib.Scalar | None = None,
+    start: int | float | lib.Scalar | None = None,
     *,
     skip_nulls: bool = False,
     options: CumulativeSumOptions | None = None,
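The widened `start` and `initializer` parameters mirror the kernels: `cumulative_sum` wraps a plain number in a scalar, and `random` accepts a seed as well as the literal "system". A brief sketch:

    import pyarrow as pa
    import pyarrow.compute as pc

    pc.cumulative_sum([1, 2, 3], start=10)             # int start now checks
    pc.cumulative_sum([1, 2, 3], start=pa.scalar(10))  # Arrow scalar still fine
    pc.random(4, initializer=42)                       # integer seed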

python/pyarrow-stubs/pyarrow/lib.pyi

Lines changed: 8 additions & 1 deletion
@@ -41,7 +41,14 @@ class MonthDayNano(NamedTuple):
     months: int
     nanoseconds: int
 
-    def __init__(self, *args, **kwargs) -> None: ...  # type: ignore[misc]
+    def __new__(
+        cls,
+        sequence: tuple[int, int, int] | list[int] = (),
+    ) -> MonthDayNano: ...
+    def __init__(
+        self,
+        sequence: tuple[int, int, int] | list[int] = (),
+    ) -> None: ...
 
 
 def cpu_count() -> int: ...
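With `__new__` typed to take a three-element sequence, the documented `[months, days, nanoseconds]` construction type-checks without the blanket `*args, **kwargs` signature. A minimal sketch:

    import pyarrow as pa

    # months=1, days=-15, nanoseconds=30 seconds
    interval = pa.MonthDayNano([1, -15, 30_000_000_000])
    arr = pa.array([interval], type=pa.month_day_nano_interval())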

python/pyarrow/parquet/core.py

Lines changed: 1 addition & 1 deletion
@@ -1413,7 +1413,7 @@ def __init__(self, path_or_paths, filesystem=None, schema=None, *, filters=None,
         else:
             single_file = path_or_paths
 
-        parquet_format = ds.ParquetFileFormat(**read_options)
+        parquet_format = ds.ParquetFileFormat(**read_options)  # type: ignore[invalid-argument-type]
 
         if single_file is not None:
             fragment = parquet_format.make_fragment(single_file, filesystem)
python/pyarrow/tests/parquet/test_dataset.py

Lines changed: 4 additions & 2 deletions
@@ -971,7 +971,7 @@ def _test_write_to_dataset_with_partitions(base_path,
     input_df_cols = input_df.columns.tolist()
     assert partition_by == input_df_cols[-1 * len(partition_by):]
 
-    input_df = input_df[cols]  # type: ignore[assignment]
+    input_df = input_df.loc[:, cols]
     # Partitioned columns become 'categorical' dtypes
     for col in partition_by:
         output_df[col] = output_df[col].astype('category')

@@ -980,6 +980,7 @@ def _test_write_to_dataset_with_partitions(base_path,
     expected_date_type = schema.field('date').type.to_pandas_dtype()
     output_df["date"] = output_df["date"].astype(expected_date_type)
 
+    assert isinstance(input_df, pd.DataFrame)
     tm.assert_frame_equal(output_df, input_df)
 
 

@@ -1027,7 +1028,8 @@ def _test_write_to_dataset_no_partitions(base_path,
     ).read()
     input_df = input_table.to_pandas()
     input_df = input_df.drop_duplicates()
-    input_df = input_df[cols]  # type: ignore[assignment]
+    input_df = input_df[cols]
+    assert isinstance(input_df, pd.DataFrame)
     tm.assert_frame_equal(output_df, input_df)
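Both changes avoid the `type: ignore`: the `.loc[:, cols]` form is typed to return a `DataFrame` in pandas-stubs (assuming current stub behavior), and the `isinstance` assert narrows the type where plain `df[cols]` indexing is kept. A tiny illustration, independent of the test harness:

    import pandas as pd

    df = pd.DataFrame({"a": [1], "b": [2]})
    cols = ["a", "b"]
    sub = df.loc[:, cols]                 # annotated as DataFrame in the stubs
    assert isinstance(sub, pd.DataFrame)  # runtime narrowing for the checker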

python/pyarrow/tests/test_acero.py

Lines changed: 3 additions & 1 deletion
@@ -16,6 +16,7 @@
 # under the License.
 
 import pytest
+from typing import Literal, cast
 
 import pyarrow as pa
 import pyarrow.compute as pc

@@ -268,7 +269,8 @@ def test_order_by():
     table = pa.table({'a': [1, 2, 3, 4], 'b': [1, 3, None, 2]})
     table_source = Declaration("table_source", TableSourceNodeOptions(table))
 
-    ord_opts = OrderByNodeOptions([("b", "ascending")])
+    sort_keys: list[tuple[str, Literal["ascending", "descending"]]] = [("b", "ascending")]
+    ord_opts = OrderByNodeOptions(sort_keys)
     decl = Declaration.from_sequence([table_source, Declaration("order_by", ord_opts)])
     result = decl.to_table()
     expected = pa.table({"a": [1, 4, 2, 3], "b": [1, 2, 3, None]})
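The explicit annotation is needed because a bare list literal assigned to a variable infers as `list[tuple[str, str]]`, and `list` is invariant, so it cannot be passed where `Literal` sort orders are expected. A minimal illustration of the inference issue, independent of pyarrow (`sort_plan` is a hypothetical stand-in for `OrderByNodeOptions`):

    from typing import Literal

    Order = Literal["ascending", "descending"]

    def sort_plan(keys: list[tuple[str, Order]]) -> None: ...

    keys = [("b", "ascending")]   # inferred as list[tuple[str, str]]
    sort_plan(keys)               # rejected by the checker: list is invariant

    typed: list[tuple[str, Order]] = [("b", "ascending")]
    sort_plan(typed)              # accepted with the explicit annotation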

python/pyarrow/tests/test_compute.py

Lines changed: 1 addition & 1 deletion
@@ -3190,7 +3190,7 @@ def test_cumulative_sum(start, skip_nulls):
 
     for strt in ['a', pa.scalar('arrow'), 1.1]:
         with pytest.raises(pa.ArrowInvalid):
-            pc.cumulative_sum([1, 2, 3], start=strt)
+            pc.cumulative_sum([1, 2, 3], start=strt)  # type: ignore[arg-type]
 
 
 @pytest.mark.numpy

python/pyarrow/tests/test_fs.py

Lines changed: 2 additions & 2 deletions
@@ -234,7 +234,7 @@ def gcsfs(request, gcs_server):
 
     yield dict(
         fs=fs,
-        pathfn=bucket.__add__,
+        pathfn=lambda p: bucket + p,
         allow_move_dir=False,
         allow_append_to_file=False,
     )

@@ -402,7 +402,7 @@ def py_fsspec_s3fs(request, s3_server):
 
     yield dict(
         fs=fs,
-        pathfn=bucket.__add__,
+        pathfn=lambda p: bucket + p,
         allow_move_dir=False,
         allow_append_to_file=True,
    )
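The lambda spells out a plain `(str) -> str` callable; the bound dunder `bucket.__add__` behaves identically at runtime but is harder for some checkers to match against the expected `pathfn` signature. A trivial equivalence sketch (`"mybucket/"` is a made-up prefix):

    bucket = "mybucket/"
    pathfn = lambda p: bucket + p  # same behavior as bucket.__add__
    assert pathfn("key") == "mybucket/key"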
