
Commit 6e3053d

Merge branch 'main' into woops
2 parents 7619093 + 4ee17b3 commit 6e3053d

File tree

27 files changed (+173, -248 lines)

doc/source/whatsnew/v0.21.0.rst
Lines changed: 6 additions & 11 deletions

@@ -635,22 +635,17 @@ Previous behavior:
 
 New behavior:
 
-.. code-block:: ipython
+.. ipython:: python
 
-    In [1]: pi = pd.period_range('2017-01', periods=12, freq='M')
+    pi = pd.period_range('2017-01', periods=12, freq='M')
 
-    In [2]: s = pd.Series(np.arange(12), index=pi)
+    s = pd.Series(np.arange(12), index=pi)
 
-    In [3]: resampled = s.resample('2Q').mean()
+    resampled = s.resample('2Q').mean()
 
-    In [4]: resampled
-    Out[4]:
-    2017Q1    2.5
-    2017Q3    8.5
-    Freq: 2Q-DEC, dtype: float64
+    resampled
 
-    In [5]: resampled.index
-    Out[5]: PeriodIndex(['2017Q1', '2017Q3'], dtype='period[2Q-DEC]')
+    resampled.index
 
 Upsampling and calling ``.ohlc()`` previously returned a ``Series``, basically identical to calling ``.asfreq()``. OHLC upsampling now returns a DataFrame with columns ``open``, ``high``, ``low`` and ``close`` (:issue:`13083`). This is consistent with downsampling and ``DatetimeIndex`` behavior.
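
A quick way to see the OHLC change described in the context line above is to upsample a period-indexed Series and aggregate with .ohlc(). A minimal sketch, not taken from the commit, assuming a pandas version with the 0.21-style behavior (frequency alias handling differs across pandas versions):

    import numpy as np
    import pandas as pd

    # Quarterly data upsampled to monthly, then aggregated with .ohlc().
    pi = pd.period_range("2017-01", periods=4, freq="Q")
    s = pd.Series(np.arange(4), index=pi)

    ohlc = s.resample("M").ohlc()
    print(ohlc.columns.tolist())  # ['open', 'high', 'low', 'close'] per the whatsnew entry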

doc/source/whatsnew/v2.2.0.rst
Lines changed: 1 addition & 1 deletion

@@ -664,7 +664,7 @@ Other Deprecations
 - Deprecated :meth:`DatetimeArray.__init__` and :meth:`TimedeltaArray.__init__`, use :func:`array` instead (:issue:`55623`)
 - Deprecated :meth:`Index.format`, use ``index.astype(str)`` or ``index.map(formatter)`` instead (:issue:`55413`)
 - Deprecated :meth:`Series.ravel`, the underlying array is already 1D, so ravel is not necessary (:issue:`52511`)
-- Deprecated :meth:`Series.resample` and :meth:`DataFrame.resample` with a :class:`PeriodIndex` (and the 'convention' keyword), convert to :class:`DatetimeIndex` (with ``.to_timestamp()``) before resampling instead (:issue:`53481`)
+- Deprecated :meth:`Series.resample` and :meth:`DataFrame.resample` with a :class:`PeriodIndex` (and the 'convention' keyword), convert to :class:`DatetimeIndex` (with ``.to_timestamp()``) before resampling instead (:issue:`53481`). Note: this deprecation was later undone in pandas 2.3.3 (:issue:`57033`)
 - Deprecated :meth:`Series.view`, use :meth:`Series.astype` instead to change the dtype (:issue:`20251`)
 - Deprecated :meth:`offsets.Tick.is_anchored`, use ``False`` instead (:issue:`55388`)
 - Deprecated ``core.internals`` members ``Block``, ``ExtensionBlock``, and ``DatetimeTZBlock``, use public APIs instead (:issue:`55139`)
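
The replacement suggested by this (since undone) deprecation message is to go through a DatetimeIndex. A minimal sketch of that workaround, not taken from the commit; exact frequency aliases ('Q' vs 'QE') depend on the pandas version:

    import numpy as np
    import pandas as pd

    pi = pd.period_range("2017-01", periods=12, freq="M")
    s = pd.Series(np.arange(12), index=pi)

    # Convert the PeriodIndex to timestamps, resample there, then go back to periods.
    resampled = s.to_timestamp().resample("Q").mean()
    resampled.index = resampled.index.to_period("Q")
    print(resampled)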

doc/source/whatsnew/v2.3.3.rst
Lines changed: 9 additions & 1 deletion

@@ -47,7 +47,7 @@ Bug fixes
 - Fix regression in ``~Series.str.contains``, ``~Series.str.match`` and ``~Series.str.fullmatch``
   with a compiled regex and custom flags (:issue:`62240`)
 - Fix :meth:`Series.str.match` and :meth:`Series.str.fullmatch` not matching patterns with groups correctly for the Arrow-backed string dtype (:issue:`61072`)
-
+- Fix comparing a :class:`StringDtype` Series with mixed objects raising an error (:issue:`60228`)
 
 Improvements and fixes for Copy-on-Write
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -57,6 +57,14 @@ Bug fixes
 
 - The :meth:`DataFrame.iloc` now works correctly with ``copy_on_write`` option when assigning values after subsetting the columns of a homogeneous DataFrame (:issue:`60309`)
 
+Other changes
+~~~~~~~~~~~~~
+
+- The deprecation of using :meth:`Series.resample` and :meth:`DataFrame.resample`
+  with a :class:`PeriodIndex` (and the 'convention' keyword) has been undone.
+  Resampling with a :class:`PeriodIndex` is supported again, but a subset of
+  methods that return incorrect results will raise an error in pandas 3.0 (:issue:`57033`)
+
 
 .. ---------------------------------------------------------------------------
 .. _whatsnew_233.contributors:
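
For reference, the behavior the reverted deprecation covers is plain resampling on a period-indexed object. A minimal sketch (not part of the commit):

    import numpy as np
    import pandas as pd

    pi = pd.period_range("2017-01", periods=12, freq="M")
    s = pd.Series(np.arange(12), index=pi)

    # Supported again after GH#57033; per the note above, methods known to give
    # incorrect results are expected to raise in pandas 3.0.
    print(s.resample("Q").mean())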

pandas/_typing.py
Lines changed: 1 addition & 2 deletions

@@ -83,8 +83,7 @@
 
 # numpy compatible types
 NumpyValueArrayLike: TypeAlias = ScalarLike_co | npt.ArrayLike
-# Name "npt._ArrayLikeInt_co" is not defined [name-defined]
-NumpySorter: TypeAlias = npt._ArrayLikeInt_co | None  # type: ignore[name-defined]
+NumpySorter: TypeAlias = npt._ArrayLikeInt_co | None
 
 
 P = ParamSpec("P")

pandas/core/algorithms.py
Lines changed: 9 additions & 8 deletions

@@ -215,14 +215,15 @@ def _reconstruct_data(
         # that values.dtype == dtype
         cls = dtype.construct_array_type()
 
-        # error: Incompatible types in assignment (expression has type
-        # "ExtensionArray", variable has type "ndarray[Any, Any]")
-        values = cls._from_sequence(values, dtype=dtype)  # type: ignore[assignment]
-
-    else:
-        values = values.astype(dtype, copy=False)
-
-    return values
+        # error: Incompatible return value type
+        # (got "ExtensionArray",
+        # expected "ndarray[tuple[Any, ...], dtype[Any]]")
+        return cls._from_sequence(values, dtype=dtype)  # type: ignore[return-value]
+
+    # error: Incompatible return value type
+    # (got "ndarray[tuple[Any, ...], dtype[Any]]",
+    # expected "ExtensionArray")
+    return values.astype(dtype, copy=False)  # type: ignore[return-value]
 
 
 def _ensure_arraylike(values, func_name: str) -> ArrayLike:
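
The restructured _reconstruct_data relies on the usual extension-dtype round trip: ask the dtype for its array class and rebuild with _from_sequence, otherwise fall back to ndarray.astype. A minimal sketch of that pattern with a nullable integer dtype (_from_sequence is private API, shown only for illustration):

    import numpy as np
    import pandas as pd

    dtype = pd.Int64Dtype()
    cls = dtype.construct_array_type()   # IntegerArray

    values = np.array([1, 2, 3], dtype=np.int64)

    # Extension dtypes are rebuilt through the array class; non-extension dtypes
    # would just go through values.astype(dtype, copy=False) as in the hunk above.
    ea = cls._from_sequence(values, dtype=dtype)
    print(type(ea).__name__, ea.dtype)   # IntegerArray Int64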

pandas/core/array_algos/quantile.py
Lines changed: 1 addition & 1 deletion

@@ -102,7 +102,7 @@ def quantile_with_mask(
             interpolation=interpolation,
         )
 
-        result = np.asarray(result)  # type: ignore[assignment]
+        result = np.asarray(result)
         result = result.T
 
     return result

pandas/core/arrays/_mixins.py
Lines changed: 3 additions & 1 deletion

@@ -151,7 +151,9 @@ def view(self, dtype: Dtype | None = None) -> ArrayLike:
 
             td64_values = arr.view(dtype)
             return TimedeltaArray._simple_new(td64_values, dtype=dtype)
-        return arr.view(dtype=dtype)
+        # error: Argument "dtype" to "view" of "ndarray" has incompatible type
+        # "ExtensionDtype | dtype[Any]"; expected "dtype[Any] | _HasDType[dtype[Any]]"
+        return arr.view(dtype=dtype)  # type: ignore[arg-type]
 
     def take(
         self,

pandas/core/arrays/arrow/_arrow_utils.py
Lines changed: 1 addition & 1 deletion

@@ -44,7 +44,7 @@ def pyarrow_array_to_numpy_and_mask(
         mask = pyarrow.BooleanArray.from_buffers(
             pyarrow.bool_(), len(arr), [None, bitmask], offset=arr.offset
         )
-        mask = np.asarray(mask)  # type: ignore[assignment]
+        mask = np.asarray(mask)
     else:
         mask = np.ones(len(arr), dtype=bool)
     return data, mask

pandas/core/arrays/arrow/array.py
Lines changed: 20 additions & 15 deletions

@@ -658,7 +658,7 @@ def _box_pa_array(
         ):
             arr_value = np.asarray(value, dtype=object)
             # similar to isna(value) but exclude NaN, NaT, nat-like, nan-like
-            mask = is_pdna_or_none(arr_value)  # type: ignore[assignment]
+            mask = is_pdna_or_none(arr_value)
 
             try:
                 pa_array = pa.array(value, type=pa_type, mask=mask)
@@ -884,22 +884,27 @@ def _cmp_method(self, other, op) -> ArrowExtensionArray:
         ltype = self._pa_array.type
 
         if isinstance(other, (ExtensionArray, np.ndarray, list)):
-            boxed = self._box_pa(other)
-            rtype = boxed.type
-            if (pa.types.is_timestamp(ltype) and pa.types.is_date(rtype)) or (
-                pa.types.is_timestamp(rtype) and pa.types.is_date(ltype)
-            ):
-                # GH#62157 match non-pyarrow behavior
-                result = ops.invalid_comparison(self, other, op)
-                result = pa.array(result, type=pa.bool_())
+            try:
+                boxed = self._box_pa(other)
+            except pa.lib.ArrowInvalid:
+                # e.g. GH#60228 [1, "b"] we have to operate pointwise
+                res_values = [op(x, y) for x, y in zip(self, other)]
+                result = pa.array(res_values, type=pa.bool_(), from_pandas=True)
             else:
-                try:
-                    result = pc_func(self._pa_array, boxed)
-                except pa.ArrowNotImplementedError:
-                    # TODO: could this be wrong if other is object dtype?
-                    # in which case we need to operate pointwise?
+                rtype = boxed.type
+                if (pa.types.is_timestamp(ltype) and pa.types.is_date(rtype)) or (
+                    pa.types.is_timestamp(rtype) and pa.types.is_date(ltype)
+                ):
+                    # GH#62157 match non-pyarrow behavior
                     result = ops.invalid_comparison(self, other, op)
                     result = pa.array(result, type=pa.bool_())
+                else:
+                    try:
+                        result = pc_func(self._pa_array, boxed)
+                    except pa.ArrowNotImplementedError:
+                        result = ops.invalid_comparison(self, other, op)
+                        result = pa.array(result, type=pa.bool_())
+
         elif is_scalar(other):
             if (isinstance(other, datetime) and pa.types.is_date(ltype)) or (
                 type(other) is date and pa.types.is_timestamp(ltype)
@@ -2739,7 +2744,7 @@ def _str_get_dummies(self, sep: str = "|", dtype: NpDtype | None = None):
             dummies_dtype = np.bool_
         dummies = np.zeros(n_rows * n_cols, dtype=dummies_dtype)
         dummies[indices] = True
-        dummies = dummies.reshape((n_rows, n_cols))  # type: ignore[assignment]
+        dummies = dummies.reshape((n_rows, n_cols))
         result = self._from_pyarrow_array(pa.array(list(dummies)))
         return result, uniques_sorted.to_pylist()
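
A quick way to exercise the new pointwise fallback in _cmp_method (the GH#60228 path) is to compare an Arrow-backed column against a mixed-object sequence. A minimal sketch, not taken from the commit, assuming pyarrow is installed:

    import pandas as pd
    import pyarrow as pa

    s = pd.Series(["a", "b", "c"], dtype=pd.ArrowDtype(pa.string()))

    # Boxing [1, "b", 2.5] into a single pyarrow array raises ArrowInvalid, so the
    # comparison now falls back to evaluating op(x, y) element by element.
    print(s == [1, "b", 2.5])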

pandas/core/arrays/categorical.py
Lines changed: 1 addition & 1 deletion

@@ -1869,7 +1869,7 @@ def value_counts(self, dropna: bool = True) -> Series:
             count = np.bincount(obs, minlength=ncat or 0)
         else:
             count = np.bincount(np.where(mask, code, ncat))
-            ix = np.append(ix, -1)  # type: ignore[assignment]
+            ix = np.append(ix, -1)
 
         ix = coerce_indexer_dtype(ix, self.dtype.categories)
         ix_categorical = self._from_backing_data(ix)
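
The np.append(ix, -1) in this hunk belongs to the dropna=False path, where missing values are counted under the -1 sentinel code. A small sketch of the behavior it supports (not part of the commit):

    import numpy as np
    import pandas as pd

    cat = pd.Categorical(["a", "b", "a", np.nan], categories=["a", "b", "c"])

    # With dropna=False the missing bucket is counted alongside the declared categories.
    print(cat.value_counts(dropna=False))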
