
Commit 5a101c3

Merge branch 'main' into qcut-floating-point

2 parents: 644a49f + 0cdc6a4; commit 5a101c3

111 files changed: +869 −364 lines changed


.circleci/config.yml

Lines changed: 2 additions & 2 deletions
@@ -91,8 +91,8 @@ jobs:
 name: Build aarch64 wheels
 no_output_timeout: 30m # Sometimes the tests won't generate any output, make sure the job doesn't get killed by that
 command: |
-pip3 install cibuildwheel==2.18.1
-cibuildwheel --prerelease-pythons --output-dir wheelhouse
+pip3 install cibuildwheel==2.20.0
+cibuildwheel --output-dir wheelhouse

 environment:
 CIBW_BUILD: << parameters.cibw-build >>

.github/workflows/wheels.yml

Lines changed: 1 addition & 2 deletions
@@ -158,11 +158,10 @@ jobs:
 run: echo "sdist_name=$(cd ./dist && ls -d */)" >> "$GITHUB_ENV"

 - name: Build wheels
-uses: pypa/cibuildwheel@v2.19.2
+uses: pypa/cibuildwheel@v2.20.0
 with:
 package-dir: ./dist/${{ startsWith(matrix.buildplat[1], 'macosx') && env.sdist_name || needs.build_sdist.outputs.sdist_file }}
 env:
-CIBW_PRERELEASE_PYTHONS: True
 CIBW_BUILD: ${{ matrix.python[0] }}-${{ matrix.buildplat[1] }}
 CIBW_BUILD_FRONTEND: ${{ matrix.cibw_build_frontend || 'pip' }}
 CIBW_PLATFORM: ${{ matrix.buildplat[1] == 'pyodide_wasm32' && 'pyodide' || 'auto' }}

ci/code_checks.sh

Lines changed: 0 additions & 9 deletions
@@ -70,8 +70,6 @@ if [[ -z "$CHECK" || "$CHECK" == "docstrings" ]]; then
 --format=actions \
 -i ES01 `# For now it is ok if docstrings are missing the extended summary` \
 -i "pandas.Series.dt PR01" `# Accessors are implemented as classes, but we do not document the Parameters section` \
--i "pandas.MultiIndex.get_level_values SA01" \
--i "pandas.MultiIndex.get_loc_level PR07" \
 -i "pandas.MultiIndex.names SA01" \
 -i "pandas.MultiIndex.reorder_levels RT03,SA01" \
 -i "pandas.MultiIndex.sortlevel PR07,SA01" \
@@ -165,7 +163,6 @@ if [[ -z "$CHECK" || "$CHECK" == "docstrings" ]]; then
 -i "pandas.Series.str.center RT03,SA01" \
 -i "pandas.Series.str.decode PR07,RT03,SA01" \
 -i "pandas.Series.str.encode PR07,RT03,SA01" \
--i "pandas.Series.str.fullmatch RT03" \
 -i "pandas.Series.str.index RT03" \
 -i "pandas.Series.str.ljust RT03,SA01" \
 -i "pandas.Series.str.lower RT03" \
@@ -264,9 +261,7 @@ if [[ -z "$CHECK" || "$CHECK" == "docstrings" ]]; then
 -i "pandas.api.extensions.ExtensionArray.tolist RT03,SA01" \
 -i "pandas.api.extensions.ExtensionArray.unique RT03,SA01" \
 -i "pandas.api.extensions.ExtensionArray.view SA01" \
--i "pandas.api.indexers.VariableOffsetWindowIndexer PR01,SA01" \
 -i "pandas.api.interchange.from_dataframe RT03,SA01" \
--i "pandas.api.types.is_any_real_numeric_dtype SA01" \
 -i "pandas.api.types.is_bool PR01,SA01" \
 -i "pandas.api.types.is_bool_dtype SA01" \
 -i "pandas.api.types.is_categorical_dtype SA01" \
@@ -288,13 +283,9 @@ if [[ -z "$CHECK" || "$CHECK" == "docstrings" ]]; then
 -i "pandas.api.types.is_iterator PR07,SA01" \
 -i "pandas.api.types.is_list_like SA01" \
 -i "pandas.api.types.is_named_tuple PR07,SA01" \
--i "pandas.api.types.is_numeric_dtype SA01" \
 -i "pandas.api.types.is_object_dtype SA01" \
--i "pandas.api.types.is_period_dtype SA01" \
 -i "pandas.api.types.is_re PR07,SA01" \
 -i "pandas.api.types.is_re_compilable PR07,SA01" \
--i "pandas.api.types.is_sparse SA01" \
--i "pandas.api.types.is_timedelta64_ns_dtype SA01" \
 -i "pandas.api.types.pandas_dtype PR07,RT03,SA01" \
 -i "pandas.arrays.ArrowExtensionArray PR07,SA01" \
 -i "pandas.arrays.BooleanArray SA01" \

doc/source/development/contributing.rst

Lines changed: 0 additions & 1 deletion
@@ -74,7 +74,6 @@ If you are new to Git, you can reference some of these resources for learning Git
 to the :ref:`contributor community <community>` for help if needed:

 * `Git documentation <https://git-scm.com/doc>`_.
-* `Numpy's Git resources <https://numpy.org/doc/stable/dev/gitwash/git_resources.html>`_ tutorial.

 Also, the project follows a forking workflow further described on this page whereby
 contributors fork the repository, make changes and then create a pull request.

doc/source/whatsnew/v3.0.0.rst

Lines changed: 3 additions & 0 deletions
@@ -50,6 +50,7 @@ Other enhancements
 - :meth:`DataFrame.pivot_table` and :func:`pivot_table` now allow the passing of keyword arguments to ``aggfunc`` through ``**kwargs`` (:issue:`57884`)
 - :meth:`Series.cummin` and :meth:`Series.cummax` now supports :class:`CategoricalDtype` (:issue:`52335`)
 - :meth:`Series.plot` now correctly handle the ``ylabel`` parameter for pie charts, allowing for explicit control over the y-axis label (:issue:`58239`)
+- Multiplying two :class:`DateOffset` objects will now raise a ``TypeError`` instead of a ``RecursionError`` (:issue:`59442`)
 - Restore support for reading Stata 104-format and enable reading 103-format dta files (:issue:`58554`)
 - Support reading Stata 102-format (Stata 1) dta files (:issue:`58978`)
 - Support reading Stata 110-format (Stata 7) dta files (:issue:`47176`)
@@ -547,6 +548,7 @@ Strings

 Interval
 ^^^^^^^^
+- :meth:`Index.is_monotonic_decreasing`, :meth:`Index.is_monotonic_increasing`, and :meth:`Index.is_unique` could incorrectly be ``False`` for an ``Index`` created from a slice of another ``Index``. (:issue:`57911`)
 - Bug in :func:`interval_range` where start and end numeric types were always cast to 64 bit (:issue:`57268`)
 -

@@ -609,6 +611,7 @@ Groupby/resample/rolling
 - Bug in :meth:`DataFrameGroupBy.agg` that raises ``AttributeError`` when there is dictionary input and duplicated columns, instead of returning a DataFrame with the aggregation of all duplicate columns. (:issue:`55041`)
 - Bug in :meth:`DataFrameGroupBy.apply` that was returning a completely empty DataFrame when all return values of ``func`` were ``None`` instead of returning an empty DataFrame with the original columns and dtypes. (:issue:`57775`)
 - Bug in :meth:`DataFrameGroupBy.apply` with ``as_index=False`` that was returning :class:`MultiIndex` instead of returning :class:`Index`. (:issue:`58291`)
+- Bug in :meth:`DataFrameGroupBy.cumsum` and :meth:`DataFrameGroupBy.cumprod` where ``numeric_only`` parameter was passed indirectly through kwargs instead of passing directly. (:issue:`58811`)
 - Bug in :meth:`DataFrameGroupBy.cumsum` where it did not return the correct dtype when the label contained ``None``. (:issue:`58811`)
 - Bug in :meth:`DataFrameGroupby.transform` and :meth:`SeriesGroupby.transform` with a reducer and ``observed=False`` that coerces dtype to float when there are unobserved categories. (:issue:`55326`)
 - Bug in :meth:`Rolling.apply` where the applied function could be called on fewer than ``min_period`` periods if ``method="table"``. (:issue:`58868`)

pandas/_libs/index.pyx

Lines changed: 32 additions & 12 deletions
@@ -252,14 +252,24 @@ cdef class IndexEngine:
         return self.sizeof()

     cpdef _update_from_sliced(self, IndexEngine other, reverse: bool):
-        self.unique = other.unique
-        self.need_unique_check = other.need_unique_check
+        if other.unique:
+            self.unique = other.unique
+            self.need_unique_check = other.need_unique_check
+
         if not other.need_monotonic_check and (
                 other.is_monotonic_increasing or other.is_monotonic_decreasing):
-            self.need_monotonic_check = other.need_monotonic_check
-            # reverse=True means the index has been reversed
-            self.monotonic_inc = other.monotonic_dec if reverse else other.monotonic_inc
-            self.monotonic_dec = other.monotonic_inc if reverse else other.monotonic_dec
+            self.need_monotonic_check = 0
+            if len(self.values) > 0 and self.values[0] != self.values[-1]:
+                # reverse=True means the index has been reversed
+                if reverse:
+                    self.monotonic_inc = other.monotonic_dec
+                    self.monotonic_dec = other.monotonic_inc
+                else:
+                    self.monotonic_inc = other.monotonic_inc
+                    self.monotonic_dec = other.monotonic_dec
+            else:
+                self.monotonic_inc = 1
+                self.monotonic_dec = 1

     @property
     def is_unique(self) -> bool:
@@ -882,14 +892,24 @@ cdef class SharedEngine:
         pass

     cpdef _update_from_sliced(self, ExtensionEngine other, reverse: bool):
-        self.unique = other.unique
-        self.need_unique_check = other.need_unique_check
+        if other.unique:
+            self.unique = other.unique
+            self.need_unique_check = other.need_unique_check
+
         if not other.need_monotonic_check and (
                 other.is_monotonic_increasing or other.is_monotonic_decreasing):
-            self.need_monotonic_check = other.need_monotonic_check
-            # reverse=True means the index has been reversed
-            self.monotonic_inc = other.monotonic_dec if reverse else other.monotonic_inc
-            self.monotonic_dec = other.monotonic_inc if reverse else other.monotonic_dec
+            self.need_monotonic_check = 0
+            if len(self.values) > 0 and self.values[0] != self.values[-1]:
+                # reverse=True means the index has been reversed
+                if reverse:
+                    self.monotonic_inc = other.monotonic_dec
+                    self.monotonic_dec = other.monotonic_inc
+                else:
+                    self.monotonic_inc = other.monotonic_inc
+                    self.monotonic_dec = other.monotonic_dec
+            else:
+                self.monotonic_inc = 1
+                self.monotonic_dec = 1

     @property
     def is_unique(self) -> bool:
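
The user-visible effect of this engine change can be sketched with the public Index API; the snippet below is an illustrative example written for this note (values made up), not code from the commit, and assumes a build containing the fix described in the :issue:`57911` whatsnew entry above:

    import pandas as pd

    idx = pd.Index([1, 2, 3, 4])
    idx.is_monotonic_increasing     # True; computed once and cached on the engine
    sliced = idx[::-1]              # slicing reuses the parent engine's cached flags
    sliced.is_monotonic_decreasing  # True: reverse=True swaps the inc/dec flags
    sliced.is_unique                # True: uniqueness is now only copied over when known to be True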

pandas/_libs/lib.pyx

Lines changed: 1 addition & 1 deletion
@@ -2702,7 +2702,7 @@ def maybe_convert_objects(ndarray[object] objects,
     if using_string_dtype() and is_string_array(objects, skipna=True):
         from pandas.core.arrays.string_ import StringDtype

-        dtype = StringDtype(storage="pyarrow", na_value=np.nan)
+        dtype = StringDtype(na_value=np.nan)
         return dtype.construct_array_type()._from_sequence(objects, dtype=dtype)

     elif convert_to_nullable_dtype and is_string_array(objects, skipna=True):
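
For context, this inference path is reachable from the public API when the future string dtype is enabled; the snippet below is a hedged sketch (option name and behaviour as in recent pandas releases), not code from this commit:

    import pandas as pd

    pd.set_option("future.infer_string", True)  # makes using_string_dtype() return True
    ser = pd.Series(["a", "b", None])
    # Object values that are all strings are inferred as StringDtype with NaN
    # semantics; after this change the storage is no longer hard-coded to "pyarrow".
    ser.dtype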

pandas/_libs/tslibs/offsets.pyx

Lines changed: 6 additions & 0 deletions
@@ -491,6 +491,12 @@ cdef class BaseOffset:
         elif is_integer_object(other):
             return type(self)(n=other * self.n, normalize=self.normalize,
                               **self.kwds)
+        elif isinstance(other, BaseOffset):
+            # Otherwise raises RecursionError due to __rmul__
+            raise TypeError(
+                f"Cannot multiply {type(self).__name__} with "
+                f"{type(other).__name__}."
+            )
         return NotImplemented

     def __rmul__(self, other):
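
A quick sketch of the new behaviour (matching the whatsnew entry for :issue:`59442`); the concrete offsets are chosen for illustration and the snippet assumes a build that includes this change:

    from pandas.tseries.offsets import Day, MonthEnd

    Day(2) * 3                 # still supported: integer multiplication scales n -> <6 * Days>
    try:
        Day(2) * MonthEnd()    # previously recursed through __rmul__
    except TypeError as err:
        print(err)             # "Cannot multiply Day with MonthEnd."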

pandas/_testing/__init__.py

Lines changed: 5 additions & 1 deletion
@@ -12,6 +12,7 @@

 import numpy as np

+from pandas._config import using_string_dtype
 from pandas._config.localization import (
     can_set_locale,
     get_locales,
@@ -106,7 +107,10 @@
 ALL_FLOAT_DTYPES: list[Dtype] = [*FLOAT_NUMPY_DTYPES, *FLOAT_EA_DTYPES]

 COMPLEX_DTYPES: list[Dtype] = [complex, "complex64", "complex128"]
-STRING_DTYPES: list[Dtype] = [str, "str", "U"]
+if using_string_dtype():
+    STRING_DTYPES: list[Dtype] = [str, "U"]
+else:
+    STRING_DTYPES: list[Dtype] = [str, "str", "U"]  # type: ignore[no-redef]
 COMPLEX_FLOAT_DTYPES: list[Dtype] = [*COMPLEX_DTYPES, *FLOAT_NUMPY_DTYPES]

 DATETIME64_DTYPES: list[Dtype] = ["datetime64[ns]", "M8[ns]"]

pandas/_testing/asserters.py

Lines changed: 22 additions & 1 deletion
@@ -796,6 +796,24 @@ def assert_extension_array_equal(
         left_na, right_na, obj=f"{obj} NA mask", index_values=index_values
     )

+    # Specifically for StringArrayNumpySemantics, validate here we have a valid array
+    if (
+        isinstance(left.dtype, StringDtype)
+        and left.dtype.storage == "python"
+        and left.dtype.na_value is np.nan
+    ):
+        assert np.all(
+            [np.isnan(val) for val in left._ndarray[left_na]]  # type: ignore[attr-defined]
+        ), "wrong missing value sentinels"
+    if (
+        isinstance(right.dtype, StringDtype)
+        and right.dtype.storage == "python"
+        and right.dtype.na_value is np.nan
+    ):
+        assert np.all(
+            [np.isnan(val) for val in right._ndarray[right_na]]  # type: ignore[attr-defined]
+        ), "wrong missing value sentinels"
+
     left_valid = left[~left_na].to_numpy(dtype=object)
     right_valid = right[~right_na].to_numpy(dtype=object)
     if check_exact:
@@ -1158,7 +1176,10 @@ def assert_frame_equal(
         Specify how to compare internal data. If False, compare by columns.
         If True, compare by blocks.
     check_exact : bool, default False
-        Whether to compare number exactly.
+        Whether to compare number exactly. If False, the comparison uses the
+        relative tolerance (``rtol``) and absolute tolerance (``atol``)
+        parameters to determine if two values are considered close,
+        according to the formula: ``|a - b| <= (atol + rtol * |b|)``.

         .. versionchanged:: 2.2.0