
Commit a062231

Merge remote-tracking branch 'upstream/main' into string-dtype-isin
2 parents: 5a2d4e4 + 16b7288


70 files changed: +752 additions, −487 deletions

.github/workflows/unit-tests.yml

Lines changed: 1 addition & 1 deletion
@@ -380,7 +380,7 @@ jobs:
         fetch-depth: 0
 
     - name: Set up Python Free-threading Version
-      uses: deadsnakes/action@v3.1.0
+      uses: deadsnakes/action@v3.2.0
       with:
         python-version: 3.13-dev
         nogil: true

ci/code_checks.sh

Lines changed: 0 additions & 16 deletions
@@ -75,9 +75,7 @@ if [[ -z "$CHECK" || "$CHECK" == "docstrings" ]]; then
         -i "pandas.Period.ordinal GL08" \
         -i "pandas.PeriodDtype.freq SA01" \
         -i "pandas.RangeIndex.from_range PR01,SA01" \
-        -i "pandas.RangeIndex.start SA01" \
         -i "pandas.RangeIndex.step SA01" \
-        -i "pandas.RangeIndex.stop SA01" \
         -i "pandas.Series.cat.add_categories PR01,PR02" \
         -i "pandas.Series.cat.as_ordered PR01" \
         -i "pandas.Series.cat.as_unordered PR01" \
@@ -92,10 +90,8 @@ if [[ -z "$CHECK" || "$CHECK" == "docstrings" ]]; then
         -i "pandas.Series.dt.floor PR01,PR02" \
         -i "pandas.Series.dt.freq GL08" \
         -i "pandas.Series.dt.month_name PR01,PR02" \
-        -i "pandas.Series.dt.nanoseconds SA01" \
         -i "pandas.Series.dt.normalize PR01" \
         -i "pandas.Series.dt.round PR01,PR02" \
-        -i "pandas.Series.dt.seconds SA01" \
         -i "pandas.Series.dt.strftime PR01,PR02" \
         -i "pandas.Series.dt.to_period PR01,PR02" \
         -i "pandas.Series.dt.total_seconds PR01" \
@@ -113,23 +109,13 @@ if [[ -z "$CHECK" || "$CHECK" == "docstrings" ]]; then
         -i "pandas.Timedelta.resolution PR02" \
         -i "pandas.Timedelta.to_timedelta64 SA01" \
         -i "pandas.Timedelta.total_seconds SA01" \
-        -i "pandas.TimedeltaIndex.nanoseconds SA01" \
-        -i "pandas.TimedeltaIndex.seconds SA01" \
         -i "pandas.TimedeltaIndex.to_pytimedelta RT03,SA01" \
         -i "pandas.Timestamp.max PR02" \
         -i "pandas.Timestamp.min PR02" \
         -i "pandas.Timestamp.nanosecond GL08" \
         -i "pandas.Timestamp.resolution PR02" \
         -i "pandas.Timestamp.tzinfo GL08" \
         -i "pandas.Timestamp.year GL08" \
-        -i "pandas.api.extensions.ExtensionArray.interpolate PR01,SA01" \
-        -i "pandas.api.types.is_bool PR01,SA01" \
-        -i "pandas.api.types.is_categorical_dtype SA01" \
-        -i "pandas.api.types.is_complex PR01,SA01" \
-        -i "pandas.api.types.is_complex_dtype SA01" \
-        -i "pandas.api.types.is_datetime64_dtype SA01" \
-        -i "pandas.api.types.is_datetime64_ns_dtype SA01" \
-        -i "pandas.api.types.is_datetime64tz_dtype SA01" \
         -i "pandas.api.types.is_dict_like PR07,SA01" \
         -i "pandas.api.types.is_extension_array_dtype SA01" \
         -i "pandas.api.types.is_file_like PR07,SA01" \
@@ -163,7 +149,6 @@ if [[ -z "$CHECK" || "$CHECK" == "docstrings" ]]; then
         -i "pandas.core.groupby.DataFrameGroupBy.agg RT03" \
         -i "pandas.core.groupby.DataFrameGroupBy.aggregate RT03" \
         -i "pandas.core.groupby.DataFrameGroupBy.boxplot PR07,RT03,SA01" \
-        -i "pandas.core.groupby.DataFrameGroupBy.filter SA01" \
         -i "pandas.core.groupby.DataFrameGroupBy.get_group RT03,SA01" \
         -i "pandas.core.groupby.DataFrameGroupBy.groups SA01" \
         -i "pandas.core.groupby.DataFrameGroupBy.hist RT03" \
@@ -179,7 +164,6 @@ if [[ -z "$CHECK" || "$CHECK" == "docstrings" ]]; then
         -i "pandas.core.groupby.SeriesGroupBy.__iter__ RT03,SA01" \
         -i "pandas.core.groupby.SeriesGroupBy.agg RT03" \
         -i "pandas.core.groupby.SeriesGroupBy.aggregate RT03" \
-        -i "pandas.core.groupby.SeriesGroupBy.filter PR01,SA01" \
         -i "pandas.core.groupby.SeriesGroupBy.get_group RT03,SA01" \
         -i "pandas.core.groupby.SeriesGroupBy.groups SA01" \
         -i "pandas.core.groupby.SeriesGroupBy.indices SA01" \

doc/source/whatsnew/v2.3.0.rst

Lines changed: 2 additions & 1 deletion
@@ -103,8 +103,9 @@ Conversion
 Strings
 ^^^^^^^
 - Bug in :meth:`Series.str.replace` when ``n < 0`` for :class:`StringDtype` with ``storage="pyarrow"`` (:issue:`59628`)
+- Bug in ``ser.str.slice`` with negative ``step`` with :class:`ArrowDtype` and :class:`StringDtype` with ``storage="pyarrow"`` giving incorrect results (:issue:`59710`)
 - Bug in the ``center`` method on :class:`Series` and :class:`Index` object ``str`` accessors with pyarrow-backed dtype not matching the python behavior in corner cases with an odd number of fill characters (:issue:`54792`)
-
+-
 
 Interval
 ^^^^^^^^
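A minimal sketch of the behavior the new ``ser.str.slice`` entry describes, assuming pyarrow is installed; the expected output simply follows Python's own slicing semantics ("abcd"[::-1] == "dcba").

import pandas as pd

ser = pd.Series(["abcd", "efgh"], dtype="string[pyarrow]")
# With the fix for GH#59710, a negative step with no explicit start reverses
# each element, matching Python string slicing.
print(ser.str.slice(step=-1))  # expected element values: "dcba", "hgfe"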

doc/source/whatsnew/v3.0.0.rst

Lines changed: 2 additions & 0 deletions
@@ -55,6 +55,7 @@ Other enhancements
 - :meth:`Series.plot` now correctly handle the ``ylabel`` parameter for pie charts, allowing for explicit control over the y-axis label (:issue:`58239`)
 - :meth:`DataFrame.plot.scatter` argument ``c`` now accepts a column of strings, where rows with the same string are colored identically (:issue:`16827` and :issue:`16485`)
 - :meth:`pandas.concat` will raise a ``ValueError`` when ``ignore_index=True`` and ``keys`` is not ``None`` (:issue:`59274`)
+- :meth:`str.get_dummies` now accepts a ``dtype`` parameter to specify the dtype of the resulting DataFrame (:issue:`47872`)
 - Multiplying two :class:`DateOffset` objects will now raise a ``TypeError`` instead of a ``RecursionError`` (:issue:`59442`)
 - Restore support for reading Stata 104-format and enable reading 103-format dta files (:issue:`58554`)
 - Support passing a :class:`Iterable[Hashable]` input to :meth:`DataFrame.drop_duplicates` (:issue:`59237`)
@@ -668,6 +669,7 @@ Reshaping
 - Bug in :meth:`DataFrame.join` when a :class:`DataFrame` with a :class:`MultiIndex` would raise an ``AssertionError`` when :attr:`MultiIndex.names` contained ``None``. (:issue:`58721`)
 - Bug in :meth:`DataFrame.merge` where merging on a column containing only ``NaN`` values resulted in an out-of-bounds array access (:issue:`59421`)
 - Bug in :meth:`DataFrame.unstack` producing incorrect results when ``sort=False`` (:issue:`54987`, :issue:`55516`)
+- Bug in :meth:`DataFrame.pivot_table` incorrectly subaggregating results when called without an ``index`` argument (:issue:`58722`)
 - Bug in :meth:`DataFrame.unstack` producing incorrect results when manipulating empty :class:`DataFrame` with an :class:`ExtentionDtype` (:issue:`59123`)
 
 Sparse
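A short usage sketch for the ``dtype`` parameter described in the ``str.get_dummies`` entry above; ``np.int8`` is just an illustrative choice, and the output columns take whatever dtype is passed.

import numpy as np
import pandas as pd

ser = pd.Series(["a|b", "a", "b|c"])
# dtype controls the dtype of the indicator columns (GH#47872).
dummies = ser.str.get_dummies(sep="|", dtype=np.int8)
print(dummies.dtypes)  # expected: int8 for columns a, b, c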

pandas/_libs/lib.pyx

Lines changed: 35 additions & 6 deletions
@@ -733,7 +733,9 @@ cpdef ndarray[object] ensure_string_array(
     convert_na_value : bool, default True
         If False, existing na values will be used unchanged in the new array.
     copy : bool, default True
-        Whether to ensure that a new array is returned.
+        Whether to ensure that a new array is returned. When True, a new array
+        is always returned. When False, a new array is only returned when needed
+        to avoid mutating the input array.
     skipna : bool, default True
         Whether or not to coerce nulls to their stringified form
         (e.g. if False, NaN becomes 'nan').
@@ -762,11 +764,15 @@
 
     result = np.asarray(arr, dtype="object")
 
-    if copy and (result is arr or np.shares_memory(arr, result)):
-        # GH#54654
-        result = result.copy()
-    elif not copy and result is arr:
-        already_copied = False
+    if result is arr or np.may_share_memory(arr, result):
+        # if np.asarray(..) did not make a copy of the input arr, we still need
+        # to do that to avoid mutating the input array
+        # GH#54654: share_memory check is needed for rare cases where np.asarray
+        # returns a new object without making a copy of the actual data
+        if copy:
+            result = result.copy()
+        else:
+            already_copied = False
     elif not copy and not result.flags.writeable:
         # Weird edge case where result is a view
         already_copied = False
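The rewritten branch above exists because ``np.asarray`` can hand back the input array itself (or a view of it) instead of a fresh copy; a minimal NumPy-only sketch of that case, independent of the Cython code:

import numpy as np

arr = np.array(["a", "b"], dtype=object)
result = np.asarray(arr, dtype="object")
# asarray is a no-op for an already-object-dtype array, so writing into
# ``result`` would mutate ``arr`` -- the case the may_share_memory check guards.
print(result is arr)                     # True
print(np.may_share_memory(arr, result))  # True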
@@ -1123,10 +1129,21 @@ def is_bool(obj: object) -> bool:
     """
     Return True if given object is boolean.
 
+    Parameters
+    ----------
+    obj : object
+        Object to check.
+
     Returns
     -------
     bool
 
+    See Also
+    --------
+    api.types.is_scalar : Check if the input is a scalar.
+    api.types.is_integer : Check if the input is an integer.
+    api.types.is_float : Check if the input is a float.
+
     Examples
     --------
     >>> pd.api.types.is_bool(True)
@@ -1142,10 +1159,22 @@ def is_complex(obj: object) -> bool:
     """
    Return True if given object is complex.
 
+    Parameters
+    ----------
+    obj : object
+        Object to check.
+
     Returns
     -------
     bool
 
+    See Also
+    --------
+    api.types.is_complex_dtype: Check whether the provided array or
+        dtype is of a complex dtype.
+    api.types.is_number: Check if the object is a number.
+    api.types.is_integer: Return True if given object is integer.
+
     Examples
     --------
     >>> pd.api.types.is_complex(1 + 1j)
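For reference, the two helpers whose docstrings gain Parameters/See Also sections above behave as follows; the calls use the public ``pd.api.types`` aliases and their standard scalar-checking behavior.

import numpy as np
import pandas as pd

# is_bool accepts Python and NumPy booleans but rejects integers;
# is_complex matches complex scalars only, not complex-dtype arrays.
print(pd.api.types.is_bool(np.bool_(True)))            # True
print(pd.api.types.is_bool(1))                         # False
print(pd.api.types.is_complex(1 + 1j))                 # True
print(pd.api.types.is_complex(np.complex128(1 + 1j)))  # True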

pandas/conftest.py

Lines changed: 28 additions & 0 deletions
@@ -1272,6 +1272,34 @@ def string_dtype(request):
     return request.param
 
 
+@pytest.fixture(
+    params=[
+        ("python", pd.NA),
+        pytest.param(("pyarrow", pd.NA), marks=td.skip_if_no("pyarrow")),
+        pytest.param(("pyarrow", np.nan), marks=td.skip_if_no("pyarrow")),
+        ("python", np.nan),
+    ],
+    ids=[
+        "string=string[python]",
+        "string=string[pyarrow]",
+        "string=str[pyarrow]",
+        "string=str[python]",
+    ],
+)
+def string_dtype_no_object(request):
+    """
+    Parametrized fixture for string dtypes.
+    * 'string[python]' (NA variant)
+    * 'string[pyarrow]' (NA variant)
+    * 'str' (NaN variant, with pyarrow)
+    * 'str' (NaN variant, without pyarrow)
+    """
+    # need to instantiate the StringDtype here instead of in the params
+    # to avoid importing pyarrow during test collection
+    storage, na_value = request.param
+    return pd.StringDtype(storage, na_value)
+
+
 @pytest.fixture(
     params=[
         "string[python]",

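A hypothetical test sketch showing how the new ``string_dtype_no_object`` fixture would be consumed; the test name and assertion are illustrative and not part of the diff.

import pandas as pd


def test_isin_string_dtype(string_dtype_no_object):
    # pytest injects one concrete StringDtype per parametrized case
    # (python or pyarrow storage, NA or NaN missing-value semantics).
    ser = pd.Series(["a", "b"], dtype=string_dtype_no_object)
    assert ser.isin(["a"]).tolist() == [True, False]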
pandas/core/arrays/_arrow_string_mixins.py

Lines changed: 23 additions & 9 deletions
@@ -11,6 +11,7 @@
 
 from pandas.compat import (
     pa_version_under10p1,
+    pa_version_under11p0,
     pa_version_under13p0,
     pa_version_under17p0,
 )
@@ -22,10 +23,7 @@
 import pyarrow.compute as pc
 
 if TYPE_CHECKING:
-    from collections.abc import (
-        Callable,
-        Sized,
-    )
+    from collections.abc import Callable
 
     from pandas._typing import (
         Scalar,
@@ -34,7 +32,7 @@
 
 
 class ArrowStringArrayMixin:
-    _pa_array: Sized
+    _pa_array: pa.ChunkedArray
 
     def __init__(self, *args, **kwargs) -> None:
         raise NotImplementedError
@@ -96,13 +94,29 @@ def _str_get(self, i: int) -> Self:
         selected = pc.utf8_slice_codeunits(
             self._pa_array, start=start, stop=stop, step=step
         )
-        null_value = pa.scalar(
-            None,
-            type=self._pa_array.type,  # type: ignore[attr-defined]
-        )
+        null_value = pa.scalar(None, type=self._pa_array.type)
         result = pc.if_else(not_out_of_bounds, selected, null_value)
         return type(self)(result)
 
+    def _str_slice(
+        self, start: int | None = None, stop: int | None = None, step: int | None = None
+    ) -> Self:
+        if pa_version_under11p0:
+            # GH#59724
+            result = self._apply_elementwise(lambda val: val[start:stop:step])
+            return type(self)(pa.chunked_array(result, type=self._pa_array.type))
+        if start is None:
+            if step is not None and step < 0:
+                # GH#59710
+                start = -1
+            else:
+                start = 0
+        if step is None:
+            step = 1
+        return type(self)(
+            pc.utf8_slice_codeunits(self._pa_array, start=start, stop=stop, step=step)
+        )
+
     def _str_slice_replace(
         self, start: int | None = None, stop: int | None = None, repl: str | None = None
     ) -> Self:
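The ``start = -1`` default in the new ``_str_slice`` mirrors plain Python slicing, where a negative step with an omitted start begins at the last character; a pure-Python sketch of why ``0`` would be the wrong default:

# With step=-1 and no explicit start, Python begins at the end of the string,
# which is exactly what start=-1 reproduces for pc.utf8_slice_codeunits.
val = "abcd"
print(val[None:None:-1])  # "dcba" (same as val[::-1])
print(val[-1::-1])        # "dcba" -- explicit start at the last character
print(val[0::-1])         # "a"    -- start=0 would truncate the result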

pandas/core/arrays/arrow/_arrow_utils.py

Lines changed: 0 additions & 19 deletions
@@ -1,27 +1,8 @@
 from __future__ import annotations
 
-import warnings
-
 import numpy as np
 import pyarrow
 
-from pandas._config.config import get_option
-
-from pandas.errors import PerformanceWarning
-from pandas.util._exceptions import find_stack_level
-
-
-def fallback_performancewarning(version: str | None = None) -> None:
-    """
-    Raise a PerformanceWarning for falling back to ExtensionArray's
-    non-pyarrow method
-    """
-    if get_option("performance_warnings"):
-        msg = "Falling back on a non-pyarrow code path which may decrease performance."
-        if version is not None:
-            msg += f" Upgrade to pyarrow >={version} to possibly suppress this warning."
-        warnings.warn(msg, PerformanceWarning, stacklevel=find_stack_level())
-
 
 def pyarrow_array_to_numpy_and_mask(
     arr, dtype: np.dtype

pandas/core/arrays/arrow/array.py

Lines changed: 13 additions & 13 deletions
@@ -41,6 +41,7 @@
     is_list_like,
     is_numeric_dtype,
     is_scalar,
+    pandas_dtype,
 )
 from pandas.core.dtypes.dtypes import DatetimeTZDtype
 from pandas.core.dtypes.missing import isna
@@ -2393,17 +2394,6 @@ def _str_rpartition(self, sep: str, expand: bool) -> Self:
         result = self._apply_elementwise(predicate)
         return type(self)(pa.chunked_array(result))
 
-    def _str_slice(
-        self, start: int | None = None, stop: int | None = None, step: int | None = None
-    ) -> Self:
-        if start is None:
-            start = 0
-        if step is None:
-            step = 1
-        return type(self)(
-            pc.utf8_slice_codeunits(self._pa_array, start=start, stop=stop, step=step)
-        )
-
     def _str_len(self) -> Self:
         return type(self)(pc.utf8_length(self._pa_array))
 
@@ -2475,7 +2465,9 @@ def _str_findall(self, pat: str, flags: int = 0) -> Self:
         result = self._apply_elementwise(predicate)
         return type(self)(pa.chunked_array(result))
 
-    def _str_get_dummies(self, sep: str = "|"):
+    def _str_get_dummies(self, sep: str = "|", dtype: NpDtype | None = None):
+        if dtype is None:
+            dtype = np.bool_
         split = pc.split_pattern(self._pa_array, sep)
         flattened_values = pc.list_flatten(split)
         uniques = flattened_values.unique()
@@ -2485,7 +2477,15 @@ def _str_get_dummies(self, sep: str = "|"):
         n_cols = len(uniques)
         indices = pc.index_in(flattened_values, uniques_sorted).to_numpy()
         indices = indices + np.arange(n_rows).repeat(lengths) * n_cols
-        dummies = np.zeros(n_rows * n_cols, dtype=np.bool_)
+        _dtype = pandas_dtype(dtype)
+        dummies_dtype: NpDtype
+        if isinstance(_dtype, np.dtype):
+            dummies_dtype = _dtype
+        else:
+            dummies_dtype = np.bool_
+        dummies = np.zeros(n_rows * n_cols, dtype=dummies_dtype)
+        if dtype == str:
+            dummies[:] = False
         dummies[indices] = True
         dummies = dummies.reshape((n_rows, n_cols))
         result = type(self)(pa.array(list(dummies)))
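A NumPy-level sketch of the dtype plumbing added to ``_str_get_dummies`` above: the requested dtype is normalized with ``pandas_dtype`` and used directly when it is a plain NumPy dtype, otherwise the indicator buffer falls back to boolean. The helper name and arguments here are illustrative, not the real method.

import numpy as np
from pandas.api.types import pandas_dtype


def build_dummies(n_rows, n_cols, indices, dtype=np.bool_):
    # Mirrors the diff: only plain NumPy dtypes are used for the buffer;
    # anything else (e.g. an extension dtype) falls back to np.bool_.
    _dtype = pandas_dtype(dtype)
    buffer_dtype = _dtype if isinstance(_dtype, np.dtype) else np.bool_
    dummies = np.zeros(n_rows * n_cols, dtype=buffer_dtype)
    dummies[indices] = True
    return dummies.reshape((n_rows, n_cols))


print(build_dummies(2, 3, np.array([0, 4]), dtype=np.int8))
# [[1 0 0]
#  [0 1 0]]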
