Commit 468b9c1

Merge branch 'main' into groupby_filter_docstring
2 parents d0865a1 + 715585d commit 468b9c1


60 files changed: +580 -334 lines (only a subset of the changed files is shown below)

.github/workflows/unit-tests.yml

Lines changed: 1 addition & 1 deletion
@@ -380,7 +380,7 @@ jobs:
           fetch-depth: 0
 
       - name: Set up Python Free-threading Version
-        uses: deadsnakes/action@v3.1.0
+        uses: deadsnakes/action@v3.2.0
         with:
           python-version: 3.13-dev
           nogil: true

ci/code_checks.sh

Lines changed: 0 additions & 13 deletions
@@ -75,9 +75,7 @@ if [[ -z "$CHECK" || "$CHECK" == "docstrings" ]]; then
         -i "pandas.Period.ordinal GL08" \
         -i "pandas.PeriodDtype.freq SA01" \
         -i "pandas.RangeIndex.from_range PR01,SA01" \
-        -i "pandas.RangeIndex.start SA01" \
         -i "pandas.RangeIndex.step SA01" \
-        -i "pandas.RangeIndex.stop SA01" \
         -i "pandas.Series.cat.add_categories PR01,PR02" \
         -i "pandas.Series.cat.as_ordered PR01" \
         -i "pandas.Series.cat.as_unordered PR01" \
@@ -92,10 +90,8 @@ if [[ -z "$CHECK" || "$CHECK" == "docstrings" ]]; then
         -i "pandas.Series.dt.floor PR01,PR02" \
         -i "pandas.Series.dt.freq GL08" \
         -i "pandas.Series.dt.month_name PR01,PR02" \
-        -i "pandas.Series.dt.nanoseconds SA01" \
         -i "pandas.Series.dt.normalize PR01" \
         -i "pandas.Series.dt.round PR01,PR02" \
-        -i "pandas.Series.dt.seconds SA01" \
         -i "pandas.Series.dt.strftime PR01,PR02" \
         -i "pandas.Series.dt.to_period PR01,PR02" \
         -i "pandas.Series.dt.total_seconds PR01" \
@@ -113,8 +109,6 @@ if [[ -z "$CHECK" || "$CHECK" == "docstrings" ]]; then
         -i "pandas.Timedelta.resolution PR02" \
         -i "pandas.Timedelta.to_timedelta64 SA01" \
         -i "pandas.Timedelta.total_seconds SA01" \
-        -i "pandas.TimedeltaIndex.nanoseconds SA01" \
-        -i "pandas.TimedeltaIndex.seconds SA01" \
         -i "pandas.TimedeltaIndex.to_pytimedelta RT03,SA01" \
         -i "pandas.Timestamp.max PR02" \
         -i "pandas.Timestamp.min PR02" \
@@ -123,13 +117,6 @@ if [[ -z "$CHECK" || "$CHECK" == "docstrings" ]]; then
         -i "pandas.Timestamp.tzinfo GL08" \
         -i "pandas.Timestamp.year GL08" \
         -i "pandas.api.extensions.ExtensionArray.interpolate PR01,SA01" \
-        -i "pandas.api.types.is_bool PR01,SA01" \
-        -i "pandas.api.types.is_categorical_dtype SA01" \
-        -i "pandas.api.types.is_complex PR01,SA01" \
-        -i "pandas.api.types.is_complex_dtype SA01" \
-        -i "pandas.api.types.is_datetime64_dtype SA01" \
-        -i "pandas.api.types.is_datetime64_ns_dtype SA01" \
-        -i "pandas.api.types.is_datetime64tz_dtype SA01" \
         -i "pandas.api.types.is_dict_like PR07,SA01" \
         -i "pandas.api.types.is_extension_array_dtype SA01" \
         -i "pandas.api.types.is_file_like PR07,SA01" \

doc/source/whatsnew/v3.0.0.rst

Lines changed: 1 addition & 0 deletions
@@ -55,6 +55,7 @@ Other enhancements
 - :meth:`Series.plot` now correctly handle the ``ylabel`` parameter for pie charts, allowing for explicit control over the y-axis label (:issue:`58239`)
 - :meth:`DataFrame.plot.scatter` argument ``c`` now accepts a column of strings, where rows with the same string are colored identically (:issue:`16827` and :issue:`16485`)
 - :meth:`pandas.concat` will raise a ``ValueError`` when ``ignore_index=True`` and ``keys`` is not ``None`` (:issue:`59274`)
+- :meth:`str.get_dummies` now accepts a ``dtype`` parameter to specify the dtype of the resulting DataFrame (:issue:`47872`)
 - Multiplying two :class:`DateOffset` objects will now raise a ``TypeError`` instead of a ``RecursionError`` (:issue:`59442`)
 - Restore support for reading Stata 104-format and enable reading 103-format dta files (:issue:`58554`)
 - Support passing a :class:`Iterable[Hashable]` input to :meth:`DataFrame.drop_duplicates` (:issue:`59237`)
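A quick usage sketch of the new ``dtype`` argument described in the whatsnew entry above (illustration only, not part of the commit):

import numpy as np
import pandas as pd

s = pd.Series(["a|b", "a", "b|c"])
# the dtype of the resulting dummy columns can now be requested explicitly,
# e.g. np.uint8 or bool instead of the default integer dummies
print(s.str.get_dummies(sep="|", dtype=np.uint8))
print(s.str.get_dummies(sep="|", dtype=bool))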

pandas/_libs/lib.pyx

Lines changed: 35 additions & 6 deletions
@@ -733,7 +733,9 @@ cpdef ndarray[object] ensure_string_array(
     convert_na_value : bool, default True
         If False, existing na values will be used unchanged in the new array.
     copy : bool, default True
-        Whether to ensure that a new array is returned.
+        Whether to ensure that a new array is returned. When True, a new array
+        is always returned. When False, a new array is only returned when needed
+        to avoid mutating the input array.
     skipna : bool, default True
         Whether or not to coerce nulls to their stringified form
         (e.g. if False, NaN becomes 'nan').
@@ -762,11 +764,15 @@
 
     result = np.asarray(arr, dtype="object")
 
-    if copy and (result is arr or np.shares_memory(arr, result)):
-        # GH#54654
-        result = result.copy()
-    elif not copy and result is arr:
-        already_copied = False
+    if result is arr or np.may_share_memory(arr, result):
+        # if np.asarray(..) did not make a copy of the input arr, we still need
+        # to do that to avoid mutating the input array
+        # GH#54654: share_memory check is needed for rare cases where np.asarray
+        # returns a new object without making a copy of the actual data
+        if copy:
+            result = result.copy()
+        else:
+            already_copied = False
     elif not copy and not result.flags.writeable:
         # Weird edge case where result is a view
         already_copied = False
@@ -1123,10 +1129,21 @@ def is_bool(obj: object) -> bool:
     """
     Return True if given object is boolean.
 
+    Parameters
+    ----------
+    obj : object
+        Object to check.
+
     Returns
     -------
     bool
 
+    See Also
+    --------
+    api.types.is_scalar : Check if the input is a scalar.
+    api.types.is_integer : Check if the input is an integer.
+    api.types.is_float : Check if the input is a float.
+
     Examples
     --------
     >>> pd.api.types.is_bool(True)
@@ -1142,10 +1159,22 @@ def is_complex(obj: object) -> bool:
     """
     Return True if given object is complex.
 
+    Parameters
+    ----------
+    obj : object
+        Object to check.
+
     Returns
     -------
     bool
 
+    See Also
+    --------
+    api.types.is_complex_dtype: Check whether the provided array or
+        dtype is of a complex dtype.
+    api.types.is_number: Check if the object is a number.
+    api.types.is_integer: Return True if given object is integer.
+
     Examples
     --------
     >>> pd.api.types.is_complex(1 + 1j)
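For context on the ``ensure_string_array`` hunks above: the rewritten branch exists because ``np.asarray`` can return the input array itself (or a view of it) instead of a fresh copy. A minimal stand-alone illustration, not part of the commit:

import numpy as np

arr = np.array(["a", "b"], dtype=object)
result = np.asarray(arr, dtype="object")
# asarray does not copy when the dtype already matches, so writing into
# result would mutate arr; this is what the copy / already_copied logic guards
print(result is arr)                     # True
print(np.may_share_memory(arr, result))  # True

arr2 = np.array(["a", "b"])              # '<U1' dtype, conversion must allocate
result2 = np.asarray(arr2, dtype="object")
print(result2 is arr2)                   # False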

pandas/conftest.py

Lines changed: 28 additions & 0 deletions
@@ -1272,6 +1272,34 @@ def string_dtype(request):
     return request.param
 
 
+@pytest.fixture(
+    params=[
+        ("python", pd.NA),
+        pytest.param(("pyarrow", pd.NA), marks=td.skip_if_no("pyarrow")),
+        pytest.param(("pyarrow", np.nan), marks=td.skip_if_no("pyarrow")),
+        ("python", np.nan),
+    ],
+    ids=[
+        "string=string[python]",
+        "string=string[pyarrow]",
+        "string=str[pyarrow]",
+        "string=str[python]",
+    ],
+)
+def string_dtype_no_object(request):
+    """
+    Parametrized fixture for string dtypes.
+    * 'string[python]' (NA variant)
+    * 'string[pyarrow]' (NA variant)
+    * 'str' (NaN variant, with pyarrow)
+    * 'str' (NaN variant, without pyarrow)
+    """
+    # need to instantiate the StringDtype here instead of in the params
+    # to avoid importing pyarrow during test collection
+    storage, na_value = request.param
+    return pd.StringDtype(storage, na_value)
+
+
 @pytest.fixture(
     params=[
         "string[python]",
pandas/core/arrays/arrow/array.py

Lines changed: 13 additions & 2 deletions
@@ -41,6 +41,7 @@
     is_list_like,
     is_numeric_dtype,
     is_scalar,
+    pandas_dtype,
 )
 from pandas.core.dtypes.dtypes import DatetimeTZDtype
 from pandas.core.dtypes.missing import isna
@@ -2475,7 +2476,9 @@ def _str_findall(self, pat: str, flags: int = 0) -> Self:
         result = self._apply_elementwise(predicate)
         return type(self)(pa.chunked_array(result))
 
-    def _str_get_dummies(self, sep: str = "|"):
+    def _str_get_dummies(self, sep: str = "|", dtype: NpDtype | None = None):
+        if dtype is None:
+            dtype = np.bool_
         split = pc.split_pattern(self._pa_array, sep)
         flattened_values = pc.list_flatten(split)
         uniques = flattened_values.unique()
@@ -2485,7 +2488,15 @@ def _str_get_dummies(self, sep: str = "|"):
         n_cols = len(uniques)
         indices = pc.index_in(flattened_values, uniques_sorted).to_numpy()
         indices = indices + np.arange(n_rows).repeat(lengths) * n_cols
-        dummies = np.zeros(n_rows * n_cols, dtype=np.bool_)
+        _dtype = pandas_dtype(dtype)
+        dummies_dtype: NpDtype
+        if isinstance(_dtype, np.dtype):
+            dummies_dtype = _dtype
+        else:
+            dummies_dtype = np.bool_
+        dummies = np.zeros(n_rows * n_cols, dtype=dummies_dtype)
+        if dtype == str:
+            dummies[:] = False
         dummies[indices] = True
         dummies = dummies.reshape((n_rows, n_cols))
         result = type(self)(pa.array(list(dummies)))
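The index arithmetic in ``_str_get_dummies`` above fills one flat buffer and reshapes it. A plain-numpy sketch of that trick with made-up data (illustration only):

import numpy as np

values = np.array(["a", "b", "a", "c"])   # flattened split values of 3 rows
lengths = np.array([2, 1, 1])             # values per row: "a|b", "a", "c"
uniques = np.array(["a", "b", "c"])       # sorted unique labels
n_rows, n_cols = len(lengths), len(uniques)

col = np.searchsorted(uniques, values)    # column index of each flattened value
row = np.arange(n_rows).repeat(lengths)   # row index of each flattened value
flat = np.zeros(n_rows * n_cols, dtype=np.bool_)
flat[row * n_cols + col] = True           # same math as indices + row * n_cols
print(flat.reshape(n_rows, n_cols).astype(int))
# [[1 1 0]
#  [1 0 0]
#  [0 0 1]]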

pandas/core/arrays/categorical.py

Lines changed: 2 additions & 2 deletions
@@ -2681,11 +2681,11 @@ def _str_map(
         result = NumpyExtensionArray(categories.to_numpy())._str_map(f, na_value, dtype)
         return take_nd(result, codes, fill_value=na_value)
 
-    def _str_get_dummies(self, sep: str = "|"):
+    def _str_get_dummies(self, sep: str = "|", dtype: NpDtype | None = None):
         # sep may not be in categories. Just bail on this.
         from pandas.core.arrays import NumpyExtensionArray
 
-        return NumpyExtensionArray(self.astype(str))._str_get_dummies(sep)
+        return NumpyExtensionArray(self.astype(str))._str_get_dummies(sep, dtype)
 
     # ------------------------------------------------------------------------
     # GroupBy Methods

pandas/core/arrays/string_.py

Lines changed: 4 additions & 0 deletions
@@ -715,6 +715,10 @@ def __setitem__(self, key, value) -> None:
         else:
             if not is_array_like(value):
                 value = np.asarray(value, dtype=object)
+            else:
+                # cast categories and friends to arrays to see if values are
+                # compatible, compatibility with arrow backed strings
+                value = np.asarray(value)
             if len(value) and not lib.is_string_array(value, skipna=True):
                 raise TypeError("Must provide strings.")
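A small illustration (not part of the commit) of the input the added ``else`` branch handles: an array-like value that is not an ndarray, such as a Categorical of strings, is now coerced with ``np.asarray`` before the string validation runs:

import pandas as pd

arr = pd.array(["a", "b", "c"], dtype="string[python]")
# a Categorical is array-like but not an ndarray; the new branch casts it
# so lib.is_string_array can validate the contents
arr[:2] = pd.Categorical(["x", "y"])
print(arr)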

pandas/core/arrays/string_arrow.py

Lines changed: 16 additions & 5 deletions
@@ -56,6 +56,7 @@
     ArrayLike,
     AxisInt,
     Dtype,
+    NpDtype,
     Scalar,
     Self,
     npt,
@@ -240,7 +241,7 @@ def _maybe_convert_setitem_value(self, value):
             value[isna(value)] = None
             for v in value:
                 if not (v is None or isinstance(v, str)):
-                    raise TypeError("Scalar must be NA or str")
+                    raise TypeError("Must provide strings")
         return super()._maybe_convert_setitem_value(value)
 
     def isin(self, values: ArrayLike) -> npt.NDArray[np.bool_]:
@@ -425,12 +426,22 @@ def _str_find(self, sub: str, start: int = 0, end: int | None = None):
             return super()._str_find(sub, start, end)
         return ArrowStringArrayMixin._str_find(self, sub, start, end)
 
-    def _str_get_dummies(self, sep: str = "|"):
-        dummies_pa, labels = ArrowExtensionArray(self._pa_array)._str_get_dummies(sep)
+    def _str_get_dummies(self, sep: str = "|", dtype: NpDtype | None = None):
+        if dtype is None:
+            dtype = np.int64
+        dummies_pa, labels = ArrowExtensionArray(self._pa_array)._str_get_dummies(
+            sep, dtype
+        )
         if len(labels) == 0:
-            return np.empty(shape=(0, 0), dtype=np.int64), labels
+            return np.empty(shape=(0, 0), dtype=dtype), labels
         dummies = np.vstack(dummies_pa.to_numpy())
-        return dummies.astype(np.int64, copy=False), labels
+        _dtype = pandas_dtype(dtype)
+        dummies_dtype: NpDtype
+        if isinstance(_dtype, np.dtype):
+            dummies_dtype = _dtype
+        else:
+            dummies_dtype = np.bool_
+        return dummies.astype(dummies_dtype, copy=False), labels
 
     def _convert_int_result(self, result):
         if self.dtype.na_value is np.nan:

pandas/core/arrays/timedeltas.py

Lines changed: 10 additions & 0 deletions
@@ -842,6 +842,11 @@ def to_pytimedelta(self) -> npt.NDArray[np.object_]:
     seconds_docstring = textwrap.dedent(
         """Number of seconds (>= 0 and less than 1 day) for each element.
 
+        See Also
+        --------
+        Series.dt.seconds : Return number of seconds for each element.
+        Series.dt.nanoseconds : Return number of nanoseconds for each element.
+
         Examples
         --------
         For Series:
@@ -917,6 +922,11 @@ def to_pytimedelta(self) -> npt.NDArray[np.object_]:
     nanoseconds_docstring = textwrap.dedent(
         """Number of nanoseconds (>= 0 and less than 1 microsecond) for each element.
 
+        See Also
+        --------
+        Series.dt.seconds : Return number of seconds for each element.
+        Series.dt.microseconds : Return number of microseconds for each element.
+
         Examples
         --------
         For Series:
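The attributes cross-referenced in the new ``See Also`` sections are the timedelta components; a quick example (not part of the commit):

import pandas as pd

ser = pd.Series(pd.to_timedelta(["1 days 00:02:00.000003042"]))
print(ser.dt.seconds)        # 120 (the 2-minute component; days are excluded)
print(ser.dt.microseconds)   # 3   (microsecond component)
print(ser.dt.nanoseconds)    # 42  (nanosecond component)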
