Skip to content

Commit 1ad55bb

Browse files
Merge branch 'main' into table_prefixes
2 parents 3c8a12e + 602ae10 commit 1ad55bb

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

70 files changed

+831
-894
lines changed

.circleci/config.yml

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -34,7 +34,6 @@ jobs:
3434
fi
3535
python -m pip install --no-build-isolation -ve . -Csetup-args="--werror"
3636
PATH=$HOME/miniconda3/envs/pandas-dev/bin:$HOME/miniconda3/condabin:$PATH
37-
sudo apt-get update && sudo apt-get install -y libegl1 libopengl0
3837
ci/run_tests.sh
3938
test-linux-musl:
4039
docker:

.github/workflows/unit-tests.yml

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -385,10 +385,12 @@ jobs:
385385
nogil: true
386386

387387
- name: Build Environment
388+
# TODO: Once numpy 2.2.1 is out, don't install nightly version
389+
# Tests segfault with numpy 2.2.0: https://github.com/numpy/numpy/pull/27955
388390
run: |
389391
python --version
390-
python -m pip install --upgrade pip setuptools wheel numpy meson[ninja]==1.2.1 meson-python==0.13.1
391-
python -m pip install --pre --extra-index-url https://pypi.anaconda.org/scientific-python-nightly-wheels/simple cython
392+
python -m pip install --upgrade pip setuptools wheel meson[ninja]==1.2.1 meson-python==0.13.1
393+
python -m pip install --pre --extra-index-url https://pypi.anaconda.org/scientific-python-nightly-wheels/simple cython numpy
392394
python -m pip install versioneer[toml]
393395
python -m pip install python-dateutil pytz tzdata hypothesis>=6.84.0 pytest>=7.3.2 pytest-xdist>=3.4.0 pytest-cov
394396
python -m pip install -ve . --no-build-isolation --no-index --no-deps -Csetup-args="--werror"

ci/code_checks.sh

Lines changed: 0 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -81,15 +81,10 @@ if [[ -z "$CHECK" || "$CHECK" == "docstrings" ]]; then
8181
-i "pandas.Timestamp.resolution PR02" \
8282
-i "pandas.Timestamp.tzinfo GL08" \
8383
-i "pandas.arrays.ArrowExtensionArray PR07,SA01" \
84-
-i "pandas.arrays.IntervalArray.length SA01" \
8584
-i "pandas.arrays.NumpyExtensionArray SA01" \
8685
-i "pandas.arrays.TimedeltaArray PR07,SA01" \
87-
-i "pandas.core.groupby.DataFrameGroupBy.boxplot PR07,RT03,SA01" \
88-
-i "pandas.core.groupby.DataFrameGroupBy.get_group RT03,SA01" \
8986
-i "pandas.core.groupby.DataFrameGroupBy.plot PR02" \
90-
-i "pandas.core.groupby.SeriesGroupBy.get_group RT03,SA01" \
9187
-i "pandas.core.groupby.SeriesGroupBy.plot PR02" \
92-
-i "pandas.core.resample.Resampler.get_group RT03,SA01" \
9388
-i "pandas.core.resample.Resampler.max PR01,RT03,SA01" \
9489
-i "pandas.core.resample.Resampler.mean SA01" \
9590
-i "pandas.core.resample.Resampler.min PR01,RT03,SA01" \
@@ -98,14 +93,8 @@ if [[ -z "$CHECK" || "$CHECK" == "docstrings" ]]; then
9893
-i "pandas.core.resample.Resampler.std SA01" \
9994
-i "pandas.core.resample.Resampler.transform PR01,RT03,SA01" \
10095
-i "pandas.core.resample.Resampler.var SA01" \
101-
-i "pandas.errors.NullFrequencyError SA01" \
102-
-i "pandas.errors.NumbaUtilError SA01" \
103-
-i "pandas.errors.PerformanceWarning SA01" \
104-
-i "pandas.errors.UndefinedVariableError PR01,SA01" \
10596
-i "pandas.errors.ValueLabelTypeMismatch SA01" \
106-
-i "pandas.io.json.build_table_schema PR07,RT03,SA01" \
10797
-i "pandas.plotting.andrews_curves RT03,SA01" \
108-
-i "pandas.plotting.scatter_matrix PR07,SA01" \
10998
-i "pandas.tseries.offsets.BDay PR02,SA01" \
11099
-i "pandas.tseries.offsets.BQuarterBegin.is_on_offset GL08" \
111100
-i "pandas.tseries.offsets.BQuarterBegin.n GL08" \

doc/source/reference/frame.rst

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -185,7 +185,6 @@ Reindexing / selection / label manipulation
185185
DataFrame.duplicated
186186
DataFrame.equals
187187
DataFrame.filter
188-
DataFrame.head
189188
DataFrame.idxmax
190189
DataFrame.idxmin
191190
DataFrame.reindex
@@ -196,7 +195,6 @@ Reindexing / selection / label manipulation
196195
DataFrame.sample
197196
DataFrame.set_axis
198197
DataFrame.set_index
199-
DataFrame.tail
200198
DataFrame.take
201199
DataFrame.truncate
202200

doc/source/user_guide/cookbook.rst

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -459,7 +459,7 @@ Unlike agg, apply's callable is passed a sub-DataFrame which gives you access to
459459
df
460460
461461
# List the size of the animals with the highest weight.
462-
df.groupby("animal").apply(lambda subf: subf["size"][subf["weight"].idxmax()], include_groups=False)
462+
df.groupby("animal").apply(lambda subf: subf["size"][subf["weight"].idxmax()])
463463
464464
`Using get_group
465465
<https://stackoverflow.com/questions/14734533/how-to-access-pandas-groupby-dataframe-by-key>`__
@@ -482,7 +482,7 @@ Unlike agg, apply's callable is passed a sub-DataFrame which gives you access to
482482
return pd.Series(["L", avg_weight, True], index=["size", "weight", "adult"])
483483
484484
485-
expected_df = gb.apply(GrowUp, include_groups=False)
485+
expected_df = gb.apply(GrowUp)
486486
expected_df
487487
488488
`Expanding apply

doc/source/user_guide/dsintro.rst

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -326,7 +326,7 @@ This case is handled identically to a dict of arrays.
326326

327327
.. ipython:: python
328328
329-
data = np.zeros((2,), dtype=[("A", "i4"), ("B", "f4"), ("C", "a10")])
329+
data = np.zeros((2,), dtype=[("A", "i4"), ("B", "f4"), ("C", "S10")])
330330
data[:] = [(1, 2.0, "Hello"), (2, 3.0, "World")]
331331
332332
pd.DataFrame(data)

doc/source/user_guide/groupby.rst

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -1074,7 +1074,7 @@ missing values with the ``ffill()`` method.
10741074
).set_index("date")
10751075
df_re
10761076
1077-
df_re.groupby("group").resample("1D", include_groups=False).ffill()
1077+
df_re.groupby("group").resample("1D").ffill()
10781078
10791079
.. _groupby.filter:
10801080

@@ -1252,13 +1252,13 @@ the argument ``group_keys`` which defaults to ``True``. Compare
12521252

12531253
.. ipython:: python
12541254
1255-
df.groupby("A", group_keys=True).apply(lambda x: x, include_groups=False)
1255+
df.groupby("A", group_keys=True).apply(lambda x: x)
12561256
12571257
with
12581258

12591259
.. ipython:: python
12601260
1261-
df.groupby("A", group_keys=False).apply(lambda x: x, include_groups=False)
1261+
df.groupby("A", group_keys=False).apply(lambda x: x)
12621262
12631263
12641264
Numba accelerated routines
@@ -1742,7 +1742,7 @@ column index name will be used as the name of the inserted column:
17421742
result = {"b_sum": x["b"].sum(), "c_mean": x["c"].mean()}
17431743
return pd.Series(result, name="metrics")
17441744
1745-
result = df.groupby("a").apply(compute_metrics, include_groups=False)
1745+
result = df.groupby("a").apply(compute_metrics)
17461746
17471747
result
17481748

doc/source/whatsnew/v3.0.0.rst

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -56,6 +56,7 @@ Other enhancements
5656
- :meth:`DataFrame.plot.scatter` argument ``c`` now accepts a column of strings, where rows with the same string are colored identically (:issue:`16827` and :issue:`16485`)
5757
- :func:`read_parquet` accepts ``to_pandas_kwargs`` which are forwarded to :meth:`pyarrow.Table.to_pandas` which enables passing additional keywords to customize the conversion to pandas, such as ``maps_as_pydicts`` to read the Parquet map data type as python dictionaries (:issue:`56842`)
5858
- :meth:`DataFrameGroupBy.transform`, :meth:`SeriesGroupBy.transform`, :meth:`DataFrameGroupBy.agg`, :meth:`SeriesGroupBy.agg`, :meth:`RollingGroupby.apply`, :meth:`ExpandingGroupby.apply`, :meth:`Rolling.apply`, :meth:`Expanding.apply`, :meth:`DataFrame.apply` with ``engine="numba"`` now supports positional arguments passed as kwargs (:issue:`58995`)
59+
- :meth:`Rolling.agg`, :meth:`Expanding.agg` and :meth:`ExponentialMovingWindow.agg` now accept :class:`NamedAgg` aggregations through ``**kwargs`` (:issue:`28333`)
5960
- :meth:`Series.map` can now accept kwargs to pass on to func (:issue:`59814`)
6061
- :meth:`pandas.concat` will raise a ``ValueError`` when ``ignore_index=True`` and ``keys`` is not ``None`` (:issue:`59274`)
6162
- :meth:`str.get_dummies` now accepts a ``dtype`` parameter to specify the dtype of the resulting DataFrame (:issue:`47872`)
@@ -553,6 +554,7 @@ Other Removals
553554
- Removed the ``method`` keyword in ``ExtensionArray.fillna``, implement ``ExtensionArray._pad_or_backfill`` instead (:issue:`53621`)
554555
- Removed the attribute ``dtypes`` from :class:`.DataFrameGroupBy` (:issue:`51997`)
555556
- Enforced deprecation of ``argmin``, ``argmax``, ``idxmin``, and ``idxmax`` returning a result when ``skipna=False`` and an NA value is encountered or all values are NA values; these operations will now raise in such cases (:issue:`33941`, :issue:`51276`)
557+
- Removed specifying ``include_groups=True`` in :meth:`.DataFrameGroupBy.apply` and :meth:`.Resampler.apply` (:issue:`7155`)
556558

557559
.. ---------------------------------------------------------------------------
558560
.. _whatsnew_300.performance:
@@ -626,6 +628,7 @@ Datetimelike
626628
- Bug in :meth:`DatetimeIndex.union` and :meth:`DatetimeIndex.intersection` when ``unit`` was non-nanosecond (:issue:`59036`)
627629
- Bug in :meth:`Series.dt.microsecond` producing incorrect results for pyarrow backed :class:`Series`. (:issue:`59154`)
628630
- Bug in :meth:`to_datetime` not respecting dayfirst if an uncommon date string was passed. (:issue:`58859`)
631+
- Bug in :meth:`to_datetime` on a float32 :class:`DataFrame` with year, month, day, etc. columns leading to precision issues and incorrect results. (:issue:`60506`)
629632
- Bug in :meth:`to_datetime` reporting an incorrect index in failure scenarios. (:issue:`58298`)
630633
- Bug in :meth:`to_datetime` wrongly converts when ``arg`` is a ``np.datetime64`` object with unit of ``ps``. (:issue:`60341`)
631634
- Bug in setting scalar values with mismatched resolution into arrays with non-nanosecond ``datetime64``, ``timedelta64`` or :class:`DatetimeTZDtype` incorrectly truncating those scalars (:issue:`56410`)
@@ -733,6 +736,7 @@ Groupby/resample/rolling
733736
- Bug in :meth:`.Resampler.interpolate` on a :class:`DataFrame` with non-uniform sampling and/or indices not aligning with the resulting resampled index would result in wrong interpolation (:issue:`21351`)
734737
- Bug in :meth:`DataFrame.ewm` and :meth:`Series.ewm` when passed ``times`` and aggregation functions other than mean (:issue:`51695`)
735738
- Bug in :meth:`DataFrameGroupBy.agg` that raises ``AttributeError`` when there is dictionary input and duplicated columns, instead of returning a DataFrame with the aggregation of all duplicate columns. (:issue:`55041`)
739+
- Bug in :meth:`DataFrameGroupBy.apply` and :meth:`SeriesGroupBy.apply` for empty data frame with ``group_keys=False`` still creating output index using group keys. (:issue:`60471`)
736740
- Bug in :meth:`DataFrameGroupBy.apply` that was returning a completely empty DataFrame when all return values of ``func`` were ``None`` instead of returning an empty DataFrame with the original columns and dtypes. (:issue:`57775`)
737741
- Bug in :meth:`DataFrameGroupBy.apply` with ``as_index=False`` that was returning :class:`MultiIndex` instead of returning :class:`Index`. (:issue:`58291`)
738742
- Bug in :meth:`DataFrameGroupBy.cumsum` and :meth:`DataFrameGroupBy.cumprod` where ``numeric_only`` parameter was passed indirectly through kwargs instead of passing directly. (:issue:`58811`)
@@ -797,7 +801,9 @@ Other
797801
- Bug in :meth:`read_csv` where chained fsspec TAR file and ``compression="infer"`` fails with ``tarfile.ReadError`` (:issue:`60028`)
798802
- Bug in Dataframe Interchange Protocol implementation was returning incorrect results for data buffers' associated dtype, for string and datetime columns (:issue:`54781`)
799803
- Bug in ``Series.list`` methods not preserving the original :class:`Index`. (:issue:`58425`)
804+
- Bug in ``Series.list`` methods not preserving the original name. (:issue:`60522`)
800805
- Bug in printing a :class:`DataFrame` with a :class:`DataFrame` stored in :attr:`DataFrame.attrs` raised a ``ValueError`` (:issue:`60455`)
806+
- Bug in printing a :class:`Series` with a :class:`DataFrame` stored in :attr:`Series.attrs` raised a ``ValueError`` (:issue:`60568`)
801807

802808
.. ***DO NOT USE THIS SECTION***
803809

pandas/core/arrays/arrow/accessors.py

Lines changed: 20 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -117,7 +117,10 @@ def len(self) -> Series:
117117

118118
value_lengths = pc.list_value_length(self._pa_array)
119119
return Series(
120-
value_lengths, dtype=ArrowDtype(value_lengths.type), index=self._data.index
120+
value_lengths,
121+
dtype=ArrowDtype(value_lengths.type),
122+
index=self._data.index,
123+
name=self._data.name,
121124
)
122125

123126
def __getitem__(self, key: int | slice) -> Series:
@@ -162,7 +165,10 @@ def __getitem__(self, key: int | slice) -> Series:
162165
# key = pc.add(key, pc.list_value_length(self._pa_array))
163166
element = pc.list_element(self._pa_array, key)
164167
return Series(
165-
element, dtype=ArrowDtype(element.type), index=self._data.index
168+
element,
169+
dtype=ArrowDtype(element.type),
170+
index=self._data.index,
171+
name=self._data.name,
166172
)
167173
elif isinstance(key, slice):
168174
if pa_version_under11p0:
@@ -181,7 +187,12 @@ def __getitem__(self, key: int | slice) -> Series:
181187
if step is None:
182188
step = 1
183189
sliced = pc.list_slice(self._pa_array, start, stop, step)
184-
return Series(sliced, dtype=ArrowDtype(sliced.type), index=self._data.index)
190+
return Series(
191+
sliced,
192+
dtype=ArrowDtype(sliced.type),
193+
index=self._data.index,
194+
name=self._data.name,
195+
)
185196
else:
186197
raise ValueError(f"key must be an int or slice, got {type(key).__name__}")
187198

@@ -223,7 +234,12 @@ def flatten(self) -> Series:
223234
counts = pa.compute.list_value_length(self._pa_array)
224235
flattened = pa.compute.list_flatten(self._pa_array)
225236
index = self._data.index.repeat(counts.fill_null(pa.scalar(0, counts.type)))
226-
return Series(flattened, dtype=ArrowDtype(flattened.type), index=index)
237+
return Series(
238+
flattened,
239+
dtype=ArrowDtype(flattened.type),
240+
index=index,
241+
name=self._data.name,
242+
)
227243

228244

229245
class StructAccessor(ArrowAccessor):

pandas/core/arrays/interval.py

Lines changed: 14 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1306,6 +1306,20 @@ def length(self) -> Index:
13061306
"""
13071307
Return an Index with entries denoting the length of each Interval.
13081308
1309+
The length of an interval is calculated as the difference between
1310+
its `right` and `left` bounds. This property is particularly useful
1311+
when working with intervals where the size of the interval is an important
1312+
attribute, such as in time-series analysis or spatial data analysis.
1313+
1314+
See Also
1315+
--------
1316+
arrays.IntervalArray.left : Return the left endpoints of each Interval in
1317+
the IntervalArray as an Index.
1318+
arrays.IntervalArray.right : Return the right endpoints of each Interval in
1319+
the IntervalArray as an Index.
1320+
arrays.IntervalArray.mid : Return the midpoint of each Interval in the
1321+
IntervalArray as an Index.
1322+
13091323
Examples
13101324
--------
13111325

0 commit comments

Comments
 (0)