Skip to content

Commit 88bf2bf

Browse files
committed
Merge remote-tracking branch 'upstream/main' into debug/ft
2 parents a866149 + 659eecf commit 88bf2bf

File tree

20 files changed: +205 additions, −89 deletions

ci/code_checks.sh

Lines changed: 0 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -81,7 +81,6 @@ if [[ -z "$CHECK" || "$CHECK" == "docstrings" ]]; then
8181
-i "pandas.Timestamp.resolution PR02" \
8282
-i "pandas.Timestamp.tzinfo GL08" \
8383
-i "pandas.arrays.ArrowExtensionArray PR07,SA01" \
84-
-i "pandas.arrays.IntervalArray.length SA01" \
8584
-i "pandas.arrays.NumpyExtensionArray SA01" \
8685
-i "pandas.arrays.TimedeltaArray PR07,SA01" \
8786
-i "pandas.core.groupby.DataFrameGroupBy.plot PR02" \
@@ -94,11 +93,8 @@ if [[ -z "$CHECK" || "$CHECK" == "docstrings" ]]; then
9493
-i "pandas.core.resample.Resampler.std SA01" \
9594
-i "pandas.core.resample.Resampler.transform PR01,RT03,SA01" \
9695
-i "pandas.core.resample.Resampler.var SA01" \
97-
-i "pandas.errors.UndefinedVariableError PR01,SA01" \
9896
-i "pandas.errors.ValueLabelTypeMismatch SA01" \
99-
-i "pandas.io.json.build_table_schema PR07,RT03,SA01" \
10097
-i "pandas.plotting.andrews_curves RT03,SA01" \
101-
-i "pandas.plotting.scatter_matrix PR07,SA01" \
10298
-i "pandas.tseries.offsets.BDay PR02,SA01" \
10399
-i "pandas.tseries.offsets.BQuarterBegin.is_on_offset GL08" \
104100
-i "pandas.tseries.offsets.BQuarterBegin.n GL08" \

doc/source/whatsnew/v3.0.0.rst

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -56,6 +56,7 @@ Other enhancements
5656
- :meth:`DataFrame.plot.scatter` argument ``c`` now accepts a column of strings, where rows with the same string are colored identically (:issue:`16827` and :issue:`16485`)
5757
- :func:`read_parquet` accepts ``to_pandas_kwargs`` which are forwarded to :meth:`pyarrow.Table.to_pandas` which enables passing additional keywords to customize the conversion to pandas, such as ``maps_as_pydicts`` to read the Parquet map data type as python dictionaries (:issue:`56842`)
5858
- :meth:`DataFrameGroupBy.transform`, :meth:`SeriesGroupBy.transform`, :meth:`DataFrameGroupBy.agg`, :meth:`SeriesGroupBy.agg`, :meth:`RollingGroupby.apply`, :meth:`ExpandingGroupby.apply`, :meth:`Rolling.apply`, :meth:`Expanding.apply`, :meth:`DataFrame.apply` with ``engine="numba"`` now supports positional arguments passed as kwargs (:issue:`58995`)
59+
- :meth:`Rolling.agg`, :meth:`Expanding.agg` and :meth:`ExponentialMovingWindow.agg` now accept :class:`NamedAgg` aggregations through ``**kwargs`` (:issue:`28333`)
5960
- :meth:`Series.map` can now accept kwargs to pass on to func (:issue:`59814`)
6061
- :meth:`pandas.concat` will raise a ``ValueError`` when ``ignore_index=True`` and ``keys`` is not ``None`` (:issue:`59274`)
6162
- :meth:`str.get_dummies` now accepts a ``dtype`` parameter to specify the dtype of the resulting DataFrame (:issue:`47872`)
@@ -801,6 +802,7 @@ Other
801802
- Bug in ``Series.list`` methods not preserving the original :class:`Index`. (:issue:`58425`)
802803
- Bug in ``Series.list`` methods not preserving the original name. (:issue:`60522`)
803804
- Bug in printing a :class:`DataFrame` with a :class:`DataFrame` stored in :attr:`DataFrame.attrs` raised a ``ValueError`` (:issue:`60455`)
805+
- Bug in printing a :class:`Series` with a :class:`DataFrame` stored in :attr:`Series.attrs` raised a ``ValueError`` (:issue:`60568`)
804806

805807
.. ***DO NOT USE THIS SECTION***
806808

pandas/core/arrays/interval.py

Lines changed: 14 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1306,6 +1306,20 @@ def length(self) -> Index:
13061306
"""
13071307
Return an Index with entries denoting the length of each Interval.
13081308
1309+
The length of an interval is calculated as the difference between
1310+
its `right` and `left` bounds. This property is particularly useful
1311+
when working with intervals where the size of the interval is an important
1312+
attribute, such as in time-series analysis or spatial data analysis.
1313+
1314+
See Also
1315+
--------
1316+
arrays.IntervalArray.left : Return the left endpoints of each Interval in
1317+
the IntervalArray as an Index.
1318+
arrays.IntervalArray.right : Return the right endpoints of each Interval in
1319+
the IntervalArray as an Index.
1320+
arrays.IntervalArray.mid : Return the midpoint of each Interval in the
1321+
IntervalArray as an Index.
1322+
13091323
Examples
13101324
--------
13111325

pandas/core/window/ewm.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -490,7 +490,7 @@ def online(
490490
klass="Series/Dataframe",
491491
axis="",
492492
)
493-
def aggregate(self, func, *args, **kwargs):
493+
def aggregate(self, func=None, *args, **kwargs):
494494
return super().aggregate(func, *args, **kwargs)
495495

496496
agg = aggregate
@@ -981,7 +981,7 @@ def reset(self) -> None:
981981
"""
982982
self._mean.reset()
983983

984-
def aggregate(self, func, *args, **kwargs):
984+
def aggregate(self, func=None, *args, **kwargs):
985985
raise NotImplementedError("aggregate is not implemented.")
986986

987987
def std(self, bias: bool = False, *args, **kwargs):

pandas/core/window/expanding.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -167,7 +167,7 @@ def _get_window_indexer(self) -> BaseIndexer:
167167
klass="Series/Dataframe",
168168
axis="",
169169
)
170-
def aggregate(self, func, *args, **kwargs):
170+
def aggregate(self, func=None, *args, **kwargs):
171171
return super().aggregate(func, *args, **kwargs)
172172

173173
agg = aggregate

pandas/core/window/rolling.py

Lines changed: 11 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -44,7 +44,10 @@
4444

4545
from pandas.core._numba import executor
4646
from pandas.core.algorithms import factorize
47-
from pandas.core.apply import ResamplerWindowApply
47+
from pandas.core.apply import (
48+
ResamplerWindowApply,
49+
reconstruct_func,
50+
)
4851
from pandas.core.arrays import ExtensionArray
4952
from pandas.core.base import SelectionMixin
5053
import pandas.core.common as com
@@ -646,8 +649,12 @@ def _numba_apply(
646649
out = obj._constructor(result, index=index, columns=columns)
647650
return self._resolve_output(out, obj)
648651

649-
def aggregate(self, func, *args, **kwargs):
652+
def aggregate(self, func=None, *args, **kwargs):
653+
relabeling, func, columns, order = reconstruct_func(func, **kwargs)
650654
result = ResamplerWindowApply(self, func, args=args, kwargs=kwargs).agg()
655+
if isinstance(result, ABCDataFrame) and relabeling:
656+
result = result.iloc[:, order]
657+
result.columns = columns # type: ignore[union-attr]
651658
if result is None:
652659
return self.apply(func, raw=False, args=args, kwargs=kwargs)
653660
return result
@@ -1239,7 +1246,7 @@ def calc(x):
12391246
klass="Series/DataFrame",
12401247
axis="",
12411248
)
1242-
def aggregate(self, func, *args, **kwargs):
1249+
def aggregate(self, func=None, *args, **kwargs):
12431250
result = ResamplerWindowApply(self, func, args=args, kwargs=kwargs).agg()
12441251
if result is None:
12451252
# these must apply directly
@@ -1951,7 +1958,7 @@ def _raise_monotonic_error(self, msg: str):
19511958
klass="Series/Dataframe",
19521959
axis="",
19531960
)
1954-
def aggregate(self, func, *args, **kwargs):
1961+
def aggregate(self, func=None, *args, **kwargs):
19551962
return super().aggregate(func, *args, **kwargs)
19561963

19571964
agg = aggregate

pandas/errors/__init__.py

Lines changed: 14 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -588,6 +588,20 @@ class UndefinedVariableError(NameError):
588588
589589
It will also specify whether the undefined variable is local or not.
590590
591+
Parameters
592+
----------
593+
name : str
594+
The name of the undefined variable.
595+
is_local : bool or None, optional
596+
Indicates whether the undefined variable is considered a local variable.
597+
If ``True``, the error message specifies it as a local variable.
598+
If ``False`` or ``None``, the variable is treated as a non-local name.
599+
600+
See Also
601+
--------
602+
DataFrame.query : Query the columns of a DataFrame with a boolean expression.
603+
DataFrame.eval : Evaluate a string describing operations on DataFrame columns.
604+
591605
Examples
592606
--------
593607
>>> df = pd.DataFrame({"A": [1, 1, 1]})

pandas/io/formats/format.py

Lines changed: 5 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -78,7 +78,6 @@
7878
)
7979
from pandas.core.indexes.datetimes import DatetimeIndex
8080
from pandas.core.indexes.timedeltas import TimedeltaIndex
81-
from pandas.core.reshape.concat import concat
8281

8382
from pandas.io.common import (
8483
check_parent_directory,
@@ -245,7 +244,11 @@ def _chk_truncate(self) -> None:
245244
series = series.iloc[:max_rows]
246245
else:
247246
row_num = max_rows // 2
248-
series = concat((series.iloc[:row_num], series.iloc[-row_num:]))
247+
_len = len(series)
248+
_slice = np.hstack(
249+
[np.arange(row_num), np.arange(_len - row_num, _len)]
250+
)
251+
series = series.iloc[_slice]
249252
self.tr_row_num = row_num
250253
else:
251254
self.tr_row_num = None

pandas/io/json/_table_schema.py

Lines changed: 14 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -239,9 +239,16 @@ def build_table_schema(
239239
"""
240240
Create a Table schema from ``data``.
241241
242+
This method is a utility to generate a JSON-serializable schema
243+
representation of a pandas Series or DataFrame, compatible with the
244+
Table Schema specification. It enables structured data to be shared
245+
and validated in various applications, ensuring consistency and
246+
interoperability.
247+
242248
Parameters
243249
----------
244-
data : Series, DataFrame
250+
data : Series or DataFrame
251+
The input data for which the table schema is to be created.
245252
index : bool, default True
246253
Whether to include ``data.index`` in the schema.
247254
primary_key : bool or None, default True
@@ -256,6 +263,12 @@ def build_table_schema(
256263
Returns
257264
-------
258265
dict
266+
A dictionary representing the Table schema.
267+
268+
See Also
269+
--------
270+
DataFrame.to_json : Convert the object to a JSON string.
271+
read_json : Convert a JSON string to pandas object.
259272
260273
Notes
261274
-----

pandas/io/sql.py

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -241,7 +241,7 @@ def read_sql_table( # pyright: ignore[reportOverlappingOverload]
241241
schema=...,
242242
index_col: str | list[str] | None = ...,
243243
coerce_float=...,
244-
parse_dates: list[str] | dict[str, str] | None = ...,
244+
parse_dates: list[str] | dict[str, str] | dict[str, dict[str, Any]] | None = ...,
245245
columns: list[str] | None = ...,
246246
chunksize: None = ...,
247247
dtype_backend: DtypeBackend | lib.NoDefault = ...,
@@ -255,7 +255,7 @@ def read_sql_table(
255255
schema=...,
256256
index_col: str | list[str] | None = ...,
257257
coerce_float=...,
258-
parse_dates: list[str] | dict[str, str] | None = ...,
258+
parse_dates: list[str] | dict[str, str] | dict[str, dict[str, Any]] | None = ...,
259259
columns: list[str] | None = ...,
260260
chunksize: int = ...,
261261
dtype_backend: DtypeBackend | lib.NoDefault = ...,
@@ -268,7 +268,7 @@ def read_sql_table(
268268
schema: str | None = None,
269269
index_col: str | list[str] | None = None,
270270
coerce_float: bool = True,
271-
parse_dates: list[str] | dict[str, str] | None = None,
271+
parse_dates: list[str] | dict[str, str] | dict[str, dict[str, Any]] | None = None,
272272
columns: list[str] | None = None,
273273
chunksize: int | None = None,
274274
dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default,
@@ -372,7 +372,7 @@ def read_sql_query( # pyright: ignore[reportOverlappingOverload]
372372
index_col: str | list[str] | None = ...,
373373
coerce_float=...,
374374
params: list[Any] | Mapping[str, Any] | None = ...,
375-
parse_dates: list[str] | dict[str, str] | None = ...,
375+
parse_dates: list[str] | dict[str, str] | dict[str, dict[str, Any]] | None = ...,
376376
chunksize: None = ...,
377377
dtype: DtypeArg | None = ...,
378378
dtype_backend: DtypeBackend | lib.NoDefault = ...,
@@ -386,7 +386,7 @@ def read_sql_query(
386386
index_col: str | list[str] | None = ...,
387387
coerce_float=...,
388388
params: list[Any] | Mapping[str, Any] | None = ...,
389-
parse_dates: list[str] | dict[str, str] | None = ...,
389+
parse_dates: list[str] | dict[str, str] | dict[str, dict[str, Any]] | None = ...,
390390
chunksize: int = ...,
391391
dtype: DtypeArg | None = ...,
392392
dtype_backend: DtypeBackend | lib.NoDefault = ...,
@@ -399,7 +399,7 @@ def read_sql_query(
399399
index_col: str | list[str] | None = None,
400400
coerce_float: bool = True,
401401
params: list[Any] | Mapping[str, Any] | None = None,
402-
parse_dates: list[str] | dict[str, str] | None = None,
402+
parse_dates: list[str] | dict[str, str] | dict[str, dict[str, Any]] | None = None,
403403
chunksize: int | None = None,
404404
dtype: DtypeArg | None = None,
405405
dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default,

0 commit comments

Comments (0)