14 changes: 13 additions & 1 deletion ci/code_checks.sh
@@ -70,8 +70,10 @@ if [[ -z "$CHECK" || "$CHECK" == "docstrings" ]]; then
--format=actions \
-i ES01 `# For now it is ok if docstrings are missing the extended summary` \
-i "pandas.Series.dt PR01" `# Accessors are implemented as classes, but we do not document the Parameters section` \
-i "pandas.NA SA01" \
-i "pandas.Period.freq GL08" \
-i "pandas.Period.ordinal GL08" \
-i "pandas.PeriodDtype.freq SA01" \
-i "pandas.RangeIndex.from_range PR01,SA01" \
-i "pandas.Series.cat.add_categories PR01,PR02" \
-i "pandas.Series.cat.as_ordered PR01" \
@@ -101,14 +103,14 @@ if [[ -z "$CHECK" || "$CHECK" == "docstrings" ]]; then
-i "pandas.Timedelta.max PR02" \
-i "pandas.Timedelta.min PR02" \
-i "pandas.Timedelta.resolution PR02" \
-i "pandas.TimedeltaIndex.to_pytimedelta RT03,SA01" \
-i "pandas.Timestamp.max PR02" \
-i "pandas.Timestamp.min PR02" \
-i "pandas.Timestamp.nanosecond GL08" \
-i "pandas.Timestamp.resolution PR02" \
-i "pandas.Timestamp.tzinfo GL08" \
-i "pandas.Timestamp.year GL08" \
-i "pandas.api.types.is_dict_like PR07,SA01" \
-i "pandas.api.types.is_extension_array_dtype SA01" \
-i "pandas.api.types.is_file_like PR07,SA01" \
-i "pandas.api.types.is_float PR01,SA01" \
-i "pandas.api.types.is_hashable PR01,RT03,SA01" \
@@ -120,6 +122,7 @@ if [[ -z "$CHECK" || "$CHECK" == "docstrings" ]]; then
-i "pandas.api.types.pandas_dtype PR07,RT03,SA01" \
-i "pandas.arrays.ArrowExtensionArray PR07,SA01" \
-i "pandas.arrays.DatetimeArray SA01" \
-i "pandas.arrays.FloatingArray SA01" \
-i "pandas.arrays.IntegerArray SA01" \
-i "pandas.arrays.IntervalArray.left SA01" \
-i "pandas.arrays.IntervalArray.length SA01" \
@@ -133,12 +136,16 @@ if [[ -z "$CHECK" || "$CHECK" == "docstrings" ]]; then
-i "pandas.core.groupby.DataFrameGroupBy.boxplot PR07,RT03,SA01" \
-i "pandas.core.groupby.DataFrameGroupBy.get_group RT03,SA01" \
-i "pandas.core.groupby.DataFrameGroupBy.groups SA01" \
-i "pandas.core.groupby.DataFrameGroupBy.hist RT03" \
-i "pandas.core.groupby.DataFrameGroupBy.indices SA01" \
-i "pandas.core.groupby.DataFrameGroupBy.max SA01" \
-i "pandas.core.groupby.DataFrameGroupBy.min SA01" \
-i "pandas.core.groupby.DataFrameGroupBy.nth PR02" \
-i "pandas.core.groupby.DataFrameGroupBy.nunique SA01" \
-i "pandas.core.groupby.DataFrameGroupBy.ohlc SA01" \
-i "pandas.core.groupby.DataFrameGroupBy.plot PR02" \
-i "pandas.core.groupby.DataFrameGroupBy.sem SA01" \
-i "pandas.core.groupby.DataFrameGroupBy.sum SA01" \
-i "pandas.core.groupby.SeriesGroupBy.__iter__ RT03,SA01" \
-i "pandas.core.groupby.SeriesGroupBy.agg RT03" \
-i "pandas.core.groupby.SeriesGroupBy.aggregate RT03" \
@@ -147,10 +154,13 @@ if [[ -z "$CHECK" || "$CHECK" == "docstrings" ]]; then
-i "pandas.core.groupby.SeriesGroupBy.indices SA01" \
-i "pandas.core.groupby.SeriesGroupBy.is_monotonic_decreasing SA01" \
-i "pandas.core.groupby.SeriesGroupBy.is_monotonic_increasing SA01" \
-i "pandas.core.groupby.SeriesGroupBy.max SA01" \
-i "pandas.core.groupby.SeriesGroupBy.min SA01" \
-i "pandas.core.groupby.SeriesGroupBy.nth PR02" \
-i "pandas.core.groupby.SeriesGroupBy.ohlc SA01" \
-i "pandas.core.groupby.SeriesGroupBy.plot PR02" \
-i "pandas.core.groupby.SeriesGroupBy.sem SA01" \
-i "pandas.core.groupby.SeriesGroupBy.sum SA01" \
-i "pandas.core.resample.Resampler.__iter__ RT03,SA01" \
-i "pandas.core.resample.Resampler.get_group RT03,SA01" \
-i "pandas.core.resample.Resampler.groups SA01" \
@@ -176,6 +186,7 @@ if [[ -z "$CHECK" || "$CHECK" == "docstrings" ]]; then
-i "pandas.errors.IntCastingNaNError SA01" \
-i "pandas.errors.InvalidIndexError SA01" \
-i "pandas.errors.InvalidVersion SA01" \
-i "pandas.errors.MergeError SA01" \
-i "pandas.errors.NullFrequencyError SA01" \
-i "pandas.errors.NumExprClobberingError SA01" \
-i "pandas.errors.NumbaUtilError SA01" \
@@ -367,6 +378,7 @@ if [[ -z "$CHECK" || "$CHECK" == "docstrings" ]]; then
-i "pandas.tseries.offsets.Week.n GL08" \
-i "pandas.tseries.offsets.Week.normalize GL08" \
-i "pandas.tseries.offsets.Week.weekday GL08" \
-i "pandas.tseries.offsets.WeekOfMonth SA01" \
-i "pandas.tseries.offsets.WeekOfMonth.is_on_offset GL08" \
-i "pandas.tseries.offsets.WeekOfMonth.n GL08" \
-i "pandas.tseries.offsets.WeekOfMonth.normalize GL08" \
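Dropping an `-i` ignore from this list means the corresponding docstring must now pass the named numpydoc checks (here RT03, "Return value has no description", and SA01, "See Also section not found"). A minimal sketch of checking a single docstring locally with numpydoc's validator; this assumes `numpydoc` is installed and is not the exact command CI runs, which goes through `scripts/validate_docstrings.py`:

from numpydoc.validate import validate

# Validate one docstring by its import path; numpydoc resolves the object itself.
result = validate("pandas.TimedeltaIndex.to_pytimedelta")

# result["errors"] is a list of (code, message) pairs,
# e.g. ("SA01", "See Also section not found").
for code, message in result["errors"]:
    print(code, message)

# Once RT03 and SA01 no longer appear, the ignore line can be dropped from code_checks.sh.
assert not any(code in {"RT03", "SA01"} for code, _ in result["errors"])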
24 changes: 23 additions & 1 deletion pandas/core/arrays/timedeltas.py
@@ -789,7 +789,20 @@ def to_pytimedelta(self) -> npt.NDArray[np.object_]:

Returns
-------
numpy.ndarray
An object-dtype NumPy array of ``datetime.timedelta`` objects, each
representing the same duration as the corresponding element of the
original ``TimedeltaIndex``. Since ``datetime.timedelta`` stores at most
microsecond resolution, any nanosecond component of the original data
is truncated during the conversion.

See Also
--------
to_timedelta : Convert argument to timedelta format.
Timedelta : Represents a duration between two dates or times.
DatetimeIndex : Index of datetime64 data.
Timedelta.components : Return a components namedtuple-like
of a single timedelta.

Examples
--------
@@ -800,7 +813,16 @@ def to_pytimedelta(self) -> npt.NDArray[np.object_]:
>>> tdelta_idx.to_pytimedelta()
array([datetime.timedelta(days=1), datetime.timedelta(days=2),
datetime.timedelta(days=3)], dtype=object)

>>> tidx = pd.TimedeltaIndex(data=["1 days 02:30:45", "3 days 04:15:10"])
>>> tidx
TimedeltaIndex(['1 days 02:30:45', '3 days 04:15:10'],
dtype='timedelta64[ns]', freq=None)
>>> tidx.to_pytimedelta()
array([datetime.timedelta(days=1, seconds=9045),
datetime.timedelta(days=3, seconds=15310)], dtype=object)
"""

return ints_to_pytimedelta(self._ndarray)

days_docstring = textwrap.dedent(
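As a side note on the reworded Returns section: ``datetime.timedelta`` cannot represent nanoseconds, so sub-microsecond precision is lost on conversion. A small illustrative sketch (the input value is hypothetical, not taken from this diff):

import pandas as pd

# Hypothetical index whose single element carries a 123-nanosecond component.
tdi = pd.TimedeltaIndex(["1 days 00:00:00.000000123"])

print(tdi[0].nanoseconds)    # 123 -- pandas keeps nanosecond resolution
pydeltas = tdi.to_pytimedelta()
print(type(pydeltas[0]))     # <class 'datetime.timedelta'>
print(pydeltas[0])           # 1 day, 0:00:00 -- the 123 ns are truncated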