1 change: 1 addition & 0 deletions doc/source/whatsnew/v0.25.1.rst
@@ -120,6 +120,7 @@ Groupby/resample/rolling
- Bug in :meth:`pandas.core.groupby.DataFrameGroupBy.transform` where applying a timezone conversion lambda function would drop timezone information (:issue:`27496`)
- Bug in windowing over read-only arrays (:issue:`27766`)
- Fixed segfault in `pandas.core.groupby.DataFrameGroupBy.quantile` when an invalid quantile was passed (:issue:`27470`)
- Bug in :meth:`pandas.core.groupby.GroupBy.shift`, :meth:`pandas.core.groupby.GroupBy.bfill` and :meth:`pandas.core.groupby.GroupBy.ffill` where timezone information would be dropped (:issue:`27992`)
Contributor:
can you move to 0.25.2

-

Reshaping
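For context, a minimal reproduction of the behaviour covered by the new whatsnew entry (a sketch assuming a pandas build that includes this patch; before it, the grouped result came back timezone-naive):

    import pandas as pd

    df = pd.DataFrame(
        {
            "id": ["A", "B", "A", "B"],
            "time": pd.to_datetime(
                ["2019-01-01 12:00", "2019-01-01 12:30",
                 "2019-01-01 14:00", "2019-01-01 14:30"]
            ).tz_localize("Asia/Tokyo"),
        }
    )

    # With the fix, the timezone survives the grouped shift (same for bfill/ffill).
    print(df.groupby("id").shift()["time"].dtype)
    # datetime64[ns, Asia/Tokyo]  (previously: datetime64[ns])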
12 changes: 7 additions & 5 deletions pandas/core/groupby/groupby.py
@@ -2220,26 +2220,28 @@ def _get_cythonized_result(
         base_func = getattr(libgroupby, how)

         for name, obj in self._iterate_slices():
+            values = obj._data._values
Member:
Hmm I'm not sure relying on underlying block values is the best way to go about this. Is it possible to just work with the obj here instead?

Contributor Author:
Sorry for my lack of understanding about underlying block values.
Should I leave the code using obj.values except for the last result = algorithms.take_nd(obj.values, result)?
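For readers following this thread: obj.values coerces timezone-aware data to a plain datetime64[ns] ndarray, while the underlying block values keep the timezone, which is what the patch relies on. A small sketch of that difference, shown here with the public .array accessor rather than the private attribute the diff touches:

    import pandas as pd

    s = pd.Series(pd.date_range("2019-01-01", periods=2, tz="Asia/Tokyo"))

    print(s.values.dtype)  # datetime64[ns] -> timezone dropped (the GH27992 symptom)
    print(s.array.dtype)   # datetime64[ns, Asia/Tokyo] -> timezone preserved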


             if aggregate:
                 result_sz = ngroups
             else:
-                result_sz = len(obj.values)
+                result_sz = len(values)

             if not cython_dtype:
-                cython_dtype = obj.values.dtype
+                cython_dtype = values.dtype

             result = np.zeros(result_sz, dtype=cython_dtype)
             func = partial(base_func, result, labels)
             inferences = None

             if needs_values:
-                vals = obj.values
+                vals = values
                 if pre_processing:
                     vals, inferences = pre_processing(vals)
                 func = partial(func, vals)

             if needs_mask:
-                mask = isna(obj.values).view(np.uint8)
+                mask = isna(values).view(np.uint8)
                 func = partial(func, mask)

             if needs_ngroups:
@@ -2248,7 +2250,7 @@
             func(**kwargs)  # Call func to modify indexer values in place

             if result_is_index:
-                result = algorithms.take_nd(obj.values, result)
+                result = algorithms.take_nd(values, result)

             if post_processing:
                 result = post_processing(result, inferences)
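To illustrate the result_is_index branch above: the cython routine produces positions, and the final take gathers from the original values, so whatever array is passed in determines whether the timezone survives. A sketch using the public DatetimeArray.take in place of the internal algorithms.take_nd call:

    import numpy as np
    import pandas as pd

    arr = pd.array(
        pd.to_datetime(
            ["2019-01-01 12:00", None, "2019-01-01 14:00"]
        ).tz_localize("Asia/Tokyo")
    )
    indexer = np.array([0, 0, 2])  # ffill-style positions: row 1 reuses row 0

    # Taking from the tz-aware array keeps the timezone in the result.
    print(arr.take(indexer).dtype)  # datetime64[ns, Asia/Tokyo]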
66 changes: 66 additions & 0 deletions pandas/tests/groupby/test_groupby.py
@@ -1882,3 +1882,69 @@ def test_groupby_axis_1(group_name):
    results = df.groupby(group_name, axis=1).sum()
    expected = df.T.groupby(group_name).sum().T
    assert_frame_equal(results, expected)


@pytest.mark.parametrize("tz", [None, "Asia/Tokyo"])
Member:
We have a tz_naive_fixture you can use in the function signature that will parameterize over more timezones.

def test_shift_bfill_ffill_tz(tz_naive_fixture, op, expected):
    tz = tz_naive_fixture
    ...
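For reference, a rough sketch of what such a fixture looks like (an illustration only; the real tz_naive_fixture is defined in pandas/conftest.py and covers more timezone flavours, including dateutil and fixed-offset variants):

    import pytest

    # Simplified stand-in for pandas' tz_naive_fixture: parametrizes a test
    # over tz-naive (None) plus a few concrete timezones.
    @pytest.fixture(params=[None, "UTC", "US/Eastern", "Asia/Tokyo"])
    def tz_naive_fixture(request):
        return request.param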

@pytest.mark.parametrize(
    "op, expected",
    [
        (
            "shift",
            {
                "time": [
                    None,
                    None,
                    Timestamp("2019-01-01 12:00:00"),
                    Timestamp("2019-01-01 12:30:00"),
                    None,
                    None,
                ]
            },
        ),
        (
            "bfill",
            {
                "time": [
                    Timestamp("2019-01-01 12:00:00"),
                    Timestamp("2019-01-01 12:30:00"),
                    Timestamp("2019-01-01 14:00:00"),
                    Timestamp("2019-01-01 14:30:00"),
                    Timestamp("2019-01-01 14:00:00"),
                    Timestamp("2019-01-01 14:30:00"),
                ]
            },
        ),
        (
            "ffill",
            {
                "time": [
                    Timestamp("2019-01-01 12:00:00"),
                    Timestamp("2019-01-01 12:30:00"),
                    Timestamp("2019-01-01 12:00:00"),
                    Timestamp("2019-01-01 12:30:00"),
                    Timestamp("2019-01-01 14:00:00"),
                    Timestamp("2019-01-01 14:30:00"),
                ]
            },
        ),
    ],
)
def test_shift_bfill_ffill_tz(tz, op, expected):
    # GH27992: Check that timezone does not drop in shift, bfill, and ffill
    data = {
        "id": ["A", "B", "A", "B", "A", "B"],
        "time": [
            Timestamp("2019-01-01 12:00:00"),
            Timestamp("2019-01-01 12:30:00"),
            None,
            None,
            Timestamp("2019-01-01 14:00:00"),
            Timestamp("2019-01-01 14:30:00"),
        ],
    }
    df = DataFrame(data).assign(time=lambda x: x.time.dt.tz_localize(tz))

    grouped = df.groupby("id")
    result = getattr(grouped, op)()
    expected = DataFrame(expected).assign(time=lambda x: x.time.dt.tz_localize(tz))
    assert_frame_equal(result, expected)
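A quick way to run just the new test locally, assuming a development install of pandas:

    pytest pandas/tests/groupby/test_groupby.py -k "shift_bfill_ffill_tz"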