diff --git a/CHANGELOG.md b/CHANGELOG.md index 04b3c7c2d2bc..850db27b5217 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -76,6 +76,7 @@ Ref: https://keepachangelog.com/en/1.0.0/ * (enterprise/poa) [#25838](https://github.com/cosmos/cosmos-sdk/pull/25838) Add the `poa` module under the `enterprise` directory. * (grpc) [#25850](https://github.com/cosmos/cosmos-sdk/pull/25850) Add `GetBlockResults` and `GetLatestBlockResults` gRPC endpoints to expose CometBFT block results including `finalize_block_events`. * (events) [#25877](https://github.com/cosmos/cosmos-sdk/pull/25877) Add `OverrideEvents` to `EventManagerI`. +* (staking) [#26023](https://github.com/cosmos/cosmos-sdk/pull/26023) Optimize staking end-block queue through using pending queue slots instead of iterators. ### Improvements diff --git a/tests/integration/staking/keeper/common_test.go b/tests/integration/staking/keeper/common_test.go index 4b78cc9195b8..f5174e72419a 100644 --- a/tests/integration/staking/keeper/common_test.go +++ b/tests/integration/staking/keeper/common_test.go @@ -3,6 +3,7 @@ package keeper_test import ( "math/big" "testing" + "time" cmtprototypes "github.com/cometbft/cometbft/proto/tendermint/types" "gotest.tools/v3/assert" @@ -151,7 +152,8 @@ func initFixture(tb testing.TB) *fixture { types.ModuleName: stakingModule, }) - sdkCtx := sdk.UnwrapSDKContext(integrationApp.Context()) + initialTime := time.Date(1970, 1, 1, 0, 0, 0, 0, time.UTC) + sdkCtx := sdk.UnwrapSDKContext(integrationApp.Context()).WithBlockTime(initialTime) // Register MsgServer and QueryServer types.RegisterMsgServer(integrationApp.MsgServiceRouter(), stakingkeeper.NewMsgServerImpl(stakingKeeper)) diff --git a/tests/integration/staking/keeper/slash_test.go b/tests/integration/staking/keeper/slash_test.go index e307057030e3..317f4bf1d478 100644 --- a/tests/integration/staking/keeper/slash_test.go +++ b/tests/integration/staking/keeper/slash_test.go @@ -267,7 +267,8 @@ func TestSlashWithUnbondingDelegation(t 
*testing.T) { // set an unbonding delegation with expiration timestamp beyond which the // unbonding delegation shouldn't be slashed ubdTokens := f.stakingKeeper.TokensFromConsensusPower(f.sdkCtx, 4) - ubd := types.NewUnbondingDelegation(addrDels[0], addrVals[0], 11, time.Unix(0, 0), ubdTokens, 0, address.NewBech32Codec("cosmosvaloper"), address.NewBech32Codec("cosmos")) + completionTime := f.sdkCtx.BlockTime().Add(time.Second) + ubd := types.NewUnbondingDelegation(addrDels[0], addrVals[0], 11, completionTime, ubdTokens, 0, address.NewBech32Codec("cosmosvaloper"), address.NewBech32Codec("cosmos")) assert.NilError(t, f.stakingKeeper.SetUnbondingDelegation(f.sdkCtx, ubd)) // slash validator for the first time @@ -397,7 +398,8 @@ func TestSlashWithRedelegation(t *testing.T) { // set a redelegation rdTokens := f.stakingKeeper.TokensFromConsensusPower(f.sdkCtx, 6) - rd := types.NewRedelegation(addrDels[0], addrVals[0], addrVals[1], 11, time.Unix(0, 0), rdTokens, math.LegacyNewDecFromInt(rdTokens), 0, address.NewBech32Codec("cosmosvaloper"), address.NewBech32Codec("cosmos")) + rdCompletionTime := f.sdkCtx.BlockTime().Add(time.Second) + rd := types.NewRedelegation(addrDels[0], addrVals[0], addrVals[1], 11, rdCompletionTime, rdTokens, math.LegacyNewDecFromInt(rdTokens), 0, address.NewBech32Codec("cosmosvaloper"), address.NewBech32Codec("cosmos")) assert.NilError(t, f.stakingKeeper.SetRedelegation(f.sdkCtx, rd)) // set the associated delegation @@ -555,7 +557,8 @@ func TestSlashBoth(t *testing.T) { // set a redelegation with expiration timestamp beyond which the // redelegation shouldn't be slashed rdATokens := f.stakingKeeper.TokensFromConsensusPower(f.sdkCtx, 6) - rdA := types.NewRedelegation(addrDels[0], addrVals[0], addrVals[1], 11, time.Unix(0, 0), rdATokens, math.LegacyNewDecFromInt(rdATokens), 0, address.NewBech32Codec("cosmosvaloper"), address.NewBech32Codec("cosmos")) + rdCompletionTime := f.sdkCtx.BlockTime().Add(time.Second) + rdA := 
types.NewRedelegation(addrDels[0], addrVals[0], addrVals[1], 11, rdCompletionTime, rdATokens, math.LegacyNewDecFromInt(rdATokens), 0, address.NewBech32Codec("cosmosvaloper"), address.NewBech32Codec("cosmos")) assert.NilError(t, f.stakingKeeper.SetRedelegation(f.sdkCtx, rdA)) // set the associated delegation @@ -565,8 +568,9 @@ func TestSlashBoth(t *testing.T) { // set an unbonding delegation with expiration timestamp (beyond which the // unbonding delegation shouldn't be slashed) ubdATokens := f.stakingKeeper.TokensFromConsensusPower(f.sdkCtx, 4) + ubdCompletionTime := f.sdkCtx.BlockTime().Add(time.Second) ubdA := types.NewUnbondingDelegation(addrDels[0], addrVals[0], 11, - time.Unix(0, 0), ubdATokens, 0, address.NewBech32Codec("cosmosvaloper"), address.NewBech32Codec("cosmos")) + ubdCompletionTime, ubdATokens, 0, address.NewBech32Codec("cosmosvaloper"), address.NewBech32Codec("cosmos")) assert.NilError(t, f.stakingKeeper.SetUnbondingDelegation(f.sdkCtx, ubdA)) bondedCoins := sdk.NewCoins(sdk.NewCoin(bondDenom, rdATokens.MulRaw(2))) diff --git a/x/slashing/keeper/slash_redelegation_test.go b/x/slashing/keeper/slash_redelegation_test.go index bfbbaa3abf58..37e9b7688e0b 100644 --- a/x/slashing/keeper/slash_redelegation_test.go +++ b/x/slashing/keeper/slash_redelegation_test.go @@ -37,8 +37,10 @@ func TestSlashRedelegation(t *testing.T) { ), &stakingKeeper, &bankKeeper, &slashKeeper, &distrKeeper) require.NoError(t, err) + initialTime := time.Date(1970, 1, 1, 0, 0, 0, 0, time.UTC) + ctx := app.NewContext(false).WithBlockTime(initialTime) + // get sdk context, staking msg server and bond denom - ctx := app.NewContext(false) stakingMsgServer := stakingkeeper.NewMsgServerImpl(stakingKeeper) bondDenom, err := stakingKeeper.BondDenom(ctx) require.NoError(t, err) diff --git a/x/staking/keeper/delegation.go b/x/staking/keeper/delegation.go index bf5e62351381..516659e3a9fe 100644 --- a/x/staking/keeper/delegation.go +++ b/x/staking/keeper/delegation.go @@ -7,7 +7,6 @@ import 
( "fmt" "time" - corestore "cosmossdk.io/core/store" errorsmod "cosmossdk.io/errors" "cosmossdk.io/math" storetypes "cosmossdk.io/store/types" @@ -490,7 +489,10 @@ func (k Keeper) SetUBDQueueTimeSlice(ctx context.Context, timestamp time.Time, k if err != nil { return err } - return store.Set(types.GetUnbondingDelegationTimeKey(timestamp), bz) + if err = store.Set(types.GetUnbondingDelegationTimeKey(timestamp), bz); err != nil { + return err + } + return k.AddUBDQueuePendingSlot(ctx, timestamp) } // InsertUBDQueue inserts an unbonding delegation to the appropriate timeslice @@ -514,41 +516,52 @@ func (k Keeper) InsertUBDQueue(ctx context.Context, ubd types.UnbondingDelegatio return k.SetUBDQueueTimeSlice(ctx, completionTime, timeSlice) } -// UBDQueueIterator returns all the unbonding queue timeslices from time 0 until endTime. -func (k Keeper) UBDQueueIterator(ctx context.Context, endTime time.Time) (corestore.Iterator, error) { - store := k.storeService.OpenKVStore(ctx) - return store.Iterator(types.UnbondingQueueKey, - storetypes.InclusiveEndBytes(types.GetUnbondingDelegationTimeKey(endTime))) -} - // DequeueAllMatureUBDQueue returns a concatenated list of all the timeslices inclusively previous to -// currTime, and deletes the timeslices from the queue. +// currTime, and deletes the timeslices from the queue. Uses the pending-slot index (populated by +// Migrate5to6); slots are read once and written once (batch update). +// Read phase collects mature timeslices; write phase deletes keys and updates pending slots so that +// on any error no queue keys are deleted and state remains consistent. 
func (k Keeper) DequeueAllMatureUBDQueue(ctx context.Context, currTime time.Time) (matureUnbonds []types.DVPair, err error) { store := k.storeService.OpenKVStore(ctx) - // gets an iterator for all timeslices from time 0 until the current Blockheader time - unbondingTimesliceIterator, err := k.UBDQueueIterator(ctx, currTime) + slots, err := k.GetUBDQueuePendingSlots(ctx) if err != nil { - return matureUnbonds, err + return nil, err + } + if len(slots) == 0 { + return matureUnbonds, nil } - defer unbondingTimesliceIterator.Close() - for ; unbondingTimesliceIterator.Valid(); unbondingTimesliceIterator.Next() { - timeslice := types.DVPairs{} - value := unbondingTimesliceIterator.Value() - if err = k.cdc.Unmarshal(value, ×lice); err != nil { - return matureUnbonds, err + var remaining []time.Time + var keysToDelete [][]byte + for _, t := range slots { + if t.After(currTime) { + remaining = append(remaining, t) + continue + } + queueKey := types.GetUnbondingDelegationTimeKey(t) + bz, err := store.Get(queueKey) + if err != nil { + return nil, err + } + if bz == nil { + continue // already deleted, omit from remaining } + timeslice := types.DVPairs{} + if err = k.cdc.Unmarshal(bz, ×lice); err != nil { + return nil, err + } matureUnbonds = append(matureUnbonds, timeslice.Pairs...) + keysToDelete = append(keysToDelete, queueKey) + } - if err = store.Delete(unbondingTimesliceIterator.Key()); err != nil { + for _, key := range keysToDelete { + if err = store.Delete(key); err != nil { return matureUnbonds, err } - } - - return matureUnbonds, nil + return matureUnbonds, k.SetUBDQueuePendingSlots(ctx, remaining) } // GetRedelegations returns a given amount of all the delegator redelegations. 
@@ -802,7 +815,10 @@ func (k Keeper) SetRedelegationQueueTimeSlice(ctx context.Context, timestamp tim if err != nil { return err } - return store.Set(types.GetRedelegationTimeKey(timestamp), bz) + if err = store.Set(types.GetRedelegationTimeKey(timestamp), bz); err != nil { + return err + } + return k.AddRedelegationQueuePendingSlot(ctx, timestamp) } // InsertRedelegationQueue insert an redelegation delegation to the appropriate @@ -826,42 +842,53 @@ func (k Keeper) InsertRedelegationQueue(ctx context.Context, red types.Redelegat return k.SetRedelegationQueueTimeSlice(ctx, completionTime, timeSlice) } -// RedelegationQueueIterator returns all the redelegation queue timeslices from -// time 0 until endTime. -func (k Keeper) RedelegationQueueIterator(ctx context.Context, endTime time.Time) (storetypes.Iterator, error) { - store := k.storeService.OpenKVStore(ctx) - return store.Iterator(types.RedelegationQueueKey, storetypes.InclusiveEndBytes(types.GetRedelegationTimeKey(endTime))) -} - // DequeueAllMatureRedelegationQueue returns a concatenated list of all the // timeslices inclusively previous to currTime, and deletes the timeslices from -// the queue. +// the queue. Uses the pending-slot index (populated by Migrate5to6); slots are +// read once and written once (batch update). +// Read phase collects mature timeslices; write phase deletes keys and updates pending slots so that +// on any error no queue keys are deleted and state remains consistent. 
func (k Keeper) DequeueAllMatureRedelegationQueue(ctx context.Context, currTime time.Time) (matureRedelegations []types.DVVTriplet, err error) { store := k.storeService.OpenKVStore(ctx) - // gets an iterator for all timeslices from time 0 until the current Blockheader time - sdkCtx := sdk.UnwrapSDKContext(ctx) - redelegationTimesliceIterator, err := k.RedelegationQueueIterator(ctx, sdkCtx.HeaderInfo().Time) + slots, err := k.GetRedelegationQueuePendingSlots(ctx) if err != nil { return nil, err } - defer redelegationTimesliceIterator.Close() + if len(slots) == 0 { + return matureRedelegations, nil + } - for ; redelegationTimesliceIterator.Valid(); redelegationTimesliceIterator.Next() { - timeslice := types.DVVTriplets{} - value := redelegationTimesliceIterator.Value() - if err = k.cdc.Unmarshal(value, ×lice); err != nil { + var remaining []time.Time + var keysToDelete [][]byte + for _, t := range slots { + if t.After(currTime) { + remaining = append(remaining, t) + continue + } + queueKey := types.GetRedelegationTimeKey(t) + bz, err := store.Get(queueKey) + if err != nil { return nil, err } + if bz == nil { + continue + } - matureRedelegations = append(matureRedelegations, timeslice.Triplets...) - - if err = store.Delete(redelegationTimesliceIterator.Key()); err != nil { + timeslice := types.DVVTriplets{} + if err = k.cdc.Unmarshal(bz, ×lice); err != nil { return nil, err } + matureRedelegations = append(matureRedelegations, timeslice.Triplets...) + keysToDelete = append(keysToDelete, queueKey) } - return matureRedelegations, nil + for _, key := range keysToDelete { + if err = store.Delete(key); err != nil { + return matureRedelegations, err + } + } + return matureRedelegations, k.SetRedelegationQueuePendingSlots(ctx, remaining) } // Delegate performs a delegation, set/update everything necessary within the store. 
diff --git a/x/staking/keeper/migrations.go b/x/staking/keeper/migrations.go index e94848b13397..c2e73287cee4 100644 --- a/x/staking/keeper/migrations.go +++ b/x/staking/keeper/migrations.go @@ -8,6 +8,7 @@ import ( v3 "github.com/cosmos/cosmos-sdk/x/staking/migrations/v3" v4 "github.com/cosmos/cosmos-sdk/x/staking/migrations/v4" v5 "github.com/cosmos/cosmos-sdk/x/staking/migrations/v5" + v6 "github.com/cosmos/cosmos-sdk/x/staking/migrations/v6" ) // Migrator is a struct for handling in-place store migrations. @@ -47,3 +48,9 @@ func (m Migrator) Migrate4to5(ctx sdk.Context) error { store := runtime.KVStoreAdapter(m.keeper.storeService.OpenKVStore(ctx)) return v5.MigrateStore(ctx, store, m.keeper.cdc) } + +// Migrate5to6 migrates x/staking state from consensus version 5 to 6. +func (m Migrator) Migrate5to6(ctx sdk.Context) error { + store := m.keeper.storeService.OpenKVStore(ctx) + return v6.MigrateStore(ctx, store, m.keeper) +} diff --git a/x/staking/keeper/pending_queue_slots.go b/x/staking/keeper/pending_queue_slots.go new file mode 100644 index 000000000000..500247dfc363 --- /dev/null +++ b/x/staking/keeper/pending_queue_slots.go @@ -0,0 +1,225 @@ +package keeper + +import ( + "context" + "encoding/binary" + "fmt" + "sort" + "time" + + "github.com/cosmos/cosmos-sdk/x/staking/types" +) + +// Binary encoding constants for pending slot lists. +// Layout: [countBytes] (uint32) then for each slot: [timeBytes][heightBytes] (validator) or [timeBytes] (UBD/redelegation). 
+const ( + countBytes = 4 // bytes for slot count (uint32 big-endian) + timeSlotSizeBytes = 8 // uint64 bytes per slot for time-only queues (UBD, redelegation) + heightSlotSizeBytes = 8 // uint64 bytes for height used for unbonding validators + timeHeightSlotSizeBytes = timeSlotSizeBytes + heightSlotSizeBytes +) + +func insufficientCapacity(bz []byte, count, slotSize uint64) bool { + actualBytes := uint64(len(bz)) + requiredBytes := count * slotSize + return actualBytes < requiredBytes +} + +// GetValidatorQueuePendingSlots reads the list of (time, height) slots that have validator queue entries. +func (k Keeper) GetValidatorQueuePendingSlots(ctx context.Context) ([]types.TimeHeightQueueSlot, error) { + store := k.storeService.OpenKVStore(ctx) + bz, err := store.Get(types.ValidatorQueuePendingSlotsKey) + if err != nil { + return nil, err + } + if len(bz) == 0 { + return nil, nil + } + if len(bz) < countBytes { + return nil, fmt.Errorf("%w: key=%x", types.ErrPendingQueueSlotMissingCount, types.ValidatorQueuePendingSlotsKey) + } + n := binary.BigEndian.Uint32(bz[:countBytes]) + if n == 0 { + return nil, nil + } + bz = bz[countBytes:] + if insufficientCapacity(bz, uint64(n), timeHeightSlotSizeBytes) { + return nil, fmt.Errorf("%w: key=%x, count=%d", types.ErrPendingQueueSlotInsufficientCapacity, types.ValidatorQueuePendingSlotsKey, n) + } + slots := make([]types.TimeHeightQueueSlot, 0, n) + for i := uint32(0); i < n; i++ { + offset := i * timeHeightSlotSizeBytes + nanos := binary.BigEndian.Uint64(bz[offset : offset+timeSlotSizeBytes]) + height := int64(binary.BigEndian.Uint64(bz[offset+timeSlotSizeBytes : offset+timeHeightSlotSizeBytes])) + slots = append(slots, types.TimeHeightQueueSlot{ + Time: time.Unix(0, int64(nanos)).UTC(), + Height: height, + }) + } + return slots, nil +} + +// SetValidatorQueuePendingSlots sets the validator queue pending slots. 
+func (k Keeper) SetValidatorQueuePendingSlots(ctx context.Context, slots []types.TimeHeightQueueSlot) error { + store := k.storeService.OpenKVStore(ctx) + if len(slots) == 0 { + return store.Delete(types.ValidatorQueuePendingSlotsKey) + } + + sortAscending := func(i, j int) bool { + if slots[i].Time.Before(slots[j].Time) { + return true + } + if slots[j].Time.Before(slots[i].Time) { + return false + } + return slots[i].Height < slots[j].Height + } + + sort.Slice(slots, sortAscending) + + seen := make(map[string]struct{}) + uniqueSlots := make([]types.TimeHeightQueueSlot, 0, len(slots)) + for _, s := range slots { + key := string(binary.BigEndian.AppendUint64(nil, uint64(s.Time.UnixNano()))) + + string(binary.BigEndian.AppendUint64(nil, uint64(s.Height))) + if _, ok := seen[key]; !ok { + seen[key] = struct{}{} + uniqueSlots = append(uniqueSlots, s) + } + } + + bz := make([]byte, countBytes+len(uniqueSlots)*timeHeightSlotSizeBytes) + binary.BigEndian.PutUint32(bz[:countBytes], uint32(len(uniqueSlots))) + for i, s := range uniqueSlots { + offset := countBytes + i*timeHeightSlotSizeBytes + binary.BigEndian.PutUint64(bz[offset:offset+timeSlotSizeBytes], uint64(s.Time.UnixNano())) + binary.BigEndian.PutUint64(bz[offset+timeSlotSizeBytes:offset+timeHeightSlotSizeBytes], uint64(s.Height)) + } + return store.Set(types.ValidatorQueuePendingSlotsKey, bz) +} + +// AddValidatorQueuePendingSlot adds (time, height) to the pending list if not already present. +func (k Keeper) AddValidatorQueuePendingSlot(ctx context.Context, endTime time.Time, endHeight int64) error { + slots, err := k.GetValidatorQueuePendingSlots(ctx) + if err != nil { + return err + } + slots = append(slots, types.TimeHeightQueueSlot{Time: endTime, Height: endHeight}) + return k.SetValidatorQueuePendingSlots(ctx, slots) +} + +// RemoveValidatorQueuePendingSlot removes (time, height) from the pending list. 
+func (k Keeper) RemoveValidatorQueuePendingSlot(ctx context.Context, endTime time.Time, endHeight int64) error { + slots, err := k.GetValidatorQueuePendingSlots(ctx) + if err != nil { + return err + } + newSlots := make([]types.TimeHeightQueueSlot, 0, len(slots)) + for _, s := range slots { + if toRemain := !s.Time.Equal(endTime) || s.Height != endHeight; toRemain { + newSlots = append(newSlots, s) + } + } + return k.SetValidatorQueuePendingSlots(ctx, newSlots) +} + +// --- Time queue pending (time only) - shared by UBD and Redelegation --- + +// getTimeQueuePendingSlots reads the list of time slots for the given key. +func (k Keeper) getTimeQueuePendingSlots(ctx context.Context, key []byte) ([]time.Time, error) { + store := k.storeService.OpenKVStore(ctx) + bz, err := store.Get(key) + if err != nil { + return nil, err + } + if len(bz) == 0 { + return nil, nil + } + if len(bz) < countBytes { + return nil, fmt.Errorf("%w: key=%x", types.ErrPendingQueueSlotMissingCount, key) + } + n := binary.BigEndian.Uint32(bz[:countBytes]) + if n == 0 { + return nil, nil + } + bz = bz[countBytes:] + if insufficientCapacity(bz, uint64(n), timeSlotSizeBytes) { + return nil, fmt.Errorf("%w: key=%x, count=%d", types.ErrPendingQueueSlotInsufficientCapacity, key, n) + } + slots := make([]time.Time, 0, n) + for i := uint32(0); i < n; i++ { + off := i * timeSlotSizeBytes + nanos := binary.BigEndian.Uint64(bz[off : off+timeSlotSizeBytes]) + slots = append(slots, time.Unix(0, int64(nanos)).UTC()) + } + return slots, nil +} + +// setTimeQueuePendingSlots sets the time queue pending slots for the given key. 
+func (k Keeper) setTimeQueuePendingSlots(ctx context.Context, key []byte, slots []time.Time) error { + store := k.storeService.OpenKVStore(ctx) + if len(slots) == 0 { + return store.Delete(key) + } + sort.Slice(slots, func(i, j int) bool { return slots[i].Before(slots[j]) }) + seen := make(map[int64]struct{}) + uniqueSlots := make([]time.Time, 0, len(slots)) + for _, t := range slots { + n := t.UnixNano() + if _, ok := seen[n]; !ok { + seen[n] = struct{}{} + uniqueSlots = append(uniqueSlots, t) + } + } + bz := make([]byte, countBytes+len(uniqueSlots)*timeSlotSizeBytes) + binary.BigEndian.PutUint32(bz[:countBytes], uint32(len(uniqueSlots))) + for i, t := range uniqueSlots { + binary.BigEndian.PutUint64(bz[countBytes+i*timeSlotSizeBytes:countBytes+(i+1)*timeSlotSizeBytes], uint64(t.UnixNano())) + } + return store.Set(key, bz) +} + +// addTimeQueuePendingSlot adds a time slot to the pending list if not already present. +func (k Keeper) addTimeQueuePendingSlot(ctx context.Context, key []byte, completionTime time.Time) error { + slots, err := k.getTimeQueuePendingSlots(ctx, key) + if err != nil { + return err + } + slots = append(slots, completionTime) + return k.setTimeQueuePendingSlots(ctx, key, slots) +} + +// --- UBD queue pending (time only) --- + +// GetUBDQueuePendingSlots reads the list of time slots that have UBD queue entries. +func (k Keeper) GetUBDQueuePendingSlots(ctx context.Context) ([]time.Time, error) { + return k.getTimeQueuePendingSlots(ctx, types.UBDQueuePendingSlotsKey) +} + +// SetUBDQueuePendingSlots sets the UBD queue pending slots. +func (k Keeper) SetUBDQueuePendingSlots(ctx context.Context, slots []time.Time) error { + return k.setTimeQueuePendingSlots(ctx, types.UBDQueuePendingSlotsKey, slots) +} + +// AddUBDQueuePendingSlot adds a time slot to the UBD pending list if not already present. 
+func (k Keeper) AddUBDQueuePendingSlot(ctx context.Context, completionTime time.Time) error { + return k.addTimeQueuePendingSlot(ctx, types.UBDQueuePendingSlotsKey, completionTime) +} + +// --- Redelegation queue pending (time only) --- + +// GetRedelegationQueuePendingSlots reads the list of time slots that have redelegation queue entries. +func (k Keeper) GetRedelegationQueuePendingSlots(ctx context.Context) ([]time.Time, error) { + return k.getTimeQueuePendingSlots(ctx, types.RedelegationQueuePendingSlotsKey) +} + +// SetRedelegationQueuePendingSlots sets the redelegation queue pending slots. +func (k Keeper) SetRedelegationQueuePendingSlots(ctx context.Context, slots []time.Time) error { + return k.setTimeQueuePendingSlots(ctx, types.RedelegationQueuePendingSlotsKey, slots) +} + +// AddRedelegationQueuePendingSlot adds a time slot to the redelegation pending list if not already present. +func (k Keeper) AddRedelegationQueuePendingSlot(ctx context.Context, completionTime time.Time) error { + return k.addTimeQueuePendingSlot(ctx, types.RedelegationQueuePendingSlotsKey, completionTime) +} diff --git a/x/staking/keeper/pending_queue_slots_test.go b/x/staking/keeper/pending_queue_slots_test.go new file mode 100644 index 000000000000..4bd32b6cc520 --- /dev/null +++ b/x/staking/keeper/pending_queue_slots_test.go @@ -0,0 +1,714 @@ +package keeper_test + +import ( + "time" + + stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types" +) + +func (s *KeeperTestSuite) TestGetValidatorQueuePendingSlots_NoEntries() { + slots, err := s.stakingKeeper.GetValidatorQueuePendingSlots(s.ctx) + s.Require().NoError(err) + s.Require().Nil(slots) +} + +func (s *KeeperTestSuite) TestGetValidatorQueuePendingSlots_SingleEntry() { + testTime := time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC) + testHeight := int64(100) + + err := s.stakingKeeper.AddValidatorQueuePendingSlot(s.ctx, testTime, testHeight) + s.Require().NoError(err) + + slots, err := 
s.stakingKeeper.GetValidatorQueuePendingSlots(s.ctx) + s.Require().NoError(err) + s.Require().Len(slots, 1) + s.Require().Equal(testTime, slots[0].Time) + s.Require().Equal(testHeight, slots[0].Height) +} + +func (s *KeeperTestSuite) TestGetValidatorQueuePendingSlots_MultipleEntries() { + testTimes := []time.Time{ + time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC), + time.Date(2024, 1, 2, 0, 0, 0, 0, time.UTC), + time.Date(2024, 1, 3, 0, 0, 0, 0, time.UTC), + } + testHeights := []int64{100, 200, 300} + + for i, t := range testTimes { + err := s.stakingKeeper.AddValidatorQueuePendingSlot(s.ctx, t, testHeights[i]) + s.Require().NoError(err) + } + + slots, err := s.stakingKeeper.GetValidatorQueuePendingSlots(s.ctx) + s.Require().NoError(err) + s.Require().Len(slots, 3) + // Should be sorted by time, then height + for i, slot := range slots { + s.Require().Equal(testTimes[i], slot.Time) + s.Require().Equal(testHeights[i], slot.Height) + } +} + +func (s *KeeperTestSuite) TestSetValidatorQueuePendingSlots_EmptySlice() { + testTime := time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC) + testHeight := int64(100) + + // Add a slot first + err := s.stakingKeeper.AddValidatorQueuePendingSlot(s.ctx, testTime, testHeight) + s.Require().NoError(err) + + // Set empty slice (should delete) + err = s.stakingKeeper.SetValidatorQueuePendingSlots(s.ctx, []stakingtypes.TimeHeightQueueSlot{}) + s.Require().NoError(err) + + // Verify it's deleted + slots, err := s.stakingKeeper.GetValidatorQueuePendingSlots(s.ctx) + s.Require().NoError(err) + s.Require().Empty(slots) +} + +func (s *KeeperTestSuite) TestSetValidatorQueuePendingSlots_DuplicateEntries() { + testTime := time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC) + testHeight := int64(100) + + // Set with duplicates + slots := []stakingtypes.TimeHeightQueueSlot{ + {Time: testTime, Height: testHeight}, + {Time: testTime, Height: testHeight}, // duplicate + {Time: testTime, Height: testHeight}, // duplicate + } + + err := 
s.stakingKeeper.SetValidatorQueuePendingSlots(s.ctx, slots) + s.Require().NoError(err) + + // Verify only one entry persisted + retrievedSlots, err := s.stakingKeeper.GetValidatorQueuePendingSlots(s.ctx) + s.Require().NoError(err) + s.Require().Len(retrievedSlots, 1) + s.Require().Equal(testTime, retrievedSlots[0].Time) + s.Require().Equal(testHeight, retrievedSlots[0].Height) +} + +func (s *KeeperTestSuite) TestSetValidatorQueuePendingSlots_SingleEntry() { + testTime := time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC) + testHeight := int64(100) + + slots := []stakingtypes.TimeHeightQueueSlot{ + {Time: testTime, Height: testHeight}, + } + + err := s.stakingKeeper.SetValidatorQueuePendingSlots(s.ctx, slots) + s.Require().NoError(err) + + retrievedSlots, err := s.stakingKeeper.GetValidatorQueuePendingSlots(s.ctx) + s.Require().NoError(err) + s.Require().Len(retrievedSlots, 1) + s.Require().Equal(testTime, retrievedSlots[0].Time) + s.Require().Equal(testHeight, retrievedSlots[0].Height) +} + +func (s *KeeperTestSuite) TestSetValidatorQueuePendingSlots_MultipleEntries() { + slots := []stakingtypes.TimeHeightQueueSlot{ + {Time: time.Date(2024, 1, 3, 0, 0, 0, 0, time.UTC), Height: 300}, + {Time: time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC), Height: 100}, + {Time: time.Date(2024, 1, 2, 0, 0, 0, 0, time.UTC), Height: 200}, + } + + err := s.stakingKeeper.SetValidatorQueuePendingSlots(s.ctx, slots) + s.Require().NoError(err) + + retrievedSlots, err := s.stakingKeeper.GetValidatorQueuePendingSlots(s.ctx) + s.Require().NoError(err) + s.Require().Len(retrievedSlots, 3) + // Should be sorted by time, then height + s.Require().Equal(time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC), retrievedSlots[0].Time) + s.Require().Equal(int64(100), retrievedSlots[0].Height) + s.Require().Equal(time.Date(2024, 1, 2, 0, 0, 0, 0, time.UTC), retrievedSlots[1].Time) + s.Require().Equal(int64(200), retrievedSlots[1].Height) + s.Require().Equal(time.Date(2024, 1, 3, 0, 0, 0, 0, time.UTC), 
retrievedSlots[2].Time) + s.Require().Equal(int64(300), retrievedSlots[2].Height) +} + +func (s *KeeperTestSuite) TestAddValidatorQueuePendingSlot() { + testTime := time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC) + testHeight := int64(100) + + err := s.stakingKeeper.AddValidatorQueuePendingSlot(s.ctx, testTime, testHeight) + s.Require().NoError(err) + + slots, err := s.stakingKeeper.GetValidatorQueuePendingSlots(s.ctx) + s.Require().NoError(err) + s.Require().Len(slots, 1) + s.Require().Equal(testTime, slots[0].Time) + s.Require().Equal(testHeight, slots[0].Height) + + // Adding the same slot again should not create a duplicate + err = s.stakingKeeper.AddValidatorQueuePendingSlot(s.ctx, testTime, testHeight) + s.Require().NoError(err) + + slots, err = s.stakingKeeper.GetValidatorQueuePendingSlots(s.ctx) + s.Require().NoError(err) + s.Require().Len(slots, 1) // Still only one entry +} + +func (s *KeeperTestSuite) TestRemoveValidatorQueuePendingSlot() { + testTime1 := time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC) + testHeight1 := int64(100) + testTime2 := time.Date(2024, 1, 2, 0, 0, 0, 0, time.UTC) + testHeight2 := int64(200) + + // Add two slots + err := s.stakingKeeper.AddValidatorQueuePendingSlot(s.ctx, testTime1, testHeight1) + s.Require().NoError(err) + err = s.stakingKeeper.AddValidatorQueuePendingSlot(s.ctx, testTime2, testHeight2) + s.Require().NoError(err) + + // Remove one + err = s.stakingKeeper.RemoveValidatorQueuePendingSlot(s.ctx, testTime1, testHeight1) + s.Require().NoError(err) + + // Verify only one remains + slots, err := s.stakingKeeper.GetValidatorQueuePendingSlots(s.ctx) + s.Require().NoError(err) + s.Require().Len(slots, 1) + s.Require().Equal(testTime2, slots[0].Time) + s.Require().Equal(testHeight2, slots[0].Height) +} + +func (s *KeeperTestSuite) TestSetValidatorQueuePendingSlots_SortingEdgeCases_SameTimeDifferentHeights() { + // Same time, different heights - should sort by height + testTime := time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC) + slots 
:= []stakingtypes.TimeHeightQueueSlot{ + {Time: testTime, Height: 300}, + {Time: testTime, Height: 100}, + {Time: testTime, Height: 200}, + } + + err := s.stakingKeeper.SetValidatorQueuePendingSlots(s.ctx, slots) + s.Require().NoError(err) + + retrievedSlots, err := s.stakingKeeper.GetValidatorQueuePendingSlots(s.ctx) + s.Require().NoError(err) + s.Require().Len(retrievedSlots, 3) + // Should be sorted by height when times are equal + s.Require().Equal(testTime, retrievedSlots[0].Time) + s.Require().Equal(int64(100), retrievedSlots[0].Height) + s.Require().Equal(testTime, retrievedSlots[1].Time) + s.Require().Equal(int64(200), retrievedSlots[1].Height) + s.Require().Equal(testTime, retrievedSlots[2].Time) + s.Require().Equal(int64(300), retrievedSlots[2].Height) +} + +func (s *KeeperTestSuite) TestSetValidatorQueuePendingSlots_SortingEdgeCases_SameHeightDifferentTimes() { + // Same height, different times - should sort by time first + testHeight := int64(100) + slots := []stakingtypes.TimeHeightQueueSlot{ + {Time: time.Date(2024, 1, 3, 0, 0, 0, 0, time.UTC), Height: testHeight}, + {Time: time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC), Height: testHeight}, + {Time: time.Date(2024, 1, 2, 0, 0, 0, 0, time.UTC), Height: testHeight}, + } + + err := s.stakingKeeper.SetValidatorQueuePendingSlots(s.ctx, slots) + s.Require().NoError(err) + + retrievedSlots, err := s.stakingKeeper.GetValidatorQueuePendingSlots(s.ctx) + s.Require().NoError(err) + s.Require().Len(retrievedSlots, 3) + // Should be sorted by time first + s.Require().Equal(time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC), retrievedSlots[0].Time) + s.Require().Equal(testHeight, retrievedSlots[0].Height) + s.Require().Equal(time.Date(2024, 1, 2, 0, 0, 0, 0, time.UTC), retrievedSlots[1].Time) + s.Require().Equal(testHeight, retrievedSlots[1].Height) + s.Require().Equal(time.Date(2024, 1, 3, 0, 0, 0, 0, time.UTC), retrievedSlots[2].Time) + s.Require().Equal(testHeight, retrievedSlots[2].Height) +} + +func (s 
*KeeperTestSuite) TestSetValidatorQueuePendingSlots_Deduplication_SameTimeDifferentHeight() { + // Same time but different height should NOT deduplicate + testTime := time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC) + slots := []stakingtypes.TimeHeightQueueSlot{ + {Time: testTime, Height: 100}, + {Time: testTime, Height: 200}, // Different height, should NOT be deduplicated + {Time: testTime, Height: 100}, // Same time+height, should be deduplicated + } + + err := s.stakingKeeper.SetValidatorQueuePendingSlots(s.ctx, slots) + s.Require().NoError(err) + + retrievedSlots, err := s.stakingKeeper.GetValidatorQueuePendingSlots(s.ctx) + s.Require().NoError(err) + s.Require().Len(retrievedSlots, 2) // Should have 2 unique entries (100 and 200) + + // Should be sorted + s.Require().Equal(testTime, retrievedSlots[0].Time) + s.Require().Equal(int64(100), retrievedSlots[0].Height) + s.Require().Equal(testTime, retrievedSlots[1].Time) + s.Require().Equal(int64(200), retrievedSlots[1].Height) +} + +func (s *KeeperTestSuite) TestSetValidatorQueuePendingSlots_Deduplication_SameHeightDifferentTime() { + // Same height but different time should NOT deduplicate + testHeight := int64(100) + slots := []stakingtypes.TimeHeightQueueSlot{ + {Time: time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC), Height: testHeight}, + {Time: time.Date(2024, 1, 2, 0, 0, 0, 0, time.UTC), Height: testHeight}, // Different time, should NOT be deduplicated + {Time: time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC), Height: testHeight}, // Same time+height, should be deduplicated + } + + err := s.stakingKeeper.SetValidatorQueuePendingSlots(s.ctx, slots) + s.Require().NoError(err) + + retrievedSlots, err := s.stakingKeeper.GetValidatorQueuePendingSlots(s.ctx) + s.Require().NoError(err) + s.Require().Len(retrievedSlots, 2) // Should have 2 unique entries (different times) + + // Should be sorted by time first + s.Require().Equal(time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC), retrievedSlots[0].Time) + s.Require().Equal(testHeight, 
retrievedSlots[0].Height) + s.Require().Equal(time.Date(2024, 1, 2, 0, 0, 0, 0, time.UTC), retrievedSlots[1].Time) + s.Require().Equal(testHeight, retrievedSlots[1].Height) +} + +func (s *KeeperTestSuite) TestRemoveValidatorQueuePendingSlot_FromEmptyList() { + // Remove from empty list should be no-op, not error + err := s.stakingKeeper.RemoveValidatorQueuePendingSlot(s.ctx, time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC), 100) + s.Require().NoError(err) + + slots, err := s.stakingKeeper.GetValidatorQueuePendingSlots(s.ctx) + s.Require().NoError(err) + s.Require().Empty(slots) +} + +func (s *KeeperTestSuite) TestRemoveValidatorQueuePendingSlot_NonExistentEntry() { + // Add one slot + testTime := time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC) + testHeight := int64(100) + err := s.stakingKeeper.AddValidatorQueuePendingSlot(s.ctx, testTime, testHeight) + s.Require().NoError(err) + + // Try to remove non-existent entry + err = s.stakingKeeper.RemoveValidatorQueuePendingSlot(s.ctx, time.Date(2024, 1, 2, 0, 0, 0, 0, time.UTC), 200) + s.Require().NoError(err) // Should be no-op, not error + + // Original slot should still be there + slots, err := s.stakingKeeper.GetValidatorQueuePendingSlots(s.ctx) + s.Require().NoError(err) + s.Require().Len(slots, 1) + s.Require().Equal(testTime, slots[0].Time) + s.Require().Equal(testHeight, slots[0].Height) +} + +func (s *KeeperTestSuite) TestRemoveValidatorQueuePendingSlot_MiddleEntry() { + // Add three slots + testTime1 := time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC) + testHeight1 := int64(100) + testTime2 := time.Date(2024, 1, 2, 0, 0, 0, 0, time.UTC) + testHeight2 := int64(200) + testTime3 := time.Date(2024, 1, 3, 0, 0, 0, 0, time.UTC) + testHeight3 := int64(300) + + err := s.stakingKeeper.AddValidatorQueuePendingSlot(s.ctx, testTime1, testHeight1) + s.Require().NoError(err) + err = s.stakingKeeper.AddValidatorQueuePendingSlot(s.ctx, testTime2, testHeight2) + s.Require().NoError(err) + err = 
s.stakingKeeper.AddValidatorQueuePendingSlot(s.ctx, testTime3, testHeight3) + s.Require().NoError(err) + + // Remove middle entry + err = s.stakingKeeper.RemoveValidatorQueuePendingSlot(s.ctx, testTime2, testHeight2) + s.Require().NoError(err) + + // Verify only first and last remain + slots, err := s.stakingKeeper.GetValidatorQueuePendingSlots(s.ctx) + s.Require().NoError(err) + s.Require().Len(slots, 2) + s.Require().Equal(testTime1, slots[0].Time) + s.Require().Equal(testHeight1, slots[0].Height) + s.Require().Equal(testTime3, slots[1].Time) + s.Require().Equal(testHeight3, slots[1].Height) +} + +func (s *KeeperTestSuite) TestRemoveValidatorQueuePendingSlot_AllEntries() { + // Add two slots + testTime1 := time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC) + testHeight1 := int64(100) + testTime2 := time.Date(2024, 1, 2, 0, 0, 0, 0, time.UTC) + testHeight2 := int64(200) + + err := s.stakingKeeper.AddValidatorQueuePendingSlot(s.ctx, testTime1, testHeight1) + s.Require().NoError(err) + err = s.stakingKeeper.AddValidatorQueuePendingSlot(s.ctx, testTime2, testHeight2) + s.Require().NoError(err) + + // Remove all entries + err = s.stakingKeeper.RemoveValidatorQueuePendingSlot(s.ctx, testTime1, testHeight1) + s.Require().NoError(err) + err = s.stakingKeeper.RemoveValidatorQueuePendingSlot(s.ctx, testTime2, testHeight2) + s.Require().NoError(err) + + // Key should be deleted (empty list) + slots, err := s.stakingKeeper.GetValidatorQueuePendingSlots(s.ctx) + s.Require().NoError(err) + s.Require().Empty(slots) +} + +func (s *KeeperTestSuite) TestSetValidatorQueuePendingSlots_TimeWithNanosecondPrecision() { + // Test with nanosecond precision + testTime := time.Date(2024, 1, 1, 12, 34, 56, 123456789, time.UTC) + testHeight := int64(100) + + slots := []stakingtypes.TimeHeightQueueSlot{ + {Time: testTime, Height: testHeight}, + } + + err := s.stakingKeeper.SetValidatorQueuePendingSlots(s.ctx, slots) + s.Require().NoError(err) + + retrievedSlots, err := 
s.stakingKeeper.GetValidatorQueuePendingSlots(s.ctx) + s.Require().NoError(err) + s.Require().Len(retrievedSlots, 1) + s.Require().Equal(testTime, retrievedSlots[0].Time) // Should preserve nanosecond precision + s.Require().Equal(testHeight, retrievedSlots[0].Height) +} + +// --- UBD Queue Tests --- + +func (s *KeeperTestSuite) TestGetUBDQueuePendingSlots_NoEntries() { + slots, err := s.stakingKeeper.GetUBDQueuePendingSlots(s.ctx) + s.Require().NoError(err) + s.Require().Nil(slots) +} + +func (s *KeeperTestSuite) TestGetUBDQueuePendingSlots_SingleEntry() { + testTime := time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC) + + err := s.stakingKeeper.AddUBDQueuePendingSlot(s.ctx, testTime) + s.Require().NoError(err) + + slots, err := s.stakingKeeper.GetUBDQueuePendingSlots(s.ctx) + s.Require().NoError(err) + s.Require().Len(slots, 1) + s.Require().Equal(testTime, slots[0]) +} + +func (s *KeeperTestSuite) TestGetUBDQueuePendingSlots_MultipleEntries() { + testTimes := []time.Time{ + time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC), + time.Date(2024, 1, 2, 0, 0, 0, 0, time.UTC), + time.Date(2024, 1, 3, 0, 0, 0, 0, time.UTC), + } + + for _, t := range testTimes { + err := s.stakingKeeper.AddUBDQueuePendingSlot(s.ctx, t) + s.Require().NoError(err) + } + + slots, err := s.stakingKeeper.GetUBDQueuePendingSlots(s.ctx) + s.Require().NoError(err) + s.Require().Len(slots, 3) + // Should be sorted + for i, slot := range slots { + s.Require().Equal(testTimes[i], slot) + } +} + +func (s *KeeperTestSuite) TestSetUBDQueuePendingSlots_EmptySlice() { + testTime := time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC) + + // Add a slot first + err := s.stakingKeeper.AddUBDQueuePendingSlot(s.ctx, testTime) + s.Require().NoError(err) + + // Set empty slice (should delete) + err = s.stakingKeeper.SetUBDQueuePendingSlots(s.ctx, []time.Time{}) + s.Require().NoError(err) + + // Verify it's deleted + slots, err := s.stakingKeeper.GetUBDQueuePendingSlots(s.ctx) + s.Require().NoError(err) + s.Require().Empty(slots) 
+} + +func (s *KeeperTestSuite) TestSetUBDQueuePendingSlots_DuplicateEntries() { + testTime := time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC) + + // Set with duplicates + slots := []time.Time{ + testTime, + testTime, // duplicate + testTime, // duplicate + } + + err := s.stakingKeeper.SetUBDQueuePendingSlots(s.ctx, slots) + s.Require().NoError(err) + + // Verify only one entry persisted + retrievedSlots, err := s.stakingKeeper.GetUBDQueuePendingSlots(s.ctx) + s.Require().NoError(err) + s.Require().Len(retrievedSlots, 1) + s.Require().Equal(testTime, retrievedSlots[0]) +} + +func (s *KeeperTestSuite) TestSetUBDQueuePendingSlots_SingleEntry() { + testTime := time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC) + + slots := []time.Time{testTime} + + err := s.stakingKeeper.SetUBDQueuePendingSlots(s.ctx, slots) + s.Require().NoError(err) + + retrievedSlots, err := s.stakingKeeper.GetUBDQueuePendingSlots(s.ctx) + s.Require().NoError(err) + s.Require().Len(retrievedSlots, 1) + s.Require().Equal(testTime, retrievedSlots[0]) +} + +func (s *KeeperTestSuite) TestSetUBDQueuePendingSlots_MultipleEntries() { + slots := []time.Time{ + time.Date(2024, 1, 3, 0, 0, 0, 0, time.UTC), + time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC), + time.Date(2024, 1, 2, 0, 0, 0, 0, time.UTC), + } + + err := s.stakingKeeper.SetUBDQueuePendingSlots(s.ctx, slots) + s.Require().NoError(err) + + retrievedSlots, err := s.stakingKeeper.GetUBDQueuePendingSlots(s.ctx) + s.Require().NoError(err) + s.Require().Len(retrievedSlots, 3) + // Should be sorted + s.Require().Equal(time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC), retrievedSlots[0]) + s.Require().Equal(time.Date(2024, 1, 2, 0, 0, 0, 0, time.UTC), retrievedSlots[1]) + s.Require().Equal(time.Date(2024, 1, 3, 0, 0, 0, 0, time.UTC), retrievedSlots[2]) +} + +func (s *KeeperTestSuite) TestAddUBDQueuePendingSlot() { + testTime := time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC) + + err := s.stakingKeeper.AddUBDQueuePendingSlot(s.ctx, testTime) + s.Require().NoError(err) + + slots, 
err := s.stakingKeeper.GetUBDQueuePendingSlots(s.ctx) + s.Require().NoError(err) + s.Require().Len(slots, 1) + s.Require().Equal(testTime, slots[0]) + + // Adding the same time again should not create a duplicate + // Add calls Set internally, which deduplicates, so there should still be only 1 entry + err = s.stakingKeeper.AddUBDQueuePendingSlot(s.ctx, testTime) + s.Require().NoError(err) + + slots, err = s.stakingKeeper.GetUBDQueuePendingSlots(s.ctx) + s.Require().NoError(err) + s.Require().Len(slots, 1) // Still only one entry due to deduplication in Set + s.Require().Equal(testTime, slots[0]) +} + +func (s *KeeperTestSuite) TestAddUBDQueuePendingSlot_MultipleEntries() { + testTimes := []time.Time{ + time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC), + time.Date(2024, 1, 2, 0, 0, 0, 0, time.UTC), + time.Date(2024, 1, 3, 0, 0, 0, 0, time.UTC), + } + + // Add multiple entries + for _, t := range testTimes { + err := s.stakingKeeper.AddUBDQueuePendingSlot(s.ctx, t) + s.Require().NoError(err) + } + + // Verify all entries were added and sorted + slots, err := s.stakingKeeper.GetUBDQueuePendingSlots(s.ctx) + s.Require().NoError(err) + s.Require().Len(slots, 3) + // Should be sorted + for i, slot := range slots { + s.Require().Equal(testTimes[i], slot) + } +} + +func (s *KeeperTestSuite) TestSetUBDQueuePendingSlots_TimeWithNanosecondPrecision() { + // Test with nanosecond precision + testTime := time.Date(2024, 1, 1, 12, 34, 56, 123456789, time.UTC) + + slots := []time.Time{testTime} + + err := s.stakingKeeper.SetUBDQueuePendingSlots(s.ctx, slots) + s.Require().NoError(err) + + retrievedSlots, err := s.stakingKeeper.GetUBDQueuePendingSlots(s.ctx) + s.Require().NoError(err) + s.Require().Len(retrievedSlots, 1) + s.Require().Equal(testTime, retrievedSlots[0]) // Should preserve nanosecond precision +} + +// --- Redelegation Queue Tests --- + +func (s *KeeperTestSuite) TestGetRedelegationQueuePendingSlots_NoEntries() { + slots, err := 
s.stakingKeeper.GetRedelegationQueuePendingSlots(s.ctx) + s.Require().NoError(err) + s.Require().Nil(slots) +} + +func (s *KeeperTestSuite) TestGetRedelegationQueuePendingSlots_SingleEntry() { + testTime := time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC) + + err := s.stakingKeeper.AddRedelegationQueuePendingSlot(s.ctx, testTime) + s.Require().NoError(err) + + slots, err := s.stakingKeeper.GetRedelegationQueuePendingSlots(s.ctx) + s.Require().NoError(err) + s.Require().Len(slots, 1) + s.Require().Equal(testTime, slots[0]) +} + +func (s *KeeperTestSuite) TestGetRedelegationQueuePendingSlots_MultipleEntries() { + testTimes := []time.Time{ + time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC), + time.Date(2024, 1, 2, 0, 0, 0, 0, time.UTC), + time.Date(2024, 1, 3, 0, 0, 0, 0, time.UTC), + } + + for _, t := range testTimes { + err := s.stakingKeeper.AddRedelegationQueuePendingSlot(s.ctx, t) + s.Require().NoError(err) + } + + slots, err := s.stakingKeeper.GetRedelegationQueuePendingSlots(s.ctx) + s.Require().NoError(err) + s.Require().Len(slots, 3) + // Should be sorted + for i, slot := range slots { + s.Require().Equal(testTimes[i], slot) + } +} + +func (s *KeeperTestSuite) TestSetRedelegationQueuePendingSlots_EmptySlice() { + testTime := time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC) + + // Add a slot first + err := s.stakingKeeper.AddRedelegationQueuePendingSlot(s.ctx, testTime) + s.Require().NoError(err) + + // Set empty slice (should delete) + err = s.stakingKeeper.SetRedelegationQueuePendingSlots(s.ctx, []time.Time{}) + s.Require().NoError(err) + + // Verify it's deleted + slots, err := s.stakingKeeper.GetRedelegationQueuePendingSlots(s.ctx) + s.Require().NoError(err) + s.Require().Empty(slots) +} + +func (s *KeeperTestSuite) TestSetRedelegationQueuePendingSlots_DuplicateEntries() { + testTime := time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC) + + // Set with duplicates + slots := []time.Time{ + testTime, + testTime, // duplicate + testTime, // duplicate + } + + err := 
s.stakingKeeper.SetRedelegationQueuePendingSlots(s.ctx, slots) + s.Require().NoError(err) + + // Verify only one entry persisted + retrievedSlots, err := s.stakingKeeper.GetRedelegationQueuePendingSlots(s.ctx) + s.Require().NoError(err) + s.Require().Len(retrievedSlots, 1) + s.Require().Equal(testTime, retrievedSlots[0]) +} + +func (s *KeeperTestSuite) TestSetRedelegationQueuePendingSlots_SingleEntry() { + testTime := time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC) + + slots := []time.Time{testTime} + + err := s.stakingKeeper.SetRedelegationQueuePendingSlots(s.ctx, slots) + s.Require().NoError(err) + + retrievedSlots, err := s.stakingKeeper.GetRedelegationQueuePendingSlots(s.ctx) + s.Require().NoError(err) + s.Require().Len(retrievedSlots, 1) + s.Require().Equal(testTime, retrievedSlots[0]) +} + +func (s *KeeperTestSuite) TestSetRedelegationQueuePendingSlots_MultipleEntries() { + slots := []time.Time{ + time.Date(2024, 1, 3, 0, 0, 0, 0, time.UTC), + time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC), + time.Date(2024, 1, 2, 0, 0, 0, 0, time.UTC), + } + + err := s.stakingKeeper.SetRedelegationQueuePendingSlots(s.ctx, slots) + s.Require().NoError(err) + + retrievedSlots, err := s.stakingKeeper.GetRedelegationQueuePendingSlots(s.ctx) + s.Require().NoError(err) + s.Require().Len(retrievedSlots, 3) + // Should be sorted + s.Require().Equal(time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC), retrievedSlots[0]) + s.Require().Equal(time.Date(2024, 1, 2, 0, 0, 0, 0, time.UTC), retrievedSlots[1]) + s.Require().Equal(time.Date(2024, 1, 3, 0, 0, 0, 0, time.UTC), retrievedSlots[2]) +} + +func (s *KeeperTestSuite) TestAddRedelegationQueuePendingSlot() { + testTime := time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC) + + err := s.stakingKeeper.AddRedelegationQueuePendingSlot(s.ctx, testTime) + s.Require().NoError(err) + + slots, err := s.stakingKeeper.GetRedelegationQueuePendingSlots(s.ctx) + s.Require().NoError(err) + s.Require().Len(slots, 1) + s.Require().Equal(testTime, slots[0]) + + // Adding the 
same time again should not create a duplicate + // Add calls Set internally, which deduplicates, so there should still be only 1 entry + err = s.stakingKeeper.AddRedelegationQueuePendingSlot(s.ctx, testTime) + s.Require().NoError(err) + + slots, err = s.stakingKeeper.GetRedelegationQueuePendingSlots(s.ctx) + s.Require().NoError(err) + s.Require().Len(slots, 1) // Still only one entry due to deduplication in Set + s.Require().Equal(testTime, slots[0]) +} + +func (s *KeeperTestSuite) TestAddRedelegationQueuePendingSlot_MultipleEntries() { + testTimes := []time.Time{ + time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC), + time.Date(2024, 1, 2, 0, 0, 0, 0, time.UTC), + time.Date(2024, 1, 3, 0, 0, 0, 0, time.UTC), + } + + // Add multiple entries + for _, t := range testTimes { + err := s.stakingKeeper.AddRedelegationQueuePendingSlot(s.ctx, t) + s.Require().NoError(err) + } + + // Verify all entries were added and sorted + slots, err := s.stakingKeeper.GetRedelegationQueuePendingSlots(s.ctx) + s.Require().NoError(err) + s.Require().Len(slots, 3) + // Should be sorted + for i, slot := range slots { + s.Require().Equal(testTimes[i], slot) + } +} + +func (s *KeeperTestSuite) TestSetRedelegationQueuePendingSlots_TimeWithNanosecondPrecision() { + // Test with nanosecond precision + testTime := time.Date(2024, 1, 1, 12, 34, 56, 123456789, time.UTC) + + slots := []time.Time{testTime} + + err := s.stakingKeeper.SetRedelegationQueuePendingSlots(s.ctx, slots) + s.Require().NoError(err) + + retrievedSlots, err := s.stakingKeeper.GetRedelegationQueuePendingSlots(s.ctx) + s.Require().NoError(err) + s.Require().Len(retrievedSlots, 1) + s.Require().Equal(testTime, retrievedSlots[0]) // Should preserve nanosecond precision +} diff --git a/x/staking/keeper/validator.go b/x/staking/keeper/validator.go index 04f5fe3b0d1f..4fc1380a054c 100644 --- a/x/staking/keeper/validator.go +++ b/x/staking/keeper/validator.go @@ -474,7 +474,10 @@ func (k Keeper) SetUnbondingValidatorsQueue(ctx context.Context, 
endTime time.Ti if err != nil { return err } - return store.Set(types.GetValidatorQueueKey(endTime, endHeight), bz) + if err = store.Set(types.GetValidatorQueueKey(endTime, endHeight), bz); err != nil { + return err + } + return k.AddValidatorQueuePendingSlot(ctx, endTime, endHeight) } // InsertUnbondingValidatorQueue inserts a given unbonding validator address into @@ -492,7 +495,10 @@ func (k Keeper) InsertUnbondingValidatorQueue(ctx context.Context, val types.Val // given height and time. func (k Keeper) DeleteValidatorQueueTimeSlice(ctx context.Context, endTime time.Time, endHeight int64) error { store := k.storeService.OpenKVStore(ctx) - return store.Delete(types.GetValidatorQueueKey(endTime, endHeight)) + if err := store.Delete(types.GetValidatorQueueKey(endTime, endHeight)); err != nil { + return err + } + return k.RemoveValidatorQueuePendingSlot(ctx, endTime, endHeight) } // DeleteValidatorQueue removes a validator by address from the unbonding queue @@ -529,90 +535,88 @@ func (k Keeper) DeleteValidatorQueue(ctx context.Context, val types.Validator) e return k.SetUnbondingValidatorsQueue(ctx, val.UnbondingTime, val.UnbondingHeight, newAddrs) } -// ValidatorQueueIterator returns an iterator ranging over validators that are -// unbonding whose unbonding completion occurs at the given height and time. -func (k Keeper) ValidatorQueueIterator(ctx context.Context, endTime time.Time, endHeight int64) (corestore.Iterator, error) { - store := k.storeService.OpenKVStore(ctx) - return store.Iterator(types.ValidatorQueueKey, storetypes.InclusiveEndBytes(types.GetValidatorQueueKey(endTime, endHeight))) -} - // UnbondAllMatureValidators unbonds all the mature unbonding validators that -// have finished their unbonding period. +// have finished their unbonding period. Uses the pending-slot index (populated by +// Migrate5to6); no iterator is used in end-block. 
func (k Keeper) UnbondAllMatureValidators(ctx context.Context) error { sdkCtx := sdk.UnwrapSDKContext(ctx) blockTime := sdkCtx.BlockTime() blockHeight := sdkCtx.BlockHeight() - // unbondingValIterator will contain all validator addresses indexed under - // the ValidatorQueueKey prefix. Note, the entire index key is composed as - // ValidatorQueueKey | timeBzLen (8-byte big endian) | timeBz | heightBz (8-byte big endian), - // so it may be possible that certain validator addresses that are iterated - // over are not ready to unbond, so an explicit check is required. - unbondingValIterator, err := k.ValidatorQueueIterator(ctx, blockTime, blockHeight) + store := k.storeService.OpenKVStore(ctx) + slots, err := k.GetValidatorQueuePendingSlots(ctx) if err != nil { return err } - defer unbondingValIterator.Close() + if len(slots) == 0 { + return nil + } - for ; unbondingValIterator.Valid(); unbondingValIterator.Next() { - key := unbondingValIterator.Key() - keyTime, keyHeight, err := types.ParseValidatorQueueKey(key) + for _, slot := range slots { + if slot.Height > blockHeight || slot.Time.After(blockTime) { + continue + } + queueKey := types.GetValidatorQueueKey(slot.Time, slot.Height) + bz, err := store.Get(queueKey) if err != nil { - return fmt.Errorf("failed to parse unbonding key: %w", err) + return err + } + if bz == nil { + // already processed and deleted; remove from pending + err := k.RemoveValidatorQueuePendingSlot(ctx, slot.Time, slot.Height) + if err != nil { + return err + } + continue } - // All addresses for the given key have the same unbonding height and time. - // We only unbond if the height and time are less than the current height - // and time. 
- if keyHeight <= blockHeight && (keyTime.Before(blockTime) || keyTime.Equal(blockTime)) { - addrs := types.ValAddresses{} - if err = k.cdc.Unmarshal(unbondingValIterator.Value(), &addrs); err != nil { + addrs := types.ValAddresses{} + if err = k.cdc.Unmarshal(bz, &addrs); err != nil { + return err + } + + for _, valAddr := range addrs.Addresses { + addr, err := k.validatorAddressCodec.StringToBytes(valAddr) + if err != nil { return err } + val, err := k.GetValidator(ctx, addr) + if err != nil { + return errorsmod.Wrap(err, "validator in the unbonding queue was not found") + } - for _, valAddr := range addrs.Addresses { - addr, err := k.validatorAddressCodec.StringToBytes(valAddr) - if err != nil { - return err - } - val, err := k.GetValidator(ctx, addr) - if err != nil { - return errorsmod.Wrap(err, "validator in the unbonding queue was not found") - } + if !val.IsUnbonding() { + return fmt.Errorf("unexpected validator in unbonding queue; status was not unbonding") + } - if !val.IsUnbonding() { - return fmt.Errorf("unexpected validator in unbonding queue; status was not unbonding") + if val.UnbondingOnHoldRefCount == 0 { + for _, id := range val.UnbondingIds { + if err = k.DeleteUnbondingIndex(ctx, id); err != nil { + return err + } } - if val.UnbondingOnHoldRefCount == 0 { - for _, id := range val.UnbondingIds { - if err = k.DeleteUnbondingIndex(ctx, id); err != nil { - return err - } - } + val, err = k.UnbondingToUnbonded(ctx, val) + if err != nil { + return err + } - val, err = k.UnbondingToUnbonded(ctx, val) + if val.GetDelegatorShares().IsZero() { + str, err := k.validatorAddressCodec.StringToBytes(val.GetOperator()) if err != nil { return err } - - if val.GetDelegatorShares().IsZero() { - str, err := k.validatorAddressCodec.StringToBytes(val.GetOperator()) - if err != nil { - return err - } - if err = k.RemoveValidator(ctx, str); err != nil { - return err - } - } else { - // remove unbonding ids - val.UnbondingIds = []uint64{} - } - - // remove validator 
from queue - if err = k.DeleteValidatorQueue(ctx, val); err != nil { + if err = k.RemoveValidator(ctx, str); err != nil { return err } + } else { + // remove unbonding ids + val.UnbondingIds = []uint64{} + } + + // remove validator from queue (and from pending when timeslice is deleted) + if err = k.DeleteValidatorQueue(ctx, val); err != nil { + return err } } } diff --git a/x/staking/keeper/validator_test.go b/x/staking/keeper/validator_test.go index b3c48e759344..63902ab83fa9 100644 --- a/x/staking/keeper/validator_test.go +++ b/x/staking/keeper/validator_test.go @@ -440,3 +440,225 @@ func (s *KeeperTestSuite) TestUnbondingValidator() { require.NoError(err) require.Equal(stakingtypes.Unbonded, validator.Status) } + +// TestUnbondAllMatureValidators_PendingSlotCleanup verifies that pending slots are properly +// cleaned up when validators are unbonded, ensuring consistency between queue keys and +// pending slot index. +func (s *KeeperTestSuite) TestUnbondAllMatureValidators_PendingSlotCleanup() { + ctx, keeper := s.ctx, s.stakingKeeper + require := s.Require() + + // Create two different slots + slot1Time := ctx.BlockTime().Add(time.Hour) + slot1Height := ctx.BlockHeight() + 10 + + slot2Time := ctx.BlockTime().Add(2 * time.Hour) + slot2Height := ctx.BlockHeight() + 20 + + // Create multiple validators with different unbonding times/heights + valPubKey0 := PKs[0] + valAddr0 := sdk.ValAddress(valPubKey0.Address().Bytes()) + validator0 := testutil.NewValidator(s.T(), valAddr0, valPubKey0) + validator0, _ = validator0.AddTokensFromDel(keeper.TokensFromConsensusPower(ctx, 10)) + validator0.Status = stakingtypes.Unbonding + validator0.UnbondingTime = slot1Time + validator0.UnbondingHeight = slot1Height + + valPubKey1 := PKs[1] + valAddr1 := sdk.ValAddress(valPubKey1.Address().Bytes()) + validator1 := testutil.NewValidator(s.T(), valAddr1, valPubKey1) + validator1, _ = validator1.AddTokensFromDel(keeper.TokensFromConsensusPower(ctx, 10)) + validator1.Status = 
stakingtypes.Unbonding + validator1.UnbondingTime = slot1Time + validator1.UnbondingHeight = slot1Height + + valPubKey2 := PKs[2] + valAddr2 := sdk.ValAddress(valPubKey2.Address().Bytes()) + validator2 := testutil.NewValidator(s.T(), valAddr2, valPubKey2) + validator2, _ = validator2.AddTokensFromDel(keeper.TokensFromConsensusPower(ctx, 10)) + validator2.Status = stakingtypes.Unbonding + validator2.UnbondingTime = slot2Time + validator2.UnbondingHeight = slot2Height + + // Set up validators in the store + require.NoError(keeper.SetValidator(ctx, validator0)) + require.NoError(keeper.SetValidator(ctx, validator1)) + require.NoError(keeper.SetValidator(ctx, validator2)) + + // Add validators to different slots + // Slot 1: validator0 and validator1 + require.NoError(keeper.SetUnbondingValidatorsQueue(ctx, slot1Time, slot1Height, []string{ + valAddr0.String(), + valAddr1.String(), + })) + + // Slot 2: validator2 + require.NoError(keeper.SetUnbondingValidatorsQueue(ctx, slot2Time, slot2Height, []string{ + valAddr2.String(), + })) + + // Verify pending slots are populated + slots, err := keeper.GetValidatorQueuePendingSlots(ctx) + require.NoError(err) + require.Len(slots, 2) + + // Verify both slots are in pending + foundSlot1 := false + foundSlot2 := false + for _, slot := range slots { + if slot.Time.Equal(slot1Time) && slot.Height == slot1Height { + foundSlot1 = true + } + if slot.Time.Equal(slot2Time) && slot.Height == slot2Height { + foundSlot2 = true + } + } + require.True(foundSlot1, "slot1 should be in pending slots") + require.True(foundSlot2, "slot2 should be in pending slots") + + // Advance time and height to make slot1 mature + ctx = ctx.WithBlockTime(slot1Time).WithBlockHeight(slot1Height) + + // Unbond mature validators (slot1 should be processed) + require.NoError(keeper.UnbondAllMatureValidators(ctx)) + + // Verify slot1 validators are unbonded + val0, err := keeper.GetValidator(ctx, valAddr0) + require.NoError(err) + 
require.Equal(stakingtypes.Unbonded, val0.Status) + + val1, err := keeper.GetValidator(ctx, valAddr1) + require.NoError(err) + require.Equal(stakingtypes.Unbonded, val1.Status) + + // Verify slot1 is removed from pending slots (since it became empty) + slots, err = keeper.GetValidatorQueuePendingSlots(ctx) + require.NoError(err) + require.Len(slots, 1, "slot1 should be removed from pending after becoming empty") + + // Verify slot2 is still in pending (not mature yet) + foundSlot2 = false + for _, slot := range slots { + if slot.Time.Equal(slot2Time) && slot.Height == slot2Height { + foundSlot2 = true + } + } + require.True(foundSlot2, "slot2 should still be in pending slots") + + // Verify slot1 queue key is deleted (GetUnbondingValidators should return empty) + vals, err := keeper.GetUnbondingValidators(ctx, slot1Time, slot1Height) + require.NoError(err) + require.Empty(vals, "slot1 queue key should be deleted") + + // Advance to make slot2 mature + ctx = ctx.WithBlockTime(slot2Time).WithBlockHeight(slot2Height) + + // Unbond mature validators (slot2 should be processed) + require.NoError(keeper.UnbondAllMatureValidators(ctx)) + + // Verify slot2 validator is unbonded + val2, err := keeper.GetValidator(ctx, valAddr2) + require.NoError(err) + require.Equal(stakingtypes.Unbonded, val2.Status) + + // Verify slot2 is removed from pending slots + slots, err = keeper.GetValidatorQueuePendingSlots(ctx) + require.NoError(err) + require.Empty(slots, "all slots should be removed from pending after processing") + + // Verify slot2 queue key is deleted (GetUnbondingValidators should return empty) + vals, err = keeper.GetUnbondingValidators(ctx, slot2Time, slot2Height) + require.NoError(err) + require.Empty(vals, "slot2 queue key should be deleted") +} + +// TestUnbondAllMatureValidators_PendingSlotCleanup_MultipleValidatorsInSlot verifies +// that when a slot has multiple validators, the slot is only removed from pending +// when all validators are unbonded. 
+func (s *KeeperTestSuite) TestUnbondAllMatureValidators_PendingSlotCleanup_MultipleValidatorsInSlot() { + ctx, keeper := s.ctx, s.stakingKeeper + require := s.Require() + + slotTime := ctx.BlockTime().Add(time.Hour) + slotHeight := ctx.BlockHeight() + 10 + + valPubKey0 := PKs[0] + valAddr0 := sdk.ValAddress(valPubKey0.Address().Bytes()) + validator0 := testutil.NewValidator(s.T(), valAddr0, valPubKey0) + validator0, _ = validator0.AddTokensFromDel(keeper.TokensFromConsensusPower(ctx, 10)) + validator0.Status = stakingtypes.Unbonding + validator0.UnbondingTime = slotTime + validator0.UnbondingHeight = slotHeight + + valPubKey1 := PKs[1] + valAddr1 := sdk.ValAddress(valPubKey1.Address().Bytes()) + validator1 := testutil.NewValidator(s.T(), valAddr1, valPubKey1) + validator1, _ = validator1.AddTokensFromDel(keeper.TokensFromConsensusPower(ctx, 10)) + validator1.Status = stakingtypes.Unbonding + validator1.UnbondingTime = slotTime + validator1.UnbondingHeight = slotHeight + + require.NoError(keeper.SetValidator(ctx, validator0)) + require.NoError(keeper.SetValidator(ctx, validator1)) + + // Add both validators to the same slot + require.NoError(keeper.SetUnbondingValidatorsQueue(ctx, slotTime, slotHeight, []string{ + valAddr0.String(), + valAddr1.String(), + })) + + // Verify slot is in pending + slots, err := keeper.GetValidatorQueuePendingSlots(ctx) + require.NoError(err) + require.Len(slots, 1) + + // Advance to make slot mature + ctx = ctx.WithBlockTime(slotTime).WithBlockHeight(slotHeight) + + // Unbond mature validators - both should be processed + require.NoError(keeper.UnbondAllMatureValidators(ctx)) + + // Verify both validators are unbonded + val0, err := keeper.GetValidator(ctx, valAddr0) + require.NoError(err) + require.Equal(stakingtypes.Unbonded, val0.Status) + + val1, err := keeper.GetValidator(ctx, valAddr1) + require.NoError(err) + require.Equal(stakingtypes.Unbonded, val1.Status) + + // Verify slot is removed from pending (all validators unbonded) + 
slots, err = keeper.GetValidatorQueuePendingSlots(ctx) + require.NoError(err) + require.Empty(slots, "slot should be removed after all validators are unbonded") +} + +// TestUnbondAllMatureValidators_PendingSlotCleanup_AlreadyDeletedSlot verifies +// that already-deleted slots are properly cleaned up from pending. +func (s *KeeperTestSuite) TestUnbondAllMatureValidators_PendingSlotCleanup_AlreadyDeletedSlot() { + ctx, keeper := s.ctx, s.stakingKeeper + require := s.Require() + + slotTime := ctx.BlockTime().Add(time.Hour) + slotHeight := ctx.BlockHeight() + 10 + + // Manually add a slot to pending without creating the queue entry + // This simulates a scenario where the queue key was deleted but pending slot wasn't updated + require.NoError(keeper.AddValidatorQueuePendingSlot(ctx, slotTime, slotHeight)) + + // Verify slot is in pending + slots, err := keeper.GetValidatorQueuePendingSlots(ctx) + require.NoError(err) + require.Len(slots, 1) + + // Advance to make slot "mature" + ctx = ctx.WithBlockTime(slotTime).WithBlockHeight(slotHeight) + + // UnbondAllMatureValidators should handle the missing queue key gracefully + require.NoError(keeper.UnbondAllMatureValidators(ctx)) + + // Verify the orphaned pending slot is cleaned up + slots, err = keeper.GetValidatorQueuePendingSlots(ctx) + require.NoError(err) + require.Empty(slots, "orphaned pending slot should be cleaned up") +} diff --git a/x/staking/migrations/v6/migrations_test.go b/x/staking/migrations/v6/migrations_test.go new file mode 100644 index 000000000000..dda9ea3234c1 --- /dev/null +++ b/x/staking/migrations/v6/migrations_test.go @@ -0,0 +1,376 @@ +package v6_test + +import ( + "testing" + "time" + + cmtproto "github.com/cometbft/cometbft/proto/tendermint/types" + cmttime "github.com/cometbft/cometbft/types/time" + "github.com/stretchr/testify/suite" + "go.uber.org/mock/gomock" + + storetypes "cosmossdk.io/core/store" + storetypesv1 "cosmossdk.io/store/types" + + "github.com/cosmos/cosmos-sdk/codec" + 
"github.com/cosmos/cosmos-sdk/codec/address" + "github.com/cosmos/cosmos-sdk/runtime" + "github.com/cosmos/cosmos-sdk/testutil" + sdk "github.com/cosmos/cosmos-sdk/types" + moduletestutil "github.com/cosmos/cosmos-sdk/types/module/testutil" + authtypes "github.com/cosmos/cosmos-sdk/x/auth/types" + govtypes "github.com/cosmos/cosmos-sdk/x/gov/types" + stakingkeeper "github.com/cosmos/cosmos-sdk/x/staking/keeper" + v6 "github.com/cosmos/cosmos-sdk/x/staking/migrations/v6" + stakingtestutil "github.com/cosmos/cosmos-sdk/x/staking/testutil" + stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types" +) + +var ( + bondedAcc = authtypes.NewEmptyModuleAccount(stakingtypes.BondedPoolName) + notBondedAcc = authtypes.NewEmptyModuleAccount(stakingtypes.NotBondedPoolName) +) + +type MigrationsTestSuite struct { + suite.Suite + + ctx sdk.Context + stakingKeeper *stakingkeeper.Keeper + bankKeeper *stakingtestutil.MockBankKeeper + accountKeeper *stakingtestutil.MockAccountKeeper + storeService storetypes.KVStoreService + cdc codec.BinaryCodec +} + +func (s *MigrationsTestSuite) SetupTest() { + require := s.Require() + key := storetypesv1.NewKVStoreKey(stakingtypes.StoreKey) + storeService := runtime.NewKVStoreService(key) + testCtx := testutil.DefaultContextWithDB(s.T(), key, storetypesv1.NewTransientStoreKey("transient_test")) + ctx := testCtx.Ctx.WithBlockHeader(cmtproto.Header{Time: cmttime.Now()}) + encCfg := moduletestutil.MakeTestEncodingConfig() + + ctrl := gomock.NewController(s.T()) + accountKeeper := stakingtestutil.NewMockAccountKeeper(ctrl) + accountKeeper.EXPECT().GetModuleAddress(stakingtypes.BondedPoolName).Return(bondedAcc.GetAddress()) + accountKeeper.EXPECT().GetModuleAddress(stakingtypes.NotBondedPoolName).Return(notBondedAcc.GetAddress()) + accountKeeper.EXPECT().AddressCodec().Return(address.NewBech32Codec("cosmos")).AnyTimes() + + bankKeeper := stakingtestutil.NewMockBankKeeper(ctrl) + + keeper := stakingkeeper.NewKeeper( + encCfg.Codec, + storeService, + 
accountKeeper, + bankKeeper, + authtypes.NewModuleAddress(govtypes.ModuleName).String(), + address.NewBech32Codec("cosmosvaloper"), + address.NewBech32Codec("cosmosvalcons"), + ) + require.NoError(keeper.SetParams(ctx, stakingtypes.DefaultParams())) + + s.ctx = ctx + s.stakingKeeper = keeper + s.bankKeeper = bankKeeper + s.accountKeeper = accountKeeper + s.storeService = storeService + s.cdc = encCfg.Codec +} + +func TestMigrationsTestSuite(t *testing.T) { + suite.Run(t, new(MigrationsTestSuite)) +} + +// setValidatorQueueEntryPreMigration sets a validator queue entry in the old format (pre-migration) +// for testing migration functions. +func (s *MigrationsTestSuite) setValidatorQueueEntryPreMigration(endTime time.Time, endHeight int64, addrs []string) error { + store := s.storeService.OpenKVStore(s.ctx) + bz, err := s.cdc.Marshal(&stakingtypes.ValAddresses{Addresses: addrs}) + if err != nil { + return err + } + return store.Set(stakingtypes.GetValidatorQueueKey(endTime, endHeight), bz) +} + +// setUBDQueueEntryPreMigration sets a UBD queue entry in the old format (pre-migration) +// for testing migration functions. +func (s *MigrationsTestSuite) setUBDQueueEntryPreMigration(timestamp time.Time) error { + store := s.storeService.OpenKVStore(s.ctx) + timeBz := sdk.FormatTimeBytes(timestamp) + return store.Set(stakingtypes.GetUnbondingDelegationTimeKey(timestamp), timeBz) +} + +// setRedelegationQueueEntryPreMigration sets a redelegation queue entry in the old format (pre-migration) +// for testing migration functions. 
+func (s *MigrationsTestSuite) setRedelegationQueueEntryPreMigration(timestamp time.Time) error { + store := s.storeService.OpenKVStore(s.ctx) + timeBz := sdk.FormatTimeBytes(timestamp) + return store.Set(stakingtypes.GetRedelegationTimeKey(timestamp), timeBz) +} + +// migrateStore is a helper function that calls v6.MigrateStore with the correct parameters +func (s *MigrationsTestSuite) migrateStore() error { + store := s.storeService.OpenKVStore(s.ctx) + return v6.MigrateStore(s.ctx, store, s.stakingKeeper) +} + +// TestMigrateStore_NoEntries tests migration with 0 entries +func (s *MigrationsTestSuite) TestMigrateStore_NoEntries() { + err := s.migrateStore() + s.Require().NoError(err) + + slots, err := s.stakingKeeper.GetValidatorQueuePendingSlots(s.ctx) + s.Require().NoError(err) + s.Require().Empty(slots) + + ubdSlots, err := s.stakingKeeper.GetUBDQueuePendingSlots(s.ctx) + s.Require().NoError(err) + s.Require().Empty(ubdSlots) + + redSlots, err := s.stakingKeeper.GetRedelegationQueuePendingSlots(s.ctx) + s.Require().NoError(err) + s.Require().Empty(redSlots) +} + +// TestMigrateStore_AllQueues tests migration with multiple entries in all queues +func (s *MigrationsTestSuite) TestMigrateStore_AllQueues() { + valTimes := []time.Time{ + time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC), + time.Date(2024, 1, 2, 0, 0, 0, 0, time.UTC), + time.Date(2024, 1, 3, 0, 0, 0, 0, time.UTC), + } + valHeights := []int64{100, 200, 300} + ubdTimes := []time.Time{ + time.Date(2024, 1, 4, 0, 0, 0, 0, time.UTC), + time.Date(2024, 1, 5, 0, 0, 0, 0, time.UTC), + time.Date(2024, 1, 6, 0, 0, 0, 0, time.UTC), + } + redTimes := []time.Time{ + time.Date(2024, 1, 7, 0, 0, 0, 0, time.UTC), + time.Date(2024, 1, 8, 0, 0, 0, 0, time.UTC), + time.Date(2024, 1, 9, 0, 0, 0, 0, time.UTC), + } + + // Set up old format queue entries directly in store (pre-migration format) + for i, t := range valTimes { + err := s.setValidatorQueueEntryPreMigration(t, valHeights[i], []string{"cosmosvaloper1abc123"}) + 
s.Require().NoError(err) + } + for _, t := range ubdTimes { + err := s.setUBDQueueEntryPreMigration(t) + s.Require().NoError(err) + } + for _, t := range redTimes { + err := s.setRedelegationQueueEntryPreMigration(t) + s.Require().NoError(err) + } + + // Run migration + err := s.migrateStore() + s.Require().NoError(err) + + // Verify all pending slots were populated + valSlots, err := s.stakingKeeper.GetValidatorQueuePendingSlots(s.ctx) + s.Require().NoError(err) + s.Require().Len(valSlots, 3) + s.Require().Equal(valTimes[0], valSlots[0].Time) + s.Require().Equal(valHeights[0], valSlots[0].Height) + s.Require().Equal(valTimes[1], valSlots[1].Time) + s.Require().Equal(valHeights[1], valSlots[1].Height) + s.Require().Equal(valTimes[2], valSlots[2].Time) + s.Require().Equal(valHeights[2], valSlots[2].Height) + + ubdSlots, err := s.stakingKeeper.GetUBDQueuePendingSlots(s.ctx) + s.Require().NoError(err) + s.Require().Len(ubdSlots, 3) + s.Require().Equal(ubdTimes[0], ubdSlots[0]) + s.Require().Equal(ubdTimes[1], ubdSlots[1]) + s.Require().Equal(ubdTimes[2], ubdSlots[2]) + + redSlots, err := s.stakingKeeper.GetRedelegationQueuePendingSlots(s.ctx) + s.Require().NoError(err) + s.Require().Len(redSlots, 3) + s.Require().Equal(redTimes[0], redSlots[0]) + s.Require().Equal(redTimes[1], redSlots[1]) + s.Require().Equal(redTimes[2], redSlots[2]) +} + +// TestMigrateStore_ValidatorQueue_NoEntries tests migration with 0 validator queue entries +func (s *MigrationsTestSuite) TestMigrateStore_ValidatorQueue_NoEntries() { + err := s.migrateStore() + s.Require().NoError(err) + + slots, err := s.stakingKeeper.GetValidatorQueuePendingSlots(s.ctx) + s.Require().NoError(err) + s.Require().Empty(slots) +} + +// TestMigrateStore_ValidatorQueue_SingleEntry tests migration with 1 validator queue entry +func (s *MigrationsTestSuite) TestMigrateStore_ValidatorQueue_SingleEntry() { + testTime := time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC) + testHeight := int64(100) + + // Set up old format queue 
entry directly in store (pre-migration format) + err := s.setValidatorQueueEntryPreMigration(testTime, testHeight, []string{"cosmosvaloper1abc123"}) + s.Require().NoError(err) + + // Run migration + err = s.migrateStore() + s.Require().NoError(err) + + // Verify pending slots were populated + slots, err := s.stakingKeeper.GetValidatorQueuePendingSlots(s.ctx) + s.Require().NoError(err) + s.Require().Len(slots, 1) + s.Require().Equal(testTime, slots[0].Time) + s.Require().Equal(testHeight, slots[0].Height) +} + +// TestMigrateStore_ValidatorQueue_MultipleEntries tests migration with multiple validator queue entries +func (s *MigrationsTestSuite) TestMigrateStore_ValidatorQueue_MultipleEntries() { + testTimes := []time.Time{ + time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC), + time.Date(2024, 1, 2, 0, 0, 0, 0, time.UTC), + time.Date(2024, 1, 3, 0, 0, 0, 0, time.UTC), + } + testHeights := []int64{100, 200, 300} + + // Set up old format queue entries directly in store (pre-migration format) + for i, t := range testTimes { + err := s.setValidatorQueueEntryPreMigration(t, testHeights[i], []string{"cosmosvaloper1abc123"}) + s.Require().NoError(err) + } + + // Run migration + err := s.migrateStore() + s.Require().NoError(err) + + // Verify pending slots were populated + slots, err := s.stakingKeeper.GetValidatorQueuePendingSlots(s.ctx) + s.Require().NoError(err) + s.Require().Len(slots, 3) + + // Slots should be sorted by time, then height + s.Require().Equal(testTimes[0], slots[0].Time) + s.Require().Equal(testHeights[0], slots[0].Height) + s.Require().Equal(testTimes[1], slots[1].Time) + s.Require().Equal(testHeights[1], slots[1].Height) + s.Require().Equal(testTimes[2], slots[2].Time) + s.Require().Equal(testHeights[2], slots[2].Height) +} + +// TestMigrateStore_UBDQueue_NoEntries tests migration with 0 UBD queue entries +func (s *MigrationsTestSuite) TestMigrateStore_UBDQueue_NoEntries() { + err := s.migrateStore() + s.Require().NoError(err) + + slots, err := 
s.stakingKeeper.GetUBDQueuePendingSlots(s.ctx) + s.Require().NoError(err) + s.Require().Empty(slots) +} + +// TestMigrateStore_UBDQueue_SingleEntry tests migration with 1 UBD queue entry +func (s *MigrationsTestSuite) TestMigrateStore_UBDQueue_SingleEntry() { + testTime := time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC) + + // Set up old format queue entry directly in store (pre-migration format) + err := s.setUBDQueueEntryPreMigration(testTime) + s.Require().NoError(err) + + // Run migration + err = s.migrateStore() + s.Require().NoError(err) + + // Verify pending slots were populated + slots, err := s.stakingKeeper.GetUBDQueuePendingSlots(s.ctx) + s.Require().NoError(err) + s.Require().Len(slots, 1) + s.Require().Equal(testTime, slots[0]) +} + +// TestMigrateStore_UBDQueue_MultipleEntries tests migration with multiple UBD queue entries +func (s *MigrationsTestSuite) TestMigrateStore_UBDQueue_MultipleEntries() { + testTimes := []time.Time{ + time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC), + time.Date(2024, 1, 2, 0, 0, 0, 0, time.UTC), + time.Date(2024, 1, 3, 0, 0, 0, 0, time.UTC), + } + + // Set up old format queue entries directly in store (pre-migration format) + for _, t := range testTimes { + err := s.setUBDQueueEntryPreMigration(t) + s.Require().NoError(err) + } + + // Run migration + err := s.migrateStore() + s.Require().NoError(err) + + // Verify pending slots were populated + slots, err := s.stakingKeeper.GetUBDQueuePendingSlots(s.ctx) + s.Require().NoError(err) + s.Require().Len(slots, 3) + + // Slots should be sorted by time + s.Require().Equal(testTimes[0], slots[0]) + s.Require().Equal(testTimes[1], slots[1]) + s.Require().Equal(testTimes[2], slots[2]) +} + +// TestMigrateStore_RedelegationQueue_NoEntries tests migration with 0 redelegation queue entries +func (s *MigrationsTestSuite) TestMigrateStore_RedelegationQueue_NoEntries() { + err := s.migrateStore() + s.Require().NoError(err) + + slots, err := s.stakingKeeper.GetRedelegationQueuePendingSlots(s.ctx) + 
s.Require().NoError(err) + s.Require().Empty(slots) +} + +// TestMigrateStore_RedelegationQueue_SingleEntry tests migration with 1 redelegation queue entry +func (s *MigrationsTestSuite) TestMigrateStore_RedelegationQueue_SingleEntry() { + testTime := time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC) + + // Set up old format queue entry directly in store (pre-migration format) + err := s.setRedelegationQueueEntryPreMigration(testTime) + s.Require().NoError(err) + + // Run migration + err = s.migrateStore() + s.Require().NoError(err) + + // Verify pending slots were populated + slots, err := s.stakingKeeper.GetRedelegationQueuePendingSlots(s.ctx) + s.Require().NoError(err) + s.Require().Len(slots, 1) + s.Require().Equal(testTime, slots[0]) +} + +// TestMigrateStore_RedelegationQueue_MultipleEntries tests migration with multiple redelegation queue entries +func (s *MigrationsTestSuite) TestMigrateStore_RedelegationQueue_MultipleEntries() { + testTimes := []time.Time{ + time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC), + time.Date(2024, 1, 2, 0, 0, 0, 0, time.UTC), + time.Date(2024, 1, 3, 0, 0, 0, 0, time.UTC), + } + + // Set up old format queue entries directly in store (pre-migration format) + for _, t := range testTimes { + err := s.setRedelegationQueueEntryPreMigration(t) + s.Require().NoError(err) + } + + // Run migration + err := s.migrateStore() + s.Require().NoError(err) + + // Verify pending slots were populated + slots, err := s.stakingKeeper.GetRedelegationQueuePendingSlots(s.ctx) + s.Require().NoError(err) + s.Require().Len(slots, 3) + + // Slots should be sorted by time + s.Require().Equal(testTimes[0], slots[0]) + s.Require().Equal(testTimes[1], slots[1]) + s.Require().Equal(testTimes[2], slots[2]) +} diff --git a/x/staking/migrations/v6/store.go b/x/staking/migrations/v6/store.go new file mode 100644 index 000000000000..c42ca16c9525 --- /dev/null +++ b/x/staking/migrations/v6/store.go @@ -0,0 +1,103 @@ +package v6 + +import ( + "context" + "fmt" + "time" + + 
storetypes "cosmossdk.io/core/store" + storetypesv1 "cosmossdk.io/store/types" + + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/x/staking/types" +) + +// QueuePendingSlotSetter defines the interface for setting queue pending slots. +// This interface is implemented by keeper.Keeper to avoid import cycles. +type QueuePendingSlotSetter interface { + SetValidatorQueuePendingSlots(ctx context.Context, slots []types.TimeHeightQueueSlot) error + SetUBDQueuePendingSlots(ctx context.Context, slots []time.Time) error + SetRedelegationQueuePendingSlots(ctx context.Context, slots []time.Time) error +} + +// MigrateStore performs in-place store migrations from v5 to v6 by +// populating the queue pending-slot indexes (validator, UBD, redelegation) +// from current queue state. This avoids expensive full-range iteration in +// end-block on the first block after upgrade. +func MigrateStore( + ctx sdk.Context, + store storetypes.KVStore, + k QueuePendingSlotSetter, +) error { + if err := PopulateValidatorQueuePendingFromIterator(ctx, store, k.SetValidatorQueuePendingSlots); err != nil { + return err + } + if err := PopulateUBDQueuePendingFromIterator(ctx, store, k.SetUBDQueuePendingSlots); err != nil { + return err + } + return PopulateRedelegationQueuePendingFromIterator(ctx, store, k.SetRedelegationQueuePendingSlots) +} + +func PopulateValidatorQueuePendingFromIterator( + ctx context.Context, + store storetypes.KVStore, + setter func(context.Context, []types.TimeHeightQueueSlot) error, +) error { + iter, err := store.Iterator(types.ValidatorQueueKey, storetypesv1.PrefixEndBytes(types.ValidatorQueueKey)) + if err != nil { + return err + } + defer iter.Close() + var slots []types.TimeHeightQueueSlot + for ; iter.Valid(); iter.Next() { + keyTime, keyHeight, err := types.ParseValidatorQueueKey(iter.Key()) + if err != nil { + return err + } + slots = append(slots, types.TimeHeightQueueSlot{Time: keyTime, Height: keyHeight}) + } + return setter(ctx, slots) 
+} + +func populateTimeQueuePendingFromIterator( + ctx context.Context, + store storetypes.KVStore, + queueKey []byte, + setter func(context.Context, []time.Time) error, +) error { + iter, err := store.Iterator(queueKey, storetypesv1.PrefixEndBytes(queueKey)) + if err != nil { + return err + } + defer iter.Close() + var slots []time.Time + for ; iter.Valid(); iter.Next() { + key := iter.Key() + if len(key) <= len(queueKey) { + return fmt.Errorf("key length is too short") + } + timeBz := key[len(queueKey):] + t, err := sdk.ParseTimeBytes(timeBz) + if err != nil { + return fmt.Errorf("unable to parse time from queue key %x: %w", key, err) + } + slots = append(slots, t) + } + return setter(ctx, slots) +} + +func PopulateUBDQueuePendingFromIterator( + ctx context.Context, + store storetypes.KVStore, + setter func(context.Context, []time.Time) error, +) error { + return populateTimeQueuePendingFromIterator(ctx, store, types.UnbondingQueueKey, setter) +} + +func PopulateRedelegationQueuePendingFromIterator( + ctx context.Context, + store storetypes.KVStore, + setter func(context.Context, []time.Time) error, +) error { + return populateTimeQueuePendingFromIterator(ctx, store, types.RedelegationQueueKey, setter) +} diff --git a/x/staking/module.go b/x/staking/module.go index 427ff607afae..bca579db1c1e 100644 --- a/x/staking/module.go +++ b/x/staking/module.go @@ -35,7 +35,7 @@ import ( ) const ( - consensusVersion uint64 = 5 + consensusVersion uint64 = 6 ) var ( @@ -151,6 +151,9 @@ func (am AppModule) RegisterServices(cfg module.Configurator) { if err := cfg.RegisterMigration(types.ModuleName, 4, m.Migrate4to5); err != nil { panic(fmt.Sprintf("failed to migrate x/%s from version 4 to 5: %v", types.ModuleName, err)) } + if err := cfg.RegisterMigration(types.ModuleName, 5, m.Migrate5to6); err != nil { + panic(fmt.Sprintf("failed to migrate x/%s from version 5 to 6: %v", types.ModuleName, err)) + } } // InitGenesis performs genesis initialization for the staking module. 
diff --git a/x/staking/types/errors.go b/x/staking/types/errors.go index 585a9e8e83ca..e064e4475258 100644 --- a/x/staking/types/errors.go +++ b/x/staking/types/errors.go @@ -4,48 +4,50 @@ import "cosmossdk.io/errors" // x/staking module sentinel errors var ( - ErrEmptyValidatorAddr = errors.Register(ModuleName, 2, "empty validator address") - ErrNoValidatorFound = errors.Register(ModuleName, 3, "validator does not exist") - ErrValidatorOwnerExists = errors.Register(ModuleName, 4, "validator already exist for this operator address; must use new validator operator address") - ErrValidatorPubKeyExists = errors.Register(ModuleName, 5, "validator already exist for this pubkey; must use new validator pubkey") - ErrValidatorPubKeyTypeNotSupported = errors.Register(ModuleName, 6, "validator pubkey type is not supported") - ErrValidatorJailed = errors.Register(ModuleName, 7, "validator for this address is currently jailed") - ErrBadRemoveValidator = errors.Register(ModuleName, 8, "failed to remove validator") - ErrCommissionNegative = errors.Register(ModuleName, 9, "commission must be positive") - ErrCommissionHuge = errors.Register(ModuleName, 10, "commission cannot be more than 100%") - ErrCommissionGTMaxRate = errors.Register(ModuleName, 11, "commission cannot be more than the max rate") - ErrCommissionUpdateTime = errors.Register(ModuleName, 12, "commission cannot be changed more than once in 24h") - ErrCommissionChangeRateNegative = errors.Register(ModuleName, 13, "commission change rate must be positive") - ErrCommissionChangeRateGTMaxRate = errors.Register(ModuleName, 14, "commission change rate cannot be more than the max rate") - ErrCommissionGTMaxChangeRate = errors.Register(ModuleName, 15, "commission cannot be changed more than max change rate") - ErrSelfDelegationBelowMinimum = errors.Register(ModuleName, 16, "validator's self delegation must be greater than their minimum self delegation") - ErrMinSelfDelegationDecreased = errors.Register(ModuleName, 17, 
"minimum self delegation cannot be decreased") - ErrEmptyDelegatorAddr = errors.Register(ModuleName, 18, "empty delegator address") - ErrNoDelegation = errors.Register(ModuleName, 19, "no delegation for (address, validator) tuple") - ErrBadDelegatorAddr = errors.Register(ModuleName, 20, "delegator does not exist with address") - ErrNoDelegatorForAddress = errors.Register(ModuleName, 21, "delegator does not contain delegation") - ErrInsufficientShares = errors.Register(ModuleName, 22, "insufficient delegation shares") - ErrDelegationValidatorEmpty = errors.Register(ModuleName, 23, "cannot delegate to an empty validator") - ErrNotEnoughDelegationShares = errors.Register(ModuleName, 24, "not enough delegation shares") - ErrNotMature = errors.Register(ModuleName, 25, "entry not mature") - ErrNoUnbondingDelegation = errors.Register(ModuleName, 26, "no unbonding delegation found") - ErrMaxUnbondingDelegationEntries = errors.Register(ModuleName, 27, "too many unbonding delegation entries for (delegator, validator) tuple") - ErrNoRedelegation = errors.Register(ModuleName, 28, "no redelegation found") - ErrSelfRedelegation = errors.Register(ModuleName, 29, "cannot redelegate to the same validator") - ErrTinyRedelegationAmount = errors.Register(ModuleName, 30, "too few tokens to redelegate (truncates to zero tokens)") - ErrBadRedelegationDst = errors.Register(ModuleName, 31, "redelegation destination validator not found") - ErrTransitiveRedelegation = errors.Register(ModuleName, 32, "redelegation to this validator already in progress; first redelegation to this validator must complete before next redelegation") - ErrMaxRedelegationEntries = errors.Register(ModuleName, 33, "too many redelegation entries for (delegator, src-validator, dst-validator) tuple") - ErrDelegatorShareExRateInvalid = errors.Register(ModuleName, 34, "cannot delegate to validators with invalid (zero) ex-rate") - ErrBothShareMsgsGiven = errors.Register(ModuleName, 35, "both shares amount and shares 
percent provided") - ErrNeitherShareMsgsGiven = errors.Register(ModuleName, 36, "neither shares amount nor shares percent provided") - ErrInvalidHistoricalInfo = errors.Register(ModuleName, 37, "invalid historical info") - ErrNoHistoricalInfo = errors.Register(ModuleName, 38, "no historical info found") - ErrEmptyValidatorPubKey = errors.Register(ModuleName, 39, "empty validator public key") - ErrCommissionLTMinRate = errors.Register(ModuleName, 40, "commission cannot be less than min rate") - ErrUnbondingNotFound = errors.Register(ModuleName, 41, "unbonding operation not found") - ErrUnbondingOnHoldRefCountNegative = errors.Register(ModuleName, 42, "cannot un-hold unbonding operation that is not on hold") - ErrInvalidSigner = errors.Register(ModuleName, 43, "expected authority account as only signer for proposal message") - ErrBadRedelegationSrc = errors.Register(ModuleName, 44, "redelegation source validator not found") - ErrNoUnbondingType = errors.Register(ModuleName, 45, "unbonding type not found") + ErrEmptyValidatorAddr = errors.Register(ModuleName, 2, "empty validator address") + ErrNoValidatorFound = errors.Register(ModuleName, 3, "validator does not exist") + ErrValidatorOwnerExists = errors.Register(ModuleName, 4, "validator already exist for this operator address; must use new validator operator address") + ErrValidatorPubKeyExists = errors.Register(ModuleName, 5, "validator already exist for this pubkey; must use new validator pubkey") + ErrValidatorPubKeyTypeNotSupported = errors.Register(ModuleName, 6, "validator pubkey type is not supported") + ErrValidatorJailed = errors.Register(ModuleName, 7, "validator for this address is currently jailed") + ErrBadRemoveValidator = errors.Register(ModuleName, 8, "failed to remove validator") + ErrCommissionNegative = errors.Register(ModuleName, 9, "commission must be positive") + ErrCommissionHuge = errors.Register(ModuleName, 10, "commission cannot be more than 100%") + ErrCommissionGTMaxRate = 
errors.Register(ModuleName, 11, "commission cannot be more than the max rate") + ErrCommissionUpdateTime = errors.Register(ModuleName, 12, "commission cannot be changed more than once in 24h") + ErrCommissionChangeRateNegative = errors.Register(ModuleName, 13, "commission change rate must be positive") + ErrCommissionChangeRateGTMaxRate = errors.Register(ModuleName, 14, "commission change rate cannot be more than the max rate") + ErrCommissionGTMaxChangeRate = errors.Register(ModuleName, 15, "commission cannot be changed more than max change rate") + ErrSelfDelegationBelowMinimum = errors.Register(ModuleName, 16, "validator's self delegation must be greater than their minimum self delegation") + ErrMinSelfDelegationDecreased = errors.Register(ModuleName, 17, "minimum self delegation cannot be decreased") + ErrEmptyDelegatorAddr = errors.Register(ModuleName, 18, "empty delegator address") + ErrNoDelegation = errors.Register(ModuleName, 19, "no delegation for (address, validator) tuple") + ErrBadDelegatorAddr = errors.Register(ModuleName, 20, "delegator does not exist with address") + ErrNoDelegatorForAddress = errors.Register(ModuleName, 21, "delegator does not contain delegation") + ErrInsufficientShares = errors.Register(ModuleName, 22, "insufficient delegation shares") + ErrDelegationValidatorEmpty = errors.Register(ModuleName, 23, "cannot delegate to an empty validator") + ErrNotEnoughDelegationShares = errors.Register(ModuleName, 24, "not enough delegation shares") + ErrNotMature = errors.Register(ModuleName, 25, "entry not mature") + ErrNoUnbondingDelegation = errors.Register(ModuleName, 26, "no unbonding delegation found") + ErrMaxUnbondingDelegationEntries = errors.Register(ModuleName, 27, "too many unbonding delegation entries for (delegator, validator) tuple") + ErrNoRedelegation = errors.Register(ModuleName, 28, "no redelegation found") + ErrSelfRedelegation = errors.Register(ModuleName, 29, "cannot redelegate to the same validator") + 
ErrTinyRedelegationAmount = errors.Register(ModuleName, 30, "too few tokens to redelegate (truncates to zero tokens)") + ErrBadRedelegationDst = errors.Register(ModuleName, 31, "redelegation destination validator not found") + ErrTransitiveRedelegation = errors.Register(ModuleName, 32, "redelegation to this validator already in progress; first redelegation to this validator must complete before next redelegation") + ErrMaxRedelegationEntries = errors.Register(ModuleName, 33, "too many redelegation entries for (delegator, src-validator, dst-validator) tuple") + ErrDelegatorShareExRateInvalid = errors.Register(ModuleName, 34, "cannot delegate to validators with invalid (zero) ex-rate") + ErrBothShareMsgsGiven = errors.Register(ModuleName, 35, "both shares amount and shares percent provided") + ErrNeitherShareMsgsGiven = errors.Register(ModuleName, 36, "neither shares amount nor shares percent provided") + ErrInvalidHistoricalInfo = errors.Register(ModuleName, 37, "invalid historical info") + ErrNoHistoricalInfo = errors.Register(ModuleName, 38, "no historical info found") + ErrEmptyValidatorPubKey = errors.Register(ModuleName, 39, "empty validator public key") + ErrCommissionLTMinRate = errors.Register(ModuleName, 40, "commission cannot be less than min rate") + ErrUnbondingNotFound = errors.Register(ModuleName, 41, "unbonding operation not found") + ErrUnbondingOnHoldRefCountNegative = errors.Register(ModuleName, 42, "cannot un-hold unbonding operation that is not on hold") + ErrInvalidSigner = errors.Register(ModuleName, 43, "expected authority account as only signer for proposal message") + ErrBadRedelegationSrc = errors.Register(ModuleName, 44, "redelegation source validator not found") + ErrNoUnbondingType = errors.Register(ModuleName, 45, "unbonding type not found") + ErrPendingQueueSlotMissingCount = errors.Register(ModuleName, 46, "pending queue slot missing count") + ErrPendingQueueSlotInsufficientCapacity = errors.Register(ModuleName, 47, "pending queue 
slot insufficient capacity") ) diff --git a/x/staking/types/keys.go b/x/staking/types/keys.go index 8f51f6f8800d..69115a11d86c 100644 --- a/x/staking/types/keys.go +++ b/x/staking/types/keys.go @@ -51,6 +51,10 @@ var ( RedelegationQueueKey = []byte{0x42} // prefix for the timestamps in redelegations queue ValidatorQueueKey = []byte{0x43} // prefix for the timestamps in validator queue + ValidatorQueuePendingSlotsKey = []byte{0x44} // single key: pending validator queue slots + UBDQueuePendingSlotsKey = []byte{0x45} // single key: pending UBD queue times + RedelegationQueuePendingSlotsKey = []byte{0x46} // single key: pending redelegation queue times + HistoricalInfoKey = []byte{0x50} // prefix for the historical info ValidatorUpdatesKey = []byte{0x61} // prefix for the end block validator updates key @@ -62,6 +66,12 @@ var ( // Module developers MUST NOT use these keys and MUST consider them "reserved". ) +// TimeHeightQueueSlot is a (time, height) slot in the validator unbonding queue. +type TimeHeightQueueSlot struct { + Time time.Time + Height int64 +} + // UnbondingType defines the type of unbonding operation type UnbondingType int