
Commit 5993841

test: actors: manual CC onboarding and proving integration test (#12017)
* remove client CLI
* remove markets CLI from miner
* remove markets from all CLI
* remove client API
* update go mod
* remove EnableMarkets flag
* remove market subsystem
* remove dagstore
* remove index provider
* remove graphsync and data-transfer
* remove markets
* go mod tidy
* fix cbor gen deps
* remove deal making from config
* remove eol alert
* go mod tidy
* changes as per review
* make jen
* changes as per review
* test: actors: manual CC onboarding and proving integration test
* test: actors: manual CC onboarding itest with real proofs
* test: actors: fix lint issue, require proofs in CI
* test: actors: rename real proofs test, fix dispute window wait
* feat: add TestUnmanagedMiner in the itest kit for non-storage managed miners
* feat: test: improve UnmanagedMiner test harness
* feat: test: MineBlocksMustPost can watch for >1 miners (#12063)
* feat: test: MineBlocksMustPost can watch for >1 miners
* feat: test: wait for both sectors at the end of test
* feat: test: minor manual onboarding test fixups and speed up
* feat: test: handle case where miners have close deadline ends
* Implement snap deals test for manual sector onboarding (#12066)
* changes as per review
* thread safety
* test for snap deals
* remove extraneous change
* Apply suggestions from code review

Co-authored-by: Rod Vagg <[email protected]>

* cancel CC Post after snap deals

---------

Co-authored-by: Rod Vagg <[email protected]>

* fix config

---------

Co-authored-by: aarshkshah1992 <[email protected]>
1 parent e3deda0 commit 5993841

File tree

6 files changed: +1447 −39 lines changed

.github/workflows/test.yml

Lines changed: 2 additions & 0 deletions

```diff
@@ -97,6 +97,7 @@ jobs:
           "itest-get_messages_in_ts": ["self-hosted", "linux", "x64", "xlarge"],
           "itest-lite_migration": ["self-hosted", "linux", "x64", "xlarge"],
           "itest-lookup_robust_address": ["self-hosted", "linux", "x64", "xlarge"],
+          "itest-manual_onboarding": ["self-hosted", "linux", "x64", "xlarge"],
           "itest-mempool": ["self-hosted", "linux", "x64", "xlarge"],
           "itest-mpool_msg_uuid": ["self-hosted", "linux", "x64", "xlarge"],
           "itest-mpool_push_with_uuid": ["self-hosted", "linux", "x64", "xlarge"],
@@ -129,6 +130,7 @@ jobs:
           "itest-deals",
           "itest-direct_data_onboard_verified",
           "itest-direct_data_onboard",
+          "itest-manual_onboarding",
           "itest-net",
           "itest-path_detach_redeclare",
           "itest-path_type_filters",
```

itests/kit/blockminer.go

Lines changed: 104 additions & 29 deletions
```diff
@@ -12,6 +12,7 @@ import (
 
 	"github.com/stretchr/testify/require"
 
+	"github.com/filecoin-project/go-address"
 	"github.com/filecoin-project/go-bitfield"
 	"github.com/filecoin-project/go-jsonrpc"
 	"github.com/filecoin-project/go-state-types/abi"
@@ -20,6 +21,7 @@ import (
 	"github.com/filecoin-project/go-state-types/dline"
 
 	"github.com/filecoin-project/lotus/api"
+	"github.com/filecoin-project/lotus/api/v1api"
 	"github.com/filecoin-project/lotus/chain/types"
 	"github.com/filecoin-project/lotus/miner"
 )
@@ -29,11 +31,13 @@ type BlockMiner struct {
 	t     *testing.T
 	miner *TestMiner
 
-	nextNulls int64
-	pause     chan struct{}
-	unpause   chan struct{}
-	wg        sync.WaitGroup
-	cancel    context.CancelFunc
+	nextNulls         int64
+	postWatchMiners   []address.Address
+	postWatchMinersLk sync.Mutex
+	pause             chan struct{}
+	unpause           chan struct{}
+	wg                sync.WaitGroup
+	cancel            context.CancelFunc
 }
 
 func NewBlockMiner(t *testing.T, miner *TestMiner) *BlockMiner {
@@ -46,19 +50,58 @@ func NewBlockMiner(t *testing.T, miner *TestMiner) *BlockMiner {
 	}
 }
 
+type minerDeadline struct {
+	addr     address.Address
+	deadline dline.Info
+}
+
+type minerDeadlines []minerDeadline
+
+func (mds minerDeadlines) CloseList() []abi.ChainEpoch {
+	var ret []abi.ChainEpoch
+	for _, md := range mds {
+		ret = append(ret, md.deadline.Last())
+	}
+	return ret
+}
+
+func (mds minerDeadlines) MinerStringList() []string {
+	var ret []string
+	for _, md := range mds {
+		ret = append(ret, md.addr.String())
+	}
+	return ret
+}
+
+// FilterByLast returns a new minerDeadlines with only the deadlines that have a Last() epoch
+// greater than or equal to last.
+func (mds minerDeadlines) FilterByLast(last abi.ChainEpoch) minerDeadlines {
+	var ret minerDeadlines
+	for _, md := range mds {
+		if last >= md.deadline.Last() {
+			ret = append(ret, md)
+		}
+	}
+	return ret
+}
+
 type partitionTracker struct {
+	minerAddr  address.Address
 	partitions []api.Partition
 	posted     bitfield.BitField
 }
 
-func newPartitionTracker(ctx context.Context, dlIdx uint64, bm *BlockMiner) *partitionTracker {
-	dlines, err := bm.miner.FullNode.StateMinerDeadlines(ctx, bm.miner.ActorAddr, types.EmptyTSK)
-	require.NoError(bm.t, err)
+// newPartitionTracker creates a new partitionTracker that tracks the deadline index dlIdx for the
+// given minerAddr. It uses the BlockMiner bm to interact with the chain.
+func newPartitionTracker(ctx context.Context, t *testing.T, client v1api.FullNode, minerAddr address.Address, dlIdx uint64) *partitionTracker {
+	dlines, err := client.StateMinerDeadlines(ctx, minerAddr, types.EmptyTSK)
+	require.NoError(t, err)
 	dl := dlines[dlIdx]
 
-	parts, err := bm.miner.FullNode.StateMinerPartitions(ctx, bm.miner.ActorAddr, dlIdx, types.EmptyTSK)
-	require.NoError(bm.t, err)
+	parts, err := client.StateMinerPartitions(ctx, minerAddr, dlIdx, types.EmptyTSK)
+	require.NoError(t, err)
 	return &partitionTracker{
+		minerAddr:  minerAddr,
 		partitions: parts,
 		posted:     dl.PostSubmissions,
 	}
```
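To make the cutoff concrete: the value passed to `FilterByLast` further down is the furthest epoch the next mined block could reach (current height, plus pending null rounds, plus a margin of 5), and a deadline is kept when that cutoff reaches or passes its `Last()` epoch. A self-contained illustration of the same comparison with invented epoch numbers, using plain `int64` in place of `abi.ChainEpoch`:

```go
package main

import "fmt"

func main() {
	height, nulls := int64(100), int64(7)
	cutoff := height + 5 + nulls // lookahead epoch used by MineBlocksMustPost: 112

	// Invented Last() epochs for three watched miners' current deadlines.
	deadlineLasts := []int64{110, 112, 150}
	for _, last := range deadlineLasts {
		if cutoff >= last { // same predicate as FilterByLast
			fmt.Printf("deadline closing at %d is impending; force a PoSt\n", last)
		}
	}
	// Prints for 110 and 112, but not 150.
}
```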
```diff
@@ -74,11 +117,11 @@ func (p *partitionTracker) done(t *testing.T) bool {
 	return uint64(len(p.partitions)) == p.count(t)
 }
 
-func (p *partitionTracker) recordIfPost(t *testing.T, bm *BlockMiner, msg *types.Message) (ret bool) {
+func (p *partitionTracker) recordIfPost(t *testing.T, msg *types.Message) (ret bool) {
 	defer func() {
 		ret = p.done(t)
 	}()
-	if !(msg.To == bm.miner.ActorAddr) {
+	if !(msg.To == p.minerAddr) {
 		return
 	}
 	if msg.Method != builtin.MethodsMiner.SubmitWindowedPoSt {
@@ -92,19 +135,18 @@ func (p *partitionTracker) recordIfPost(t *testing.T, bm *BlockMiner, msg *types
 	return
 }
 
-func (bm *BlockMiner) forcePoSt(ctx context.Context, ts *types.TipSet, dlinfo *dline.Info) {
-
-	tracker := newPartitionTracker(ctx, dlinfo.Index, bm)
+func (bm *BlockMiner) forcePoSt(ctx context.Context, ts *types.TipSet, minerAddr address.Address, dlinfo dline.Info) {
+	tracker := newPartitionTracker(ctx, bm.t, bm.miner.FullNode, minerAddr, dlinfo.Index)
 	if !tracker.done(bm.t) { // need to wait for post
 		bm.t.Logf("expect %d partitions proved but only see %d", len(tracker.partitions), tracker.count(bm.t))
-		poolEvts, err := bm.miner.FullNode.MpoolSub(ctx) //subscribe before checking pending so we don't miss any events
+		poolEvts, err := bm.miner.FullNode.MpoolSub(ctx) // subscribe before checking pending so we don't miss any events
 		require.NoError(bm.t, err)
 
 		// First check pending messages we'll mine this epoch
 		msgs, err := bm.miner.FullNode.MpoolPending(ctx, types.EmptyTSK)
 		require.NoError(bm.t, err)
 		for _, msg := range msgs {
-			if tracker.recordIfPost(bm.t, bm, &msg.Message) {
+			if tracker.recordIfPost(bm.t, &msg.Message) {
 				fmt.Printf("found post in mempool pending\n")
 			}
 		}
@@ -114,13 +156,13 @@ func (bm *BlockMiner) forcePoSt(ctx context.Context, ts *types.TipSet, dlinfo *d
 		msgs, err := bm.miner.FullNode.ChainGetBlockMessages(ctx, bc)
 		require.NoError(bm.t, err)
 		for _, msg := range msgs.BlsMessages {
-			if tracker.recordIfPost(bm.t, bm, msg) {
+			if tracker.recordIfPost(bm.t, msg) {
 				fmt.Printf("found post in message of prev tipset\n")
 			}
 
 		}
 		for _, msg := range msgs.SecpkMessages {
-			if tracker.recordIfPost(bm.t, bm, &msg.Message) {
+			if tracker.recordIfPost(bm.t, &msg.Message) {
 				fmt.Printf("found post in message of prev tipset\n")
 			}
 		}
@@ -139,7 +181,7 @@ func (bm *BlockMiner) forcePoSt(ctx context.Context, ts *types.TipSet, dlinfo *d
 				bm.t.Logf("pool event: %d", evt.Type)
 				if evt.Type == api.MpoolAdd {
 					bm.t.Logf("incoming message %v", evt.Message)
-					if tracker.recordIfPost(bm.t, bm, &evt.Message.Message) {
+					if tracker.recordIfPost(bm.t, &evt.Message.Message) {
 						fmt.Printf("found post in mempool evt\n")
 						break POOL
 					}
@@ -151,11 +193,24 @@ func (bm *BlockMiner) forcePoSt(ctx context.Context, ts *types.TipSet, dlinfo *d
 	}
 }
 
+// WatchMinerForPost adds a miner to the list of miners that the BlockMiner will watch for window
+// post submissions when using MineBlocksMustPost. This is useful when we have more than just the
+// BlockMiner submitting posts, particularly in the case of UnmanagedMiners which don't participate
+// in block mining.
+func (bm *BlockMiner) WatchMinerForPost(minerAddr address.Address) {
+	bm.postWatchMinersLk.Lock()
+	bm.postWatchMiners = append(bm.postWatchMiners, minerAddr)
+	bm.postWatchMinersLk.Unlock()
+}
+
 // Like MineBlocks but refuses to mine until the window post scheduler has wdpost messages in the mempool
 // and everything shuts down if a post fails. It also enforces that every block mined succeeds
 func (bm *BlockMiner) MineBlocksMustPost(ctx context.Context, blocktime time.Duration) {
 	time.Sleep(time.Second)
 
+	// watch for our own window posts
+	bm.WatchMinerForPost(bm.miner.ActorAddr)
+
 	// wrap context in a cancellable context.
 	ctx, bm.cancel = context.WithCancel(ctx)
 	bm.wg.Add(1)
```
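`MineBlocksMustPost` registers only its own miner automatically, so a test that drives additional miners (such as the new `UnmanagedMiner`s) must call `WatchMinerForPost` for each of them before expecting their posts to be enforced. A hypothetical helper sketching that call pattern; `startWatchedMining` is illustrative and not part of the kit:

```go
package itests

import (
	"context"
	"testing"
	"time"

	"github.com/filecoin-project/go-address"

	"github.com/filecoin-project/lotus/itests/kit"
)

// startWatchedMining registers each extra miner with the BlockMiner, so
// MineBlocksMustPost also waits on their window posts near deadline close.
func startWatchedMining(ctx context.Context, t *testing.T, miner *kit.TestMiner, extra ...address.Address) *kit.BlockMiner {
	bm := kit.NewBlockMiner(t, miner)
	for _, addr := range extra {
		bm.WatchMinerForPost(addr) // bm's own miner is added automatically
	}
	bm.MineBlocksMustPost(ctx, 100*time.Millisecond)
	return bm
}
```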
```diff
@@ -182,11 +237,25 @@ func (bm *BlockMiner) MineBlocksMustPost(ctx context.Context, blocktime time.Dur
 			ts, err := bm.miner.FullNode.ChainHead(ctx)
 			require.NoError(bm.t, err)
 
-			dlinfo, err := bm.miner.FullNode.StateMinerProvingDeadline(ctx, bm.miner.ActorAddr, ts.Key())
-			require.NoError(bm.t, err)
-			if ts.Height()+5+abi.ChainEpoch(nulls) >= dlinfo.Last() { // Next block brings us past the last epoch in dline, we need to wait for miner to post
-				bm.t.Logf("forcing post to get in before deadline closes at %d", dlinfo.Last())
-				bm.forcePoSt(ctx, ts, dlinfo)
+			// Get current deadline information for all miners, then filter by the ones that are about to
+			// close so we can force a post for them.
+			bm.postWatchMinersLk.Lock()
+			var impendingDeadlines minerDeadlines
+			for _, minerAddr := range bm.postWatchMiners {
+				dlinfo, err := bm.miner.FullNode.StateMinerProvingDeadline(ctx, minerAddr, ts.Key())
+				require.NoError(bm.t, err)
+				require.NotNil(bm.t, dlinfo, "no deadline info for miner %s", minerAddr)
+				impendingDeadlines = append(impendingDeadlines, minerDeadline{addr: minerAddr, deadline: *dlinfo})
+			}
+			bm.postWatchMinersLk.Unlock()
+			impendingDeadlines = impendingDeadlines.FilterByLast(ts.Height() + 5 + abi.ChainEpoch(nulls))
+
+			if len(impendingDeadlines) > 0 {
+				// Next block brings us too close for at least one deadline, we need to wait for miners to post
+				bm.t.Logf("forcing post to get in if due before deadline closes at %v for %v", impendingDeadlines.CloseList(), impendingDeadlines.MinerStringList())
+				for _, md := range impendingDeadlines {
+					bm.forcePoSt(ctx, ts, md.addr, md.deadline)
+				}
 			}
 
 			var target abi.ChainEpoch
@@ -216,10 +285,13 @@ func (bm *BlockMiner) MineBlocksMustPost(ctx context.Context, blocktime time.Dur
 					return
 				}
 				if !success {
-					// if we are mining a new null block and it brings us past deadline boundary we need to wait for miner to post
-					if ts.Height()+5+abi.ChainEpoch(nulls+i) >= dlinfo.Last() {
-						bm.t.Logf("forcing post to get in before deadline closes at %d", dlinfo.Last())
-						bm.forcePoSt(ctx, ts, dlinfo)
+					// if we are mining a new null block and it brings us past deadline boundary we need to wait for miners to post
+					impendingDeadlines = impendingDeadlines.FilterByLast(ts.Height() + 5 + abi.ChainEpoch(nulls+i))
+					if len(impendingDeadlines) > 0 {
+						bm.t.Logf("forcing post to get in if due before deadline closes at %v for %v", impendingDeadlines.CloseList(), impendingDeadlines.MinerStringList())
+						for _, md := range impendingDeadlines {
+							bm.forcePoSt(ctx, ts, md.addr, md.deadline)
+						}
 					}
 				}
 			}
@@ -378,4 +450,7 @@ func (bm *BlockMiner) Stop() {
 		close(bm.pause)
 		bm.pause = nil
 	}
+	bm.postWatchMinersLk.Lock()
+	bm.postWatchMiners = nil
+	bm.postWatchMinersLk.Unlock()
 }
```
