
Commit b9bac1f

rjl493456442 authored and zsfelfoldi committed
les: fix and slim the unit tests of les (#20247)
* les: loosen restrictions in unit tests
* les: update unit tests
* les, light: slim the unit tests
1 parent fc3661f commit b9bac1f

8 files changed: +61, -31 lines changed


les/clientpool.go
Lines changed: 3 additions & 0 deletions

@@ -459,6 +459,9 @@ func (f *clientPool) addBalance(id enode.ID, amount uint64, setTotal bool) {
 	defer func() {
 		c.balanceTracker.setBalance(pb.value, negBalance)
 		if !c.priority && pb.value > 0 {
+			// The capacity should be adjusted based on the requirement,
+			// but we have no idea about the new capacity; a second
+			// call is needed to update it.
 			c.priority = true
 			c.balanceTracker.addCallback(balanceCallbackZero, 0, func() { f.balanceExhausted(id) })
 		}

les/clientpool_test.go
Lines changed: 24 additions & 7 deletions

@@ -68,6 +68,14 @@ func (i poolTestPeer) freeClientId() string {
 
 func (i poolTestPeer) updateCapacity(uint64) {}
 
+type poolTestPeerWithCap struct {
+	poolTestPeer
+
+	cap uint64
+}
+
+func (i *poolTestPeerWithCap) updateCapacity(cap uint64) { i.cap = cap }
+
 func testClientPool(t *testing.T, connLimit, clientCount, paidCount int, randomDisconnect bool) {
 	rand.Seed(time.Now().UnixNano())
 	var (
@@ -308,9 +316,9 @@ func TestFreeClientKickedOut(t *testing.T) {
 
 	for i := 0; i < 10; i++ {
 		pool.connect(poolTestPeer(i), 1)
-		clock.Run(100 * time.Millisecond)
+		clock.Run(time.Millisecond)
 	}
-	if pool.connect(poolTestPeer(11), 1) {
+	if pool.connect(poolTestPeer(10), 1) {
 		t.Fatalf("New free client should be rejected")
 	}
 	clock.Run(5 * time.Minute)
@@ -320,8 +328,8 @@ func TestFreeClientKickedOut(t *testing.T) {
 	for i := 0; i < 10; i++ {
 		select {
 		case id := <-kicked:
-			if id != i {
-				t.Fatalf("Kicked client mismatch, want %v, got %v", i, id)
+			if id >= 10 {
+				t.Fatalf("Old client should be kicked, now got: %d", id)
 			}
 		case <-time.NewTimer(time.Second).C:
 			t.Fatalf("timeout")
@@ -364,11 +372,20 @@ func TestDowngradePriorityClient(t *testing.T) {
 	pool.setLimits(10, uint64(10)) // Total capacity limit is 10
 	pool.setPriceFactors(priceFactors{1, 0, 1}, priceFactors{1, 0, 1})
 
-	pool.addBalance(poolTestPeer(0).ID(), uint64(time.Minute), false)
-	pool.connect(poolTestPeer(0), 10)
+	p := &poolTestPeerWithCap{
+		poolTestPeer: poolTestPeer(0),
+	}
+	pool.addBalance(p.ID(), uint64(time.Minute), false)
+	pool.connect(p, 10)
+	if p.cap != 10 {
+		t.Fatalf("The capacity of the priority peer hasn't been updated, got: %d", p.cap)
+	}
+
 	clock.Run(time.Minute)             // All positive balance should be used up.
 	time.Sleep(300 * time.Millisecond) // Ensure the callback is called
-
+	if p.cap != 1 {
+		t.Fatalf("The capacity of the peer should be downgraded, got: %d", p.cap)
+	}
 	pb := pool.ndb.getOrNewPB(poolTestPeer(0).ID())
 	if pb.value != 0 {
 		t.Fatalf("Positive balance mismatch, want %v, got %v", 0, pb.value)

les/distributor.go
Lines changed: 8 additions & 6 deletions

@@ -110,13 +110,15 @@ func (d *requestDistributor) registerTestPeer(p distPeer) {
 	d.peerLock.Unlock()
 }
 
-// distMaxWait is the maximum waiting time after which further necessary waiting
-// times are recalculated based on new feedback from the servers
-const distMaxWait = time.Millisecond * 50
+var (
+	// distMaxWait is the maximum waiting time after which further necessary waiting
+	// times are recalculated based on new feedback from the servers
+	distMaxWait = time.Millisecond * 50
 
-// waitForPeers is the time window in which a request does not fail even if it
-// has no suitable peers to send to at the moment
-const waitForPeers = time.Second * 3
+	// waitForPeers is the time window in which a request does not fail even if it
+	// has no suitable peers to send to at the moment
+	waitForPeers = time.Second * 3
+)
 
 // main event loop
 func (d *requestDistributor) loop() {
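
Turning distMaxWait and waitForPeers into package-level variables instead of constants is what lets the test changes below shorten or disable these timers. As a rough illustration (not part of the commit), a test living in package les next to distributor_test.go could override the knobs and restore them afterwards; the test name and the explicit restore are assumptions of this sketch, the commit itself simply sets waitForPeers = 0:

// Hypothetical test in package les; assumes the testing and time imports
// already present in distributor_test.go.
func TestDistributorFastTimings(t *testing.T) {
	oldWait, oldMax := waitForPeers, distMaxWait
	defer func() {
		// Restore the package-level defaults so later tests see the usual timing.
		waitForPeers, distMaxWait = oldWait, oldMax
	}()

	waitForPeers = 0                   // fail immediately when no suitable peer is available
	distMaxWait = 5 * time.Millisecond // recalculate waiting times more frequently

	// ... drive the request distributor here, as testRequestDistributor does ...
}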

les/distributor_test.go
Lines changed: 5 additions & 2 deletions

@@ -86,8 +86,8 @@ func (p *testDistPeer) worker(t *testing.T, checkOrder bool, stop chan struct{})
 const (
 	testDistBufLimit       = 10000000
 	testDistMaxCost        = 1000000
-	testDistPeerCount      = 5
-	testDistReqCount       = 5000
+	testDistPeerCount      = 2
+	testDistReqCount       = 10
 	testDistMaxResendCount = 3
 )
 
@@ -128,6 +128,9 @@ func testRequestDistributor(t *testing.T, resend bool) {
 		go peers[i].worker(t, !resend, stop)
 		dist.registerTestPeer(peers[i])
 	}
+	// Disable the mechanism that keeps a request waiting for a while
+	// even when there is no suitable peer to send it to right now.
+	waitForPeers = 0
 
 	var wg sync.WaitGroup

les/odr_test.go
Lines changed: 6 additions & 1 deletion

@@ -193,6 +193,9 @@ func testOdr(t *testing.T, protocol int, expFail uint64, checkCached bool, fn od
 	if clientHead.Number.Uint64() != 4 {
 		t.Fatalf("Failed to sync the chain with server, head: %v", clientHead.Number.Uint64())
 	}
+	// Disable the mechanism that keeps a request waiting for a while
+	// even when there is no suitable peer to send it to right now.
+	waitForPeers = 0
 
 	test := func(expFail uint64) {
 		// Mark this as a helper to put the failures at the correct lines
@@ -202,7 +205,9 @@ func testOdr(t *testing.T, protocol int, expFail uint64, checkCached bool, fn od
 		bhash := rawdb.ReadCanonicalHash(server.db, i)
 		b1 := fn(light.NoOdr, server.db, server.handler.server.chainConfig, server.handler.blockchain, nil, bhash)
 
-		ctx, cancel := context.WithTimeout(context.Background(), 200*time.Millisecond)
+		// Set the timeout to 1 second here to give Travis enough time
+		// to complete the operation.
+		ctx, cancel := context.WithTimeout(context.Background(), time.Second)
 		b2 := fn(ctx, client.db, client.handler.backend.chainConfig, nil, client.handler.backend.blockchain, bhash)
 		cancel()

les/sync_test.go
Lines changed: 1 addition & 1 deletion

@@ -89,7 +89,7 @@ func testCheckpointSyncing(t *testing.T, protocol int, syncMode int) {
 	for {
 		_, hash, _, err := server.handler.server.oracle.contract.Contract().GetLatestCheckpoint(nil)
 		if err != nil || hash == [32]byte{} {
-			time.Sleep(100 * time.Millisecond)
+			time.Sleep(10 * time.Millisecond)
 			continue
 		}
 		break

les/test_helper.go
Lines changed: 2 additions & 2 deletions

@@ -71,10 +71,10 @@ var (
 
 var (
 	// The block frequency for creating checkpoint(only used in test)
-	sectionSize = big.NewInt(512)
+	sectionSize = big.NewInt(128)
 
 	// The number of confirmations needed to generate a checkpoint(only used in test).
-	processConfirms = big.NewInt(4)
+	processConfirms = big.NewInt(1)
 
 	// The token bucket buffer limit for testing purpose.
 	testBufLimit = uint64(1000000)

light/postprocess.go
Lines changed: 12 additions & 12 deletions

@@ -79,21 +79,21 @@ var (
 	}
 	// TestServerIndexerConfig wraps a set of configs as a test indexer config for server side.
 	TestServerIndexerConfig = &IndexerConfig{
-		ChtSize:           512,
-		ChtConfirms:       4,
-		BloomSize:         64,
-		BloomConfirms:     4,
-		BloomTrieSize:     512,
-		BloomTrieConfirms: 4,
+		ChtSize:           128,
+		ChtConfirms:       1,
+		BloomSize:         16,
+		BloomConfirms:     1,
+		BloomTrieSize:     128,
+		BloomTrieConfirms: 1,
 	}
 	// TestClientIndexerConfig wraps a set of configs as a test indexer config for client side.
 	TestClientIndexerConfig = &IndexerConfig{
-		ChtSize:           512,
-		ChtConfirms:       32,
-		BloomSize:         512,
-		BloomConfirms:     32,
-		BloomTrieSize:     512,
-		BloomTrieConfirms: 32,
+		ChtSize:           128,
+		ChtConfirms:       8,
+		BloomSize:         128,
+		BloomConfirms:     8,
+		BloomTrieSize:     128,
+		BloomTrieConfirms: 8,
 	}
 )
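
The slimmed values above still keep the client and server test configs in step: both sides use the same section sizes (matching sectionSize = 128 in les/test_helper.go), while the client waits for more confirmations than the server. A standalone sketch of that consistency check follows; the invariants themselves are assumptions read off these values, not something the commit states:

package main

import "fmt"

// indexerParams mirrors only the IndexerConfig fields compared below.
type indexerParams struct {
	ChtSize, ChtConfirms             uint64
	BloomTrieSize, BloomTrieConfirms uint64
}

func main() {
	// Values taken from the diff above.
	server := indexerParams{ChtSize: 128, ChtConfirms: 1, BloomTrieSize: 128, BloomTrieConfirms: 1}
	client := indexerParams{ChtSize: 128, ChtConfirms: 8, BloomTrieSize: 128, BloomTrieConfirms: 8}
	sectionSize := uint64(128) // les/test_helper.go after this commit

	// Assumed invariant: section sizes stay in lockstep across server, client and checkpoint generation.
	if server.ChtSize != client.ChtSize || server.ChtSize != sectionSize {
		fmt.Println("section sizes out of sync")
	}
	// Assumed invariant: the client waits for at least as many confirmations as the server.
	if client.ChtConfirms < server.ChtConfirms || client.BloomTrieConfirms < server.BloomTrieConfirms {
		fmt.Println("client confirmations should not be lower than the server's")
	}
}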
