74
74
errBadPeer = errors .New ("action from bad peer ignored" )
75
75
errStallingPeer = errors .New ("peer is stalling" )
76
76
errNoPeers = errors .New ("no peers to keep download active" )
77
- errPendingQueue = errors .New ("pending items in queue" )
78
77
errTimeout = errors .New ("timeout" )
79
78
errEmptyHashSet = errors .New ("empty hash set by peer" )
80
79
errEmptyHeaderSet = errors .New ("empty header set by peer" )
90
89
errCancelBodyFetch = errors .New ("block body download canceled (requested)" )
91
90
errCancelReceiptFetch = errors .New ("receipt download canceled (requested)" )
92
91
errCancelStateFetch = errors .New ("state data download canceled (requested)" )
92
+ errCancelProcessing = errors .New ("processing canceled (requested)" )
93
93
errNoSyncActive = errors .New ("no sync active" )
94
94
)
95
95
@@ -129,7 +129,6 @@ type Downloader struct {
129
129
// Status
130
130
synchroniseMock func (id string , hash common.Hash ) error // Replacement for synchronise during testing
131
131
synchronising int32
132
- processing int32
133
132
notified int32
134
133
135
134
// Channels
@@ -215,7 +214,7 @@ func (d *Downloader) Progress() (uint64, uint64, uint64) {
215
214
216
215
// Synchronising returns whether the downloader is currently retrieving blocks.
func (d *Downloader) Synchronising() bool {
	// synchronising is flipped atomically elsewhere, so a plain atomic load
	// is sufficient here — no lock required.
	return atomic.LoadInt32(&d.synchronising) > 0
}
220
219
221
220
// RegisterPeer injects a new download peer into the set of block source to be
@@ -263,9 +262,6 @@ func (d *Downloader) Synchronise(id string, head common.Hash, td *big.Int, mode
263
262
glog .V (logger .Debug ).Infof ("Removing peer %v: %v" , id , err )
264
263
d .dropPeer (id )
265
264
266
- case errPendingQueue :
267
- glog .V (logger .Debug ).Infoln ("Synchronisation aborted:" , err )
268
-
269
265
default :
270
266
glog .V (logger .Warn ).Infof ("Synchronisation failed: %v" , err )
271
267
}
@@ -290,10 +286,6 @@ func (d *Downloader) synchronise(id string, hash common.Hash, td *big.Int, mode
290
286
if atomic .CompareAndSwapInt32 (& d .notified , 0 , 1 ) {
291
287
glog .V (logger .Info ).Infoln ("Block synchronisation started" )
292
288
}
293
- // Abort if the queue still contains some leftover data
294
- if d .queue .GetHeadResult () != nil {
295
- return errPendingQueue
296
- }
297
289
// Reset the queue, peer set and wake channels to clean any internal leftover state
298
290
d .queue .Reset ()
299
291
d .peers .Reset ()
@@ -335,7 +327,6 @@ func (d *Downloader) syncWithPeer(p *peer, hash common.Hash, td *big.Int) (err e
335
327
defer func () {
336
328
// reset on error
337
329
if err != nil {
338
- d .cancel ()
339
330
d .mux .Post (FailedEvent {err })
340
331
} else {
341
332
d .mux .Post (DoneEvent {})
@@ -365,23 +356,15 @@ func (d *Downloader) syncWithPeer(p *peer, hash common.Hash, td *big.Int) (err e
365
356
d .syncStatsChainHeight = latest
366
357
d .syncStatsLock .Unlock ()
367
358
368
- // Initiate the sync using a concurrent hash and block retrieval algorithm
359
+ // Initiate the sync using a concurrent hash and block retrieval algorithm
360
+ d .queue .Prepare (origin + 1 , d .mode , 0 )
369
361
if d .syncInitHook != nil {
370
362
d .syncInitHook (origin , latest )
371
363
}
372
- d .queue .Prepare (origin + 1 , d .mode , 0 )
373
-
374
- errc := make (chan error , 2 )
375
- go func () { errc <- d .fetchHashes61 (p , td , origin + 1 ) }()
376
- go func () { errc <- d .fetchBlocks61 (origin + 1 ) }()
377
-
378
- // If any fetcher fails, cancel the other
379
- if err := <- errc ; err != nil {
380
- d .cancel ()
381
- <- errc
382
- return err
383
- }
384
- return <- errc
364
+ return d .spawnSync (
365
+ func () error { return d .fetchHashes61 (p , td , origin + 1 ) },
366
+ func () error { return d .fetchBlocks61 (origin + 1 ) },
367
+ )
385
368
386
369
case p .version >= 62 :
387
370
// Look up the sync boundaries: the common ancestor and the target block
@@ -405,7 +388,6 @@ func (d *Downloader) syncWithPeer(p *peer, hash common.Hash, td *big.Int) (err e
405
388
switch d .mode {
406
389
case LightSync :
407
390
pivot = latest
408
-
409
391
case FastSync :
410
392
// Calculate the new fast/slow sync pivot point
411
393
pivotOffset , err := rand .Int (rand .Reader , big .NewInt (int64 (fsPivotInterval )))
@@ -426,34 +408,51 @@ func (d *Downloader) syncWithPeer(p *peer, hash common.Hash, td *big.Int) (err e
426
408
glog .V (logger .Debug ).Infof ("Fast syncing until pivot block #%d" , pivot )
427
409
}
428
410
d .queue .Prepare (origin + 1 , d .mode , pivot )
429
-
430
411
if d .syncInitHook != nil {
431
412
d .syncInitHook (origin , latest )
432
413
}
433
- errc := make (chan error , 4 )
434
- go func () { errc <- d .fetchHeaders (p , td , origin + 1 ) }() // Headers are always retrieved
435
- go func () { errc <- d .fetchBodies (origin + 1 ) }() // Bodies are retrieved during normal and fast sync
436
- go func () { errc <- d .fetchReceipts (origin + 1 ) }() // Receipts are retrieved during fast sync
437
- go func () { errc <- d .fetchNodeData () }() // Node state data is retrieved during fast sync
438
-
439
- // If any fetcher fails, cancel the others
440
- var fail error
441
- for i := 0 ; i < cap (errc ); i ++ {
442
- if err := <- errc ; err != nil {
443
- if fail == nil {
444
- fail = err
445
- d .cancel ()
446
- }
447
- }
448
- }
449
- return fail
414
+ return d .spawnSync (
415
+ func () error { return d .fetchHeaders (p , td , origin + 1 ) }, // Headers are always retrieved
416
+ func () error { return d .fetchBodies (origin + 1 ) }, // Bodies are retrieved during normal and fast sync
417
+ func () error { return d .fetchReceipts (origin + 1 ) }, // Receipts are retrieved during fast sync
418
+ func () error { return d .fetchNodeData () }, // Node state data is retrieved during fast sync
419
+ )
450
420
451
421
default :
452
422
// Something very wrong, stop right here
453
423
glog .V (logger .Error ).Infof ("Unsupported eth protocol: %d" , p .version )
454
424
return errBadPeer
455
425
}
456
- return nil
426
+ }
427
+
428
// spawnSync runs d.process and all given fetcher functions to completion in
// separate goroutines, returning the first error that appears.
func (d *Downloader) spawnSync(fetchers ...func() error) error {
	var wg sync.WaitGroup
	// One buffer slot per goroutine (fetchers + the processor) so that every
	// goroutine can deliver its result even after the collector loop below
	// stops reading — otherwise a late sender would leak.
	errc := make(chan error, len(fetchers)+1)
	wg.Add(len(fetchers) + 1)
	go func() { defer wg.Done(); errc <- d.process() }()
	for _, fn := range fetchers {
		fn := fn // capture the loop variable for the goroutine below
		go func() { defer wg.Done(); errc <- fn() }()
	}
	// Wait for the first error, then terminate the others.
	var err error
	for i := 0; i < len(fetchers)+1; i++ {
		if i == len(fetchers) {
			// Close the queue when all fetchers have exited.
			// This will cause the block processor to end when
			// it has processed the queue.
			d.queue.Close()
		}
		if err = <-errc; err != nil {
			break
		}
	}
	// Unconditionally close the queue and cancel the sync: on the early-break
	// path this tears down the remaining goroutines; on the clean path the
	// repeated Close is a no-op.
	d.queue.Close()
	d.cancel()
	wg.Wait()
	return err
}
458
457
459
458
// cancel cancels all of the operations and resets the queue. It returns true
@@ -470,12 +469,10 @@ func (d *Downloader) cancel() {
470
469
}
471
470
}
472
471
d .cancelLock .Unlock ()
473
-
474
- // Reset the queue
475
- d .queue .Reset ()
476
472
}
477
473
478
474
// Terminate interrupts the downloader, canceling all pending operations.
475
+ // The downloader cannot be reused after calling Terminate.
479
476
func (d * Downloader ) Terminate () {
480
477
atomic .StoreInt32 (& d .interrupt , 1 )
481
478
d .cancel ()
@@ -800,7 +797,6 @@ func (d *Downloader) fetchBlocks61(from uint64) error {
800
797
peer .Promote ()
801
798
peer .SetBlocksIdle ()
802
799
glog .V (logger .Detail ).Infof ("%s: delivered %d blocks" , peer , len (blocks ))
803
- go d .process ()
804
800
805
801
case errInvalidChain :
806
802
// The hash chain is invalid (blocks are not ordered properly), abort
@@ -826,7 +822,6 @@ func (d *Downloader) fetchBlocks61(from uint64) error {
826
822
peer .Demote ()
827
823
peer .SetBlocksIdle ()
828
824
glog .V (logger .Detail ).Infof ("%s: delivery partially failed: %v" , peer , err )
829
- go d .process ()
830
825
}
831
826
}
832
827
// Blocks arrived, try to update the progress
@@ -1336,10 +1331,8 @@ func (d *Downloader) fetchNodeData() error {
1336
1331
d .cancel ()
1337
1332
return
1338
1333
}
1339
- // Processing succeeded, notify state fetcher and processor of continuation
1340
- if d .queue .PendingNodeData () == 0 {
1341
- go d .process ()
1342
- } else {
1334
+ // Processing succeeded, notify state fetcher of continuation
1335
+ if d .queue .PendingNodeData () > 0 {
1343
1336
select {
1344
1337
case d .stateWakeCh <- true :
1345
1338
default :
@@ -1348,7 +1341,6 @@ func (d *Downloader) fetchNodeData() error {
1348
1341
// Log a message to the user and return
1349
1342
d .syncStatsLock .Lock ()
1350
1343
defer d .syncStatsLock .Unlock ()
1351
-
1352
1344
d .syncStatsStateDone += uint64 (delivered )
1353
1345
glog .V (logger .Info ).Infof ("imported %d state entries in %v: processed %d in total" , delivered , time .Since (start ), d .syncStatsStateDone )
1354
1346
})
@@ -1415,7 +1407,6 @@ func (d *Downloader) fetchParts(errCancel error, deliveryCh chan dataPack, deliv
1415
1407
peer .Promote ()
1416
1408
setIdle (peer )
1417
1409
glog .V (logger .Detail ).Infof ("%s: delivered %s %s(s)" , peer , packet .Stats (), strings .ToLower (kind ))
1418
- go d .process ()
1419
1410
1420
1411
case errInvalidChain :
1421
1412
// The hash chain is invalid (blocks are not ordered properly), abort
@@ -1441,7 +1432,6 @@ func (d *Downloader) fetchParts(errCancel error, deliveryCh chan dataPack, deliv
1441
1432
peer .Demote ()
1442
1433
setIdle (peer )
1443
1434
glog .V (logger .Detail ).Infof ("%s: %s delivery partially failed: %v" , peer , strings .ToLower (kind ), err )
1444
- go d .process ()
1445
1435
}
1446
1436
}
1447
1437
// Blocks assembled, try to update the progress
@@ -1508,7 +1498,6 @@ func (d *Downloader) fetchParts(errCancel error, deliveryCh chan dataPack, deliv
1508
1498
}
1509
1499
if progress {
1510
1500
progressed = true
1511
- go d .process ()
1512
1501
}
1513
1502
if request == nil {
1514
1503
continue
@@ -1545,46 +1534,13 @@ func (d *Downloader) fetchParts(errCancel error, deliveryCh chan dataPack, deliv
1545
1534
}
1546
1535
1547
1536
// process takes fetch results from the queue and tries to import them into the
1548
- // chain. The type of import operation will depend on the result contents:
1549
- // -
1550
- //
1551
- // The algorithmic flow is as follows:
1552
- // - The `processing` flag is swapped to 1 to ensure singleton access
1553
- // - The current `cancel` channel is retrieved to detect sync abortions
1554
- // - Blocks are iteratively taken from the cache and inserted into the chain
1555
- // - When the cache becomes empty, insertion stops
1556
- // - The `processing` flag is swapped back to 0
1557
- // - A post-exit check is made whether new blocks became available
1558
- // - This step is important: it handles a potential race condition between
1559
- // checking for no more work, and releasing the processing "mutex". In
1560
- // between these state changes, a block may have arrived, but a processing
1561
- // attempt denied, so we need to re-enter to ensure the block isn't left
1562
- // to idle in the cache.
1563
- func (d * Downloader ) process () {
1564
- // Make sure only one goroutine is ever allowed to process blocks at once
1565
- if ! atomic .CompareAndSwapInt32 (& d .processing , 0 , 1 ) {
1566
- return
1567
- }
1568
- // If the processor just exited, but there are freshly pending items, try to
1569
- // reenter. This is needed because the goroutine spinned up for processing
1570
- // the fresh results might have been rejected entry to to this present thread
1571
- // not yet releasing the `processing` state.
1572
- defer func () {
1573
- if atomic .LoadInt32 (& d .interrupt ) == 0 && d .queue .GetHeadResult () != nil {
1574
- d .process ()
1575
- }
1576
- }()
1577
- // Release the lock upon exit (note, before checking for reentry!)
1578
- // the import statistics to zero.
1579
- defer atomic .StoreInt32 (& d .processing , 0 )
1580
-
1581
- // Repeat the processing as long as there are results to process
1537
+ // chain. The type of import operation will depend on the result contents.
1538
+ func (d * Downloader ) process () error {
1539
+ pivot := d .queue .FastSyncPivot ()
1582
1540
for {
1583
- // Fetch the next batch of results
1584
- pivot := d .queue .FastSyncPivot () // Fetch pivot before results to prevent reset race
1585
- results := d .queue .TakeResults ()
1541
+ results := d .queue .WaitResults ()
1586
1542
if len (results ) == 0 {
1587
- return
1543
+ return nil // queue empty
1588
1544
}
1589
1545
if d .chainInsertHook != nil {
1590
1546
d .chainInsertHook (results )
@@ -1597,7 +1553,7 @@ func (d *Downloader) process() {
1597
1553
for len (results ) != 0 {
1598
1554
// Check for any termination requests
1599
1555
if atomic .LoadInt32 (& d .interrupt ) == 1 {
1600
- return
1556
+ return errCancelProcessing
1601
1557
}
1602
1558
// Retrieve the a batch of results to import
1603
1559
var (
@@ -1633,8 +1589,7 @@ func (d *Downloader) process() {
1633
1589
}
1634
1590
if err != nil {
1635
1591
glog .V (logger .Debug ).Infof ("Result #%d [%x…] processing failed: %v" , results [index ].Header .Number , results [index ].Header .Hash ().Bytes ()[:4 ], err )
1636
- d .cancel ()
1637
- return
1592
+ return err
1638
1593
}
1639
1594
// Shift the results to the next batch
1640
1595
results = results [items :]
@@ -1685,19 +1640,16 @@ func (d *Downloader) deliver(id string, destCh chan dataPack, packet dataPack, i
1685
1640
dropMeter .Mark (int64 (packet .Items ()))
1686
1641
}
1687
1642
}()
1688
- // Make sure the downloader is active
1689
- if atomic .LoadInt32 (& d .synchronising ) == 0 {
1690
- return errNoSyncActive
1691
- }
1692
1643
// Deliver or abort if the sync is canceled while queuing
1693
1644
d .cancelLock .RLock ()
1694
1645
cancel := d .cancelCh
1695
1646
d .cancelLock .RUnlock ()
1696
-
1647
+ if cancel == nil {
1648
+ return errNoSyncActive
1649
+ }
1697
1650
select {
1698
1651
case destCh <- packet :
1699
1652
return nil
1700
-
1701
1653
case <- cancel :
1702
1654
return errNoSyncActive
1703
1655
}
0 commit comments