@@ -24,12 +24,12 @@ var (
 	blockTtl = 20 * time.Second // The amount of time it takes for a block request to time out

 	errLowTd = errors.New("peer's TD is too low")
-	errBusy = errors.New("busy")
+	ErrBusy = errors.New("busy")
 	errUnknownPeer = errors.New("peer's unknown or unhealthy")
-	ErrBadPeer = errors.New("action from bad peer ignored")
+	errBadPeer = errors.New("action from bad peer ignored")
 	errNoPeers = errors.New("no peers to keep download active")
 	errPendingQueue = errors.New("pending items in queue")
-	errTimeout = errors.New("timeout")
+	ErrTimeout = errors.New("timeout")
 	errEmptyHashSet = errors.New("empty hash set by peer")
 	errPeersUnavailable = errors.New("no peers available or all peers tried for block download process")
 	errAlreadyInPool = errors.New("hash already in pool")
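
This hunk exports ErrBusy and ErrTimeout and unexports errBadPeer, so the first two become sentinel errors that callers outside the downloader package can compare against, while the bad-peer error stays internal. A minimal, self-contained sketch of the sentinel-error idiom these declarations rely on (doWork and its argument are hypothetical, added only for illustration):

    package main

    import (
        "errors"
        "fmt"
    )

    // Exported sentinel: code in other packages can test for it with ==.
    var ErrBusy = errors.New("busy")

    // Unexported sentinel: only this package can reference it.
    var errBadPeer = errors.New("action from bad peer ignored")

    // doWork is a hypothetical helper used solely to show the comparison.
    func doWork(busy bool) error {
        if busy {
            return ErrBusy
        }
        return nil
    }

    func main() {
        if err := doWork(true); err == ErrBusy {
            fmt.Println("busy, try again later:", err)
        }
    }
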
@@ -68,7 +68,7 @@ type Downloader struct {
 	getBlock getBlockFn

 	// Status
-	synchronizing int32
+	synchronising int32

 	// Channels
 	newPeerCh chan *peer
@@ -119,15 +119,15 @@ func (d *Downloader) UnregisterPeer(id string) {
 	delete(d.peers, id)
 }

-// Synchronize will select the peer and use it for synchronizing. If an empty string is given
+// Synchronise will select the peer and use it for synchronising. If an empty string is given
 // it will use the best peer possible and synchronize if it's TD is higher than our own. If any of the
 // checks fail an error will be returned. This method is synchronous
-func (d *Downloader) Synchronize(id string, hash common.Hash) error {
+func (d *Downloader) Synchronise(id string, hash common.Hash) error {
 	// Make sure only one goroutine is ever allowed past this point at once
-	if !atomic.CompareAndSwapInt32(&d.synchronizing, 0, 1) {
-		return nil
+	if !atomic.CompareAndSwapInt32(&d.synchronising, 0, 1) {
+		return ErrBusy
 	}
-	defer atomic.StoreInt32(&d.synchronizing, 0)
+	defer atomic.StoreInt32(&d.synchronising, 0)

 	// Abort if the queue still contains some leftover data
 	if _, cached := d.queue.Size(); cached > 0 {
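
The CompareAndSwapInt32 guard above turns Synchronise into a single-flight operation: only the goroutine that flips the flag proceeds, and a concurrent caller now receives ErrBusy instead of a silent nil. A hedged caller-side sketch, assuming the upstream go-ethereum import paths; trySync, peerID and head are placeholders, not part of this change:

    package example

    import (
        "log"

        "github.com/ethereum/go-ethereum/common"
        "github.com/ethereum/go-ethereum/eth/downloader"
    )

    // trySync drives one synchronisation attempt against the given peer.
    func trySync(d *downloader.Downloader, peerID string, head common.Hash) {
        switch err := d.Synchronise(peerID, head); err {
        case nil:
            // Synchronisation ran to completion (or there was nothing to do).
        case downloader.ErrBusy:
            // Another goroutine already holds the synchronising flag; retry later.
        default:
            log.Println("synchronisation failed:", err)
        }
    }
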
@@ -272,7 +272,7 @@
 			// the zero hash.
 			if p == nil || (hash == common.Hash{}) {
 				d.queue.Reset()
-				return errTimeout
+				return ErrTimeout
 			}

 			// set p to the active peer. this will invalidate any hashes that may be returned
@@ -282,7 +282,7 @@
 			glog.V(logger.Debug).Infof("Hash fetching switched to new peer(%s)\n", p.id)
 		}
 	}
-	glog.V(logger.Detail).Infof("Downloaded hashes (%d) in %v\n", d.queue.Pending(), time.Since(start))
+	glog.V(logger.Debug).Infof("Downloaded hashes (%d) in %v\n", d.queue.Pending(), time.Since(start))

 	return nil
 }
@@ -384,7 +384,6 @@
 			}
 		}
 	}
-
 	glog.V(logger.Detail).Infoln("Downloaded block(s) in", time.Since(start))

 	return nil
@@ -404,11 +403,10 @@ func (d *Downloader) AddHashes(id string, hashes []common.Hash) error {
 		return fmt.Errorf("received hashes from %s while active peer is %s", id, d.activePeer)
 	}

-	if glog.V(logger.Detail) && len(hashes) != 0 {
+	if glog.V(logger.Debug) && len(hashes) != 0 {
 		from, to := hashes[0], hashes[len(hashes)-1]
-		glog.Infof("adding %d (T=%d) hashes [ %x / %x ] from: %s\n", len(hashes), d.queue.Pending(), from[:4], to[:4], id)
+		glog.V(logger.Debug).Infof("adding %d (T=%d) hashes [ %x / %x ] from: %s\n", len(hashes), d.queue.Pending(), from[:4], to[:4], id)
 	}
-
 	d.hashCh <- hashPack{id, hashes}

 	return nil