@@ -53,6 +53,11 @@ type syncPack struct {
 	ignoreInitial bool
 }
 
+type hashPack struct {
+	peerId string
+	hashes []common.Hash
+}
+
 type Downloader struct {
 	mu    sync.RWMutex
 	queue *queue
@@ -69,7 +74,7 @@ type Downloader struct {
 
 	// Channels
 	newPeerCh chan *peer
-	hashCh    chan []common.Hash
+	hashCh    chan hashPack
 	blockCh   chan blockPack
 }
 
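Note (illustrative aside, not part of the diff): wrapping the hashes in a hashPack means every delivery on hashCh carries the id of the peer that sent it, so the fetch loop can recognise and drop late replies from a peer it has already abandoned. A minimal, self-contained sketch of that pattern; the names here (pack, peer-1, peer-2) are stand-ins, not the downloader's own types:

	package main

	import "fmt"

	// pack mirrors the idea of hashPack: the payload plus the id of the peer
	// that produced it.
	type pack struct {
		peerId string
		hashes []string // stand-in for []common.Hash
	}

	func main() {
		ch := make(chan pack, 1)
		activePeer := "peer-2" // suppose we already switched away from peer-1

		// A late reply arrives from the peer we gave up on.
		ch <- pack{peerId: "peer-1", hashes: []string{"0xaa", "0xbb"}}

		p := <-ch
		if p.peerId != activePeer {
			// With a plain []common.Hash channel this delivery would be
			// indistinguishable from one sent by the active peer.
			fmt.Printf("dropping %d stale hashes from %s\n", len(p.hashes), p.peerId)
			return
		}
		// ...otherwise process p.hashes as usual.
	}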
@@ -80,7 +85,7 @@ func New(hasBlock hashCheckFn, getBlock getBlockFn) *Downloader {
 		hasBlock:  hasBlock,
 		getBlock:  getBlock,
 		newPeerCh: make(chan *peer, 1),
-		hashCh:    make(chan []common.Hash, 1),
+		hashCh:    make(chan hashPack, 1),
 		blockCh:   make(chan blockPack, 1),
 	}
 
@@ -235,38 +240,50 @@ func (d *Downloader) getFromPeer(p *peer, hash common.Hash, ignoreInitial bool)
 }
 
 // XXX Make synchronous
-func (d *Downloader) startFetchingHashes(p *peer, hash common.Hash, ignoreInitial bool) error {
+func (d *Downloader) startFetchingHashes(p *peer, h common.Hash, ignoreInitial bool) error {
 	atomic.StoreInt32(&d.fetchingHashes, 1)
 	defer atomic.StoreInt32(&d.fetchingHashes, 0)
 
-	if d.queue.has(hash) {
+	if d.queue.has(h) {
 		return errAlreadyInPool
 	}
 
-	glog.V(logger.Debug).Infof("Downloading hashes (%x) from %s", hash.Bytes()[:4], p.id)
+	glog.V(logger.Debug).Infof("Downloading hashes (%x) from %s", h[:4], p.id)
 
 	start := time.Now()
 
 	// We ignore the initial hash in some cases (e.g. we received a block without it's parent)
 	// In such circumstances we don't need to download the block so don't add it to the queue.
 	if !ignoreInitial {
 		// Add the hash to the queue first
-		d.queue.hashPool.Add(hash)
+		d.queue.hashPool.Add(h)
 	}
 	// Get the first batch of hashes
-	p.getHashes(hash)
+	p.getHashes(h)
 
-	failureResponseTimer := time.NewTimer(hashTtl)
+	var (
+		failureResponseTimer = time.NewTimer(hashTtl)
+		attemptedPeers       = make(map[string]bool) // attempted peers will help with retries
+		activePeer           = p                     // active peer will help determine the current active peer
+		hash                 common.Hash             // common and last hash
+	)
+	attemptedPeers[p.id] = true
 
 out:
 	for {
 		select {
-		case hashes := <-d.hashCh:
+		case hashPack := <-d.hashCh:
+			// make sure the active peer is giving us the hashes
+			if hashPack.peerId != activePeer.id {
+				glog.V(logger.Debug).Infof("Received hashes from incorrect peer(%s)\n", hashPack.peerId)
+				break
+			}
+
 			failureResponseTimer.Reset(hashTtl)
 
 			var (
-				done bool        // determines whether we're done fetching hashes (i.e. common hash found)
-				hash common.Hash // current and common hash
+				hashes = hashPack.hashes
+				done   bool // determines whether we're done fetching hashes (i.e. common hash found)
 			)
 			hashSet := set.New()
 			for _, hash = range hashes {
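Note (illustrative aside, not part of the diff): hash is now declared once in the outer var block and only assigned, not re-declared, inside the range loop, so after each batch it still holds the last hash received; that is the value the later activePeer.getHashes(hash) calls resume from. A tiny sketch of the Go behaviour this relies on, with made-up names (last, hashes):

	package main

	import "fmt"

	func main() {
		var last string // declared outside the loop, like `hash` in the var block above

		hashes := []string{"0xa1", "0xb2", "0xc3"}
		for _, last = range hashes { // assignment, not :=, so last survives the loop
			// ...inspect each hash in the batch...
		}

		// After the loop, last holds the final hash of the batch, which is exactly
		// what a retry needs in order to resume fetching from where it stopped.
		fmt.Println("resume fetching from", last)
	}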
@@ -283,13 +300,13 @@ out:
 
 			// Add hashes to the chunk set
 			if len(hashes) == 0 { // Make sure the peer actually gave you something valid
-				glog.V(logger.Debug).Infof("Peer (%s) responded with empty hash set\n", p.id)
+				glog.V(logger.Debug).Infof("Peer (%s) responded with empty hash set\n", activePeer.id)
 				d.queue.reset()
 
 				return errEmptyHashSet
 			} else if !done { // Check if we're done fetching
 				// Get the next set of hashes
-				p.getHashes(hashes[len(hashes)-1])
+				activePeer.getHashes(hash)
 			} else { // we're done
 				// The offset of the queue is determined by the highest known block
 				var offset int
@@ -303,12 +320,30 @@ out:
 			}
 		case <-failureResponseTimer.C:
 			glog.V(logger.Debug).Infof("Peer (%s) didn't respond in time for hash request\n", p.id)
-			// TODO instead of reseting the queue select a new peer from which we can start downloading hashes.
-			// 1. check for peer's best hash to be included in the current hash set;
-			// 2. resume from last point (hashes[len(hashes)-1]) using the newly selected peer.
-			d.queue.reset()
 
-			return errTimeout
+			var p *peer // p will be set if a peer can be found
+			// Attempt to find a new peer by checking inclusion of peers best hash in our
+			// already fetched hash list. This can't guarantee 100% correctness but does
+			// a fair job. This is always either correct or false incorrect.
+			for id, peer := range d.peers {
+				if d.queue.hashPool.Has(peer.recentHash) && !attemptedPeers[id] {
+					p = peer
+					break
+				}
+			}
+
+			// if all peers have been tried, abort the process entirely or if the hash is
+			// the zero hash.
+			if p == nil || (hash == common.Hash{}) {
+				d.queue.reset()
+				return errTimeout
+			}
+
+			// set p to the active peer. this will invalidate any hashes that may be returned
+			// by our previous (delayed) peer.
+			activePeer = p
+			p.getHashes(hash)
+			glog.V(logger.Debug).Infof("Hash fetching switched to new peer(%s)\n", p.id)
 		}
 	}
 	glog.V(logger.Detail).Infof("Downloaded hashes (%d) in %v\n", d.queue.hashPool.Size(), time.Since(start))
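Note (illustrative aside, not part of the diff): instead of resetting the queue on the first timeout, the new branch above looks for an untried peer whose most recent advertised hash is already in the fetched hash pool, on the assumption that such a peer agrees with the chain segment downloaded so far and can resume from the last hash. A hedged, self-contained sketch of that selection; peerInfo, pickFallback and the map arguments are stand-ins, not the downloader's API:

	package main

	import "fmt"

	// peerInfo stands in for the downloader's *peer; recentHash mirrors peer.recentHash.
	type peerInfo struct {
		id         string
		recentHash string
	}

	// pickFallback returns an untried peer whose best-known hash we have already
	// fetched, i.e. a peer that plausibly shares the chain segment downloaded so
	// far. A nil result corresponds to the reset-and-errTimeout path above.
	func pickFallback(peers map[string]*peerInfo, fetched, attempted map[string]bool) *peerInfo {
		for id, p := range peers {
			if fetched[p.recentHash] && !attempted[id] {
				return p
			}
		}
		return nil
	}

	func main() {
		peers := map[string]*peerInfo{
			"a": {id: "a", recentHash: "0x01"},
			"b": {id: "b", recentHash: "0x02"},
		}
		fetched := map[string]bool{"0x02": true} // we already fetched b's best hash
		attempted := map[string]bool{"a": true}  // a has timed out on us before

		if p := pickFallback(peers, fetched, attempted); p != nil {
			fmt.Println("retrying hash fetch with peer", p.id)
		} else {
			fmt.Println("no suitable peer found, resetting queue")
		}
	}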
@@ -454,7 +489,7 @@ func (d *Downloader) AddHashes(id string, hashes []common.Hash) error {
 		glog.Infof("adding %d (T=%d) hashes [ %x / %x ] from: %s\n", len(hashes), d.queue.hashPool.Size(), from[:4], to[:4], id)
 	}
 
-	d.hashCh <- hashes
+	d.hashCh <- hashPack{id, hashes}
 
 	return nil
 }