@@ -20,6 +20,7 @@ import (
 	"bytes"
 	"errors"
 	"fmt"
+	"math"
 	mrand "math/rand"
 	"sort"
 	"time"
@@ -105,6 +106,14 @@ var (
 type txAnnounce struct {
 	origin string        // Identifier of the peer originating the notification
 	hashes []common.Hash // Batch of transaction hashes being announced
+	metas  []*txMetadata // Batch of metadata associated with the hashes (nil before eth/68)
+}
+
+// txMetadata is a set of extra data transmitted along the announcement for better
+// fetch scheduling.
+type txMetadata struct {
+	kind byte   // Transaction consensus type
+	size uint32 // Transaction size in bytes
 }
 
 // txRequest represents an in-flight transaction retrieval request destined to
@@ -120,6 +129,7 @@ type txRequest struct {
 type txDelivery struct {
 	origin string        // Identifier of the peer originating the notification
 	hashes []common.Hash // Batch of transaction hashes having been delivered
+	metas  []txMetadata  // Batch of metadata associated with the delivered hashes
 	direct bool          // Whether this is a direct reply or a broadcast
 }
@@ -155,14 +165,14 @@ type TxFetcher struct {
 
 	// Stage 1: Waiting lists for newly discovered transactions that might be
 	// broadcast without needing explicit request/reply round trips.
-	waitlist  map[common.Hash]map[string]struct{} // Transactions waiting for a potential broadcast
-	waittime  map[common.Hash]mclock.AbsTime      // Timestamps when transactions were added to the waitlist
-	waitslots map[string]map[common.Hash]struct{} // Waiting announcements grouped by peer (DoS protection)
+	waitlist  map[common.Hash]map[string]struct{}    // Transactions waiting for a potential broadcast
+	waittime  map[common.Hash]mclock.AbsTime         // Timestamps when transactions were added to the waitlist
+	waitslots map[string]map[common.Hash]*txMetadata // Waiting announcements grouped by peer (DoS protection)
 
 	// Stage 2: Queue of transactions that are waiting to be allocated to some peer
 	// to be retrieved directly.
-	announces map[string]map[common.Hash]struct{} // Set of announced transactions, grouped by origin peer
-	announced map[common.Hash]map[string]struct{} // Set of download locations, grouped by transaction hash
+	announces map[string]map[common.Hash]*txMetadata // Set of announced transactions, grouped by origin peer
+	announced map[common.Hash]map[string]struct{}    // Set of download locations, grouped by transaction hash
 
 	// Stage 3: Set of transactions currently being retrieved, some of which may be
 	// fulfilled and some rescheduled. Note, this step shares 'announces' from the
@@ -175,6 +185,7 @@ type TxFetcher struct {
 	hasTx    func(common.Hash) bool             // Retrieves a tx from the local txpool
 	addTxs   func([]*types.Transaction) []error // Insert a batch of transactions into local txpool
 	fetchTxs func(string, []common.Hash) error  // Retrieves a set of txs from a remote peer
+	dropPeer func(string)                       // Drops a peer in case of announcement violation
 
 	step  chan struct{} // Notification channel when the fetcher loop iterates
 	clock mclock.Clock  // Time wrapper to simulate in tests
@@ -183,14 +194,14 @@ type TxFetcher struct {
 
 // NewTxFetcher creates a transaction fetcher to retrieve transactions
 // based on hash announcements.
-func NewTxFetcher(hasTx func(common.Hash) bool, addTxs func([]*types.Transaction) []error, fetchTxs func(string, []common.Hash) error) *TxFetcher {
-	return NewTxFetcherForTests(hasTx, addTxs, fetchTxs, mclock.System{}, nil)
+func NewTxFetcher(hasTx func(common.Hash) bool, addTxs func([]*types.Transaction) []error, fetchTxs func(string, []common.Hash) error, dropPeer func(string)) *TxFetcher {
+	return NewTxFetcherForTests(hasTx, addTxs, fetchTxs, dropPeer, mclock.System{}, nil)
 }
 
 // NewTxFetcherForTests is a testing method to mock out the realtime clock with
 // a simulated version and the internal randomness with a deterministic one.
 func NewTxFetcherForTests(
-	hasTx func(common.Hash) bool, addTxs func([]*types.Transaction) []error, fetchTxs func(string, []common.Hash) error,
+	hasTx func(common.Hash) bool, addTxs func([]*types.Transaction) []error, fetchTxs func(string, []common.Hash) error, dropPeer func(string),
 	clock mclock.Clock, rand *mrand.Rand) *TxFetcher {
 	return &TxFetcher{
 		notify:  make(chan *txAnnounce),
@@ -199,8 +210,8 @@ func NewTxFetcherForTests(
 		quit:      make(chan struct{}),
 		waitlist:  make(map[common.Hash]map[string]struct{}),
 		waittime:  make(map[common.Hash]mclock.AbsTime),
-		waitslots: make(map[string]map[common.Hash]struct{}),
-		announces: make(map[string]map[common.Hash]struct{}),
+		waitslots: make(map[string]map[common.Hash]*txMetadata),
+		announces: make(map[string]map[common.Hash]*txMetadata),
 		announced: make(map[common.Hash]map[string]struct{}),
 		fetching:  make(map[common.Hash]string),
 		requests:  make(map[string]*txRequest),
@@ -209,14 +220,15 @@
 		hasTx:    hasTx,
 		addTxs:   addTxs,
 		fetchTxs: fetchTxs,
+		dropPeer: dropPeer,
 		clock:    clock,
 		rand:     rand,
 	}
 }
 
 // Notify announces the fetcher of the potential availability of a new batch of
 // transactions in the network.
-func (f *TxFetcher) Notify(peer string, hashes []common.Hash) error {
+func (f *TxFetcher) Notify(peer string, types []byte, sizes []uint32, hashes []common.Hash) error {
 	// Keep track of all the announced transactions
 	txAnnounceInMeter.Mark(int64(len(hashes)))
 
@@ -226,28 +238,35 @@ func (f *TxFetcher) Notify(peer string, hashes []common.Hash) error {
 	// still valuable to check here because it runs concurrent to the internal
 	// loop, so anything caught here is time saved internally.
 	var (
-		unknowns = make([]common.Hash, 0, len(hashes))
+		unknownHashes = make([]common.Hash, 0, len(hashes))
+		unknownMetas  = make([]*txMetadata, 0, len(hashes))
+
 		duplicate   int64
 		underpriced int64
 	)
-	for _, hash := range hashes {
+	for i, hash := range hashes {
 		switch {
 		case f.hasTx(hash):
 			duplicate++
 		case f.isKnownUnderpriced(hash):
 			underpriced++
 		default:
-			unknowns = append(unknowns, hash)
+			unknownHashes = append(unknownHashes, hash)
+			if types == nil {
+				unknownMetas = append(unknownMetas, nil)
+			} else {
+				unknownMetas = append(unknownMetas, &txMetadata{kind: types[i], size: sizes[i]})
+			}
 		}
 	}
 	txAnnounceKnownMeter.Mark(duplicate)
 	txAnnounceUnderpricedMeter.Mark(underpriced)
 
 	// If anything's left to announce, push it into the internal loop
-	if len(unknowns) == 0 {
+	if len(unknownHashes) == 0 {
 		return nil
 	}
-	announce := &txAnnounce{origin: peer, hashes: unknowns}
+	announce := &txAnnounce{origin: peer, hashes: unknownHashes, metas: unknownMetas}
 	select {
 	case f.notify <- announce:
 		return nil
@@ -290,6 +309,7 @@ func (f *TxFetcher) Enqueue(peer string, txs []*types.Transaction, direct bool)
 	// re-requesting them and dropping the peer in case of malicious transfers.
 	var (
 		added = make([]common.Hash, 0, len(txs))
+		metas = make([]txMetadata, 0, len(txs))
 	)
 	// proceed in batches
 	for i := 0; i < len(txs); i += 128 {
@@ -325,6 +345,10 @@
 				otherreject++
 			}
 			added = append(added, batch[j].Hash())
+			metas = append(metas, txMetadata{
+				kind: batch[j].Type(),
+				size: uint32(batch[j].Size()),
+			})
 		}
 		knownMeter.Mark(duplicate)
 		underpricedMeter.Mark(underpriced)
@@ -337,7 +361,7 @@
 		}
 	}
 	select {
-	case f.cleanup <- &txDelivery{origin: peer, hashes: added, direct: direct}:
+	case f.cleanup <- &txDelivery{origin: peer, hashes: added, metas: metas, direct: direct}:
 		return nil
 	case <-f.quit:
 		return errTerminated
@@ -394,13 +418,15 @@ func (f *TxFetcher) loop() {
 			want := used + len(ann.hashes)
 			if want > maxTxAnnounces {
 				txAnnounceDOSMeter.Mark(int64(want - maxTxAnnounces))
+
 				ann.hashes = ann.hashes[:want-maxTxAnnounces]
+				ann.metas = ann.metas[:want-maxTxAnnounces]
 			}
 			// All is well, schedule the remainder of the transactions
 			idleWait := len(f.waittime) == 0
 			_, oldPeer := f.announces[ann.origin]
 
-			for _, hash := range ann.hashes {
+			for i, hash := range ann.hashes {
 				// If the transaction is already downloading, add it to the list
 				// of possible alternates (in case the current retrieval fails) and
 				// also account it for the peer.
@@ -409,9 +435,9 @@
 
 					// Stage 2 and 3 share the set of origins per tx
 					if announces := f.announces[ann.origin]; announces != nil {
-						announces[hash] = struct{}{}
+						announces[hash] = ann.metas[i]
 					} else {
-						f.announces[ann.origin] = map[common.Hash]struct{}{hash: {}}
+						f.announces[ann.origin] = map[common.Hash]*txMetadata{hash: ann.metas[i]}
 					}
 					continue
 				}
@@ -422,22 +448,28 @@
 
 					// Stage 2 and 3 share the set of origins per tx
 					if announces := f.announces[ann.origin]; announces != nil {
-						announces[hash] = struct{}{}
+						announces[hash] = ann.metas[i]
 					} else {
-						f.announces[ann.origin] = map[common.Hash]struct{}{hash: {}}
+						f.announces[ann.origin] = map[common.Hash]*txMetadata{hash: ann.metas[i]}
 					}
 					continue
 				}
 				// If the transaction is already known to the fetcher, but not
 				// yet downloading, add the peer as an alternate origin in the
 				// waiting list.
 				if f.waitlist[hash] != nil {
+					// Ignore double announcements from the same peer. This is
+					// especially important if metadata is also passed along to
+					// prevent malicious peers flip-flopping good/bad values.
+					if _, ok := f.waitlist[hash][ann.origin]; ok {
+						continue
+					}
 					f.waitlist[hash][ann.origin] = struct{}{}
 
 					if waitslots := f.waitslots[ann.origin]; waitslots != nil {
-						waitslots[hash] = struct{}{}
+						waitslots[hash] = ann.metas[i]
 					} else {
-						f.waitslots[ann.origin] = map[common.Hash]struct{}{hash: {}}
+						f.waitslots[ann.origin] = map[common.Hash]*txMetadata{hash: ann.metas[i]}
 					}
 					continue
 				}
@@ -446,9 +478,9 @@
 				f.waittime[hash] = f.clock.Now()
 
 				if waitslots := f.waitslots[ann.origin]; waitslots != nil {
-					waitslots[hash] = struct{}{}
+					waitslots[hash] = ann.metas[i]
 				} else {
-					f.waitslots[ann.origin] = map[common.Hash]struct{}{hash: {}}
+					f.waitslots[ann.origin] = map[common.Hash]*txMetadata{hash: ann.metas[i]}
 				}
 			}
 			// If a new item was added to the waitlist, schedule it into the fetcher
@@ -474,9 +506,9 @@
 					f.announced[hash] = f.waitlist[hash]
 					for peer := range f.waitlist[hash] {
 						if announces := f.announces[peer]; announces != nil {
-							announces[hash] = struct{}{}
+							announces[hash] = f.waitslots[peer][hash]
 						} else {
-							f.announces[peer] = map[common.Hash]struct{}{hash: {}}
+							f.announces[peer] = map[common.Hash]*txMetadata{hash: f.waitslots[peer][hash]}
 						}
 						delete(f.waitslots[peer], hash)
 						if len(f.waitslots[peer]) == 0 {
@@ -545,10 +577,27 @@
 
 		case delivery := <-f.cleanup:
 			// Independent of whether the delivery was direct or broadcast, remove all
-			// traces of the hash from internal trackers
-			for _, hash := range delivery.hashes {
+			// traces of the hash from internal trackers. That said, compare any
+			// advertised metadata with the real ones and drop bad peers.
+			for i, hash := range delivery.hashes {
 				if _, ok := f.waitlist[hash]; ok {
 					for peer, txset := range f.waitslots {
+						if meta := txset[hash]; meta != nil {
+							if delivery.metas[i].kind != meta.kind {
+								log.Warn("Announced transaction type mismatch", "peer", peer, "tx", hash, "type", delivery.metas[i].kind, "ann", meta.kind)
+								f.dropPeer(peer)
+							} else if delivery.metas[i].size != meta.size {
+								log.Warn("Announced transaction size mismatch", "peer", peer, "tx", hash, "size", delivery.metas[i].size, "ann", meta.size)
+								if math.Abs(float64(delivery.metas[i].size)-float64(meta.size)) > 8 {
+									// Normally we should drop a peer considering this is a protocol violation.
+									// However, due to the RLP vs consensus format messiness, allow a few bytes
+									// of wiggle-room where we only warn, but don't drop.
+									//
+									// TODO(karalabe): Get rid of this relaxation when clients are proven stable.
+									f.dropPeer(peer)
+								}
+							}
+						}
 						delete(txset, hash)
 						if len(txset) == 0 {
 							delete(f.waitslots, peer)
@@ -558,6 +607,22 @@
 					delete(f.waittime, hash)
 				} else {
 					for peer, txset := range f.announces {
+						if meta := txset[hash]; meta != nil {
+							if delivery.metas[i].kind != meta.kind {
+								log.Warn("Announced transaction type mismatch", "peer", peer, "tx", hash, "type", delivery.metas[i].kind, "ann", meta.kind)
+								f.dropPeer(peer)
+							} else if delivery.metas[i].size != meta.size {
+								log.Warn("Announced transaction size mismatch", "peer", peer, "tx", hash, "size", delivery.metas[i].size, "ann", meta.size)
+								if math.Abs(float64(delivery.metas[i].size)-float64(meta.size)) > 8 {
+									// Normally we should drop a peer considering this is a protocol violation.
+									// However, due to the RLP vs consensus format messiness, allow a few bytes
+									// of wiggle-room where we only warn, but don't drop.
+									//
+									// TODO(karalabe): Get rid of this relaxation when clients are proven stable.
+									f.dropPeer(peer)
+								}
+							}
+						}
 						delete(txset, hash)
 						if len(txset) == 0 {
 							delete(f.announces, peer)
@@ -859,7 +924,7 @@ func (f *TxFetcher) forEachPeer(peers map[string]struct{}, do func(peer string))
 
 // forEachHash does a range loop over a map of hashes in production, but during
 // testing it does a deterministic sorted random to allow reproducing issues.
-func (f *TxFetcher) forEachHash(hashes map[common.Hash]struct{}, do func(hash common.Hash) bool) {
+func (f *TxFetcher) forEachHash(hashes map[common.Hash]*txMetadata, do func(hash common.Hash) bool) {
 	// If we're running production, use whatever Go's map gives us
 	if f.rand == nil {
 		for hash := range hashes {
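
For orientation, here is a minimal sketch (not part of the commit) of how a caller might wire up the extended constructor and the new announcement path. The callback bodies and peer names are hypothetical stand-ins; only the NewTxFetcher and Notify signatures come from the diff above.

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/eth/fetcher"
)

func main() {
	f := fetcher.NewTxFetcher(
		// hasTx: nothing is pooled yet in this toy setup.
		func(common.Hash) bool { return false },
		// addTxs: accept every delivered transaction.
		func(txs []*types.Transaction) []error { return make([]error, len(txs)) },
		// fetchTxs: a real node would send GetPooledTransactions here.
		func(peer string, hashes []common.Hash) error {
			fmt.Printf("fetching %d txs from %s\n", len(hashes), peer)
			return nil
		},
		// dropPeer: invoked when announced metadata disagrees with the delivery.
		func(peer string) { fmt.Println("dropping", peer) },
	)
	f.Start()
	defer f.Stop()

	hash := common.HexToHash("0x01")

	// An eth/68 peer announces the consensus type and size of each transaction.
	_ = f.Notify("peer-eth68", []byte{types.DynamicFeeTxType}, []uint32{128}, []common.Hash{hash})

	// Older peers carry no metadata: nil slices yield nil *txMetadata entries.
	_ = f.Notify("peer-eth67", nil, nil, []common.Hash{hash})
}

Passing nil types/sizes is what keeps pre-eth/68 peers working: txAnnounce.metas holds pointers that may be nil, and the cleanup path only validates metadata when an announcement actually recorded some.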