7
7
import operator
8
8
import random
9
9
import struct
10
- import time
11
10
from abc import (
12
11
ABC ,
13
12
abstractmethod
39
38
40
39
from eth_utils import (
41
40
decode_hex ,
41
+ to_tuple ,
42
42
)
43
43
44
44
from eth_typing import BlockNumber , Hash32
@@ -155,14 +155,6 @@ class BasePeer(BaseService):
155
155
head_td : int = None
156
156
head_hash : Hash32 = None
157
157
158
- # TODO: Instead of a fixed timeout, we should instead monitor response
159
- # times for the peer and adjust our timeout accordingly
160
- _response_timeout = 60
161
- pending_requests : Dict [
162
- Type [protocol .Command ],
163
- Tuple ['BaseRequest' , 'asyncio.Future[protocol._DecodedMsgType]' ],
164
- ]
165
-
166
158
def __init__ (self ,
167
159
remote : Node ,
168
160
privkey : datatypes .PrivateKey ,
@@ -189,8 +181,6 @@ def __init__(self,
189
181
self .start_time = datetime .datetime .now ()
190
182
self .received_msgs : Dict [protocol .Command , int ] = collections .defaultdict (int )
191
183
192
- self .pending_requests = {}
193
-
194
184
self .egress_mac = egress_mac
195
185
self .ingress_mac = ingress_mac
196
186
# FIXME: Yes, the encryption is insecure, see: https://github.com/ethereum/devp2p/issues/32
@@ -210,6 +200,18 @@ async def process_sub_proto_handshake(
210
200
self , cmd : protocol .Command , msg : protocol ._DecodedMsgType ) -> None :
211
201
raise NotImplementedError ("Must be implemented by subclasses" )
212
202
203
@contextlib.contextmanager
def collect_sub_proto_messages(self) -> Iterator['MsgBuffer']:
    """
    Buffer every sub-protocol message this peer receives while the
    context is active.

    Yields a ``MsgBuffer`` subscribed to this peer for the duration of
    the ``with`` block; callers drain it afterwards via
    ``MsgBuffer.get_messages()``.

    :raises RuntimeError: if the peer is not currently running.
    """
    if not self.is_running:
        raise RuntimeError("Cannot collect messages if peer is not running")

    buffer = MsgBuffer()
    with buffer.subscribe_peer(self):
        yield buffer
213
215
@property
214
216
def received_msgs_count (self ) -> int :
215
217
return sum (self .received_msgs .values ())
@@ -400,22 +402,6 @@ def handle_sub_proto_msg(self, cmd: protocol.Command, msg: protocol._DecodedMsgT
400
402
else :
401
403
self .logger .warn ("Peer %s has no subscribers, discarding %s msg" , self , cmd )
402
404
403
- if cmd_type in self .pending_requests :
404
- request , future = self .pending_requests [cmd_type ]
405
- try :
406
- request .validate_response (msg )
407
- except ValidationError as err :
408
- self .logger .debug (
409
- "Response validation failure for pending %s request from peer %s: %s" ,
410
- cmd_type .__name__ ,
411
- self ,
412
- err ,
413
- )
414
- pass
415
- else :
416
- future .set_result (msg )
417
- self .pending_requests .pop (cmd_type )
418
-
419
405
def process_msg (self , cmd : protocol .Command , msg : protocol ._DecodedMsgType ) -> None :
420
406
if cmd .is_base_protocol :
421
407
self .handle_p2p_msg (cmd , msg )
@@ -640,6 +626,25 @@ def subscribe(self, peer_pool: 'PeerPool') -> Iterator[None]:
640
626
finally :
641
627
peer_pool .unsubscribe (self )
642
628
629
@contextlib.contextmanager
def subscribe_peer(self, peer: BasePeer) -> Iterator[None]:
    """
    Temporarily register this subscriber with *peer*.

    The subscription is removed when the ``with`` block exits, even if
    an exception is raised inside it.
    """
    peer.add_subscriber(self)
    try:
        yield
    finally:
        # Always unsubscribe so a failed body cannot leak the subscription.
        peer.remove_subscriber(self)
636
+
637
+
638
class MsgBuffer(PeerSubscriber):
    """
    Peer subscriber that simply accumulates incoming messages so they
    can be drained later via :meth:`get_messages`.
    """
    logger = logging.getLogger('p2p.peer.MsgBuffer')
    msg_queue_maxsize = 500
    # Subscribing to the base Command type means we receive every message.
    subscription_msg_types = {protocol.Command}

    @to_tuple
    def get_messages(self) -> Iterator['PEER_MSG_TYPE']:
        """Drain everything currently queued without blocking; the
        ``@to_tuple`` decorator materializes the result as a tuple."""
        queue = self.msg_queue
        while not queue.empty():
            yield queue.get_nowait()
647
+
643
648
644
649
class PeerPool (BaseService , AsyncIterable [BasePeer ]):
645
650
"""
@@ -695,28 +700,33 @@ def unsubscribe(self, subscriber: PeerSubscriber) -> None:
695
700
peer .remove_subscriber (subscriber )
696
701
697
702
async def start_peer(self, peer: BasePeer) -> None:
    """
    Launch *peer* and, once its DAO-fork check passes, add it to the pool.

    Sub-protocol messages that arrive while the fork check is in flight
    are buffered and handed to ``_add_peer`` so subscribers do not miss
    them.  Peers on the wrong side of the DAO fork are disconnected
    instead of being added.
    """
    asyncio.ensure_future(peer.run())
    # Give the peer a moment to come up before talking to it.
    await self.wait(peer.events.started.wait(), timeout=1)
    try:
        # Although connect() may seem like a more appropriate place to
        # perform the DAO fork check, we do it here because we want to
        # perform it for incoming peer connections as well.
        with peer.collect_sub_proto_messages() as buffer:
            await self.ensure_same_side_on_dao_fork(peer)
    except DAOForkCheckFailure as err:
        self.logger.debug("DAO fork check with %s failed: %s", peer, err)
        await peer.disconnect(DisconnectReason.useless_peer)
        return

    # Buffer entries are (peer, cmd, msg) triples; _add_peer wants (cmd, msg).
    buffered = tuple((cmd, msg) for _, cmd, msg in buffer.get_messages())
    self._add_peer(peer, buffered)
709
718
710
719
def _add_peer (self ,
711
720
peer : BasePeer ,
712
- msgs : List [Tuple [protocol .Command , protocol ._DecodedMsgType ]]) -> None :
721
+ msgs : Tuple [Tuple [protocol .Command , protocol ._DecodedMsgType ], ... ]) -> None :
713
722
"""Add the given peer to the pool.
714
723
715
724
Appart from adding it to our list of connected nodes and adding each of our subscriber's
716
725
to the peer, we also add the given messages to our subscriber's queues.
717
726
"""
718
727
self .logger .info ('Adding %s to pool' , peer )
719
728
self .connected_nodes [peer .remote ] = peer
729
+ peer .add_finished_callback (self ._peer_finished )
720
730
for subscriber in self ._subscribers :
721
731
subscriber .register_peer (peer )
722
732
peer .add_subscriber (subscriber )
@@ -787,15 +797,13 @@ async def connect_to_nodes(self, nodes: Iterator[Node]) -> None:
787
797
await self .start_peer (peer )
788
798
789
799
async def ensure_same_side_on_dao_fork (
790
- self , peer : BasePeer ) -> List [ Tuple [ protocol . Command , protocol . _DecodedMsgType ]] :
800
+ self , peer : BasePeer ) -> None :
791
801
"""Ensure we're on the same side of the DAO fork as the given peer.
792
802
793
803
In order to do that we have to request the DAO fork block and its parent, but while we
794
804
wait for that we may receive other messages from the peer, which are returned so that they
795
805
can be re-added to our subscribers' queues when the peer is finally added to the pool.
796
806
"""
797
- from trinity .protocol .base_block_headers import BaseBlockHeaders
798
- msgs = []
799
807
for start_block , vm_class in self .vm_configuration :
800
808
if not issubclass (vm_class , HomesteadVM ):
801
809
continue
@@ -806,46 +814,33 @@ async def ensure_same_side_on_dao_fork(
806
814
break
807
815
808
816
start_block = vm_class .dao_fork_block_number - 1
809
- # TODO: This can be either an `ETHPeer` or an `LESPeer`. Will be
810
- # fixed once full awaitable request API is completed.
811
- request = peer .request_block_headers ( # type: ignore
812
- start_block ,
813
- max_headers = 2 ,
814
- reverse = False ,
815
- )
816
- start = time .time ()
817
+
817
818
try :
818
- while True :
819
- elapsed = int (time .time () - start )
820
- remaining_timeout = max (0 , CHAIN_SPLIT_CHECK_TIMEOUT - elapsed )
821
- cmd , msg = await self .wait (
822
- peer .read_msg (), timeout = remaining_timeout )
823
- if isinstance (cmd , BaseBlockHeaders ):
824
- headers = cmd .extract_headers (msg )
825
- break
826
- else :
827
- msgs .append ((cmd , msg ))
828
- continue
819
+ headers = await peer .requests .get_block_headers ( # type: ignore
820
+ start_block ,
821
+ max_headers = 2 ,
822
+ reverse = False ,
823
+ timeout = CHAIN_SPLIT_CHECK_TIMEOUT ,
824
+ )
825
+
829
826
except (TimeoutError , PeerConnectionLost ) as err :
830
827
raise DAOForkCheckFailure (
831
- "Timed out waiting for DAO fork header from {}: {}" .format (peer , err ))
828
+ "Timed out waiting for DAO fork header from {}: {}" .format (peer , err )
829
+ ) from err
832
830
except MalformedMessage as err :
833
831
raise DAOForkCheckFailure (
834
832
"Malformed message while doing DAO fork check with {0}: {1}" .format (
835
833
peer , err ,
836
834
)
837
835
) from err
838
-
839
- try :
840
- request .validate_headers (headers )
841
836
except ValidationError as err :
842
837
raise DAOForkCheckFailure (
843
838
"Invalid header response during DAO fork check: {}" .format (err )
844
- )
839
+ ) from err
845
840
846
841
if len (headers ) != 2 :
847
842
raise DAOForkCheckFailure (
848
- "Peer failed to return all requested headers for DAO fork check"
843
+ "Peer %s failed to return DAO fork check headers" . format ( peer )
849
844
)
850
845
else :
851
846
parent , header = headers
@@ -855,8 +850,6 @@ async def ensure_same_side_on_dao_fork(
855
850
except EthValidationError as err :
856
851
raise DAOForkCheckFailure ("Peer failed DAO fork check validation: {}" .format (err ))
857
852
858
- return msgs
859
-
860
853
def _peer_finished (self , peer : BaseService ) -> None :
861
854
"""Remove the given peer from our list of connected nodes.
862
855
This is passed as a callback to be called when a peer finishes.
@@ -1008,7 +1001,9 @@ def _test() -> None:
1008
1001
from eth .chains .ropsten import RopstenChain , ROPSTEN_GENESIS_HEADER , ROPSTEN_VM_CONFIGURATION
1009
1002
from eth .db .backends .memory import MemoryDB
1010
1003
from trinity .protocol .eth .peer import ETHPeer
1004
+ from trinity .protocol .eth .requests import HeaderRequest as ETHHeaderRequest
1011
1005
from trinity .protocol .les .peer import LESPeer
1006
+ from trinity .protocol .les .requests import HeaderRequest as LESHeaderRequest
1012
1007
from tests .p2p .integration_test_helpers import FakeAsyncHeaderDB , connect_to_peers_loop
1013
1008
logging .basicConfig (level = TRACE_LEVEL_NUM , format = '%(asctime)s %(levelname)s: %(message)s' )
1014
1009
@@ -1041,13 +1036,15 @@ async def request_stuff() -> None:
1041
1036
'0x59af08ab31822c992bb3dad92ddb68d820aa4c69e9560f07081fa53f1009b152' )
1042
1037
if peer_class == ETHPeer :
1043
1038
peer = cast (ETHPeer , peer )
1044
- peer .sub_proto .send_get_block_headers (block_hash , 1 , 0 , False )
1039
+ peer .sub_proto .send_get_block_headers (ETHHeaderRequest ( block_hash , 1 , 0 , False ) )
1045
1040
peer .sub_proto .send_get_block_bodies ([block_hash ])
1046
1041
peer .sub_proto .send_get_receipts ([block_hash ])
1047
1042
else :
1048
1043
peer = cast (LESPeer , peer )
1049
1044
request_id = 1
1050
- peer .sub_proto .send_get_block_headers (block_hash , 1 , 0 , False , request_id )
1045
+ peer .sub_proto .send_get_block_headers (
1046
+ LESHeaderRequest (block_hash , 1 , 0 , False , request_id )
1047
+ )
1051
1048
peer .sub_proto .send_get_block_bodies ([block_hash ], request_id + 1 )
1052
1049
peer .sub_proto .send_get_receipts (block_hash , request_id + 2 )
1053
1050
0 commit comments