 )
 from libp2p.peer.peerinfo import (
     PeerInfo,
+    peer_info_from_bytes,
+    peer_info_to_bytes,
 )
 from libp2p.peer.peerstore import (
     PERMANENT_ADDR_TTL,
@@ -86,6 +88,12 @@ class GossipSub(IPubsubRouter, Service):
     direct_connect_initial_delay: float
     direct_connect_interval: int
 
+    do_px: bool
+    px_peers_count: int
+    back_off: dict[str, dict[ID, int]]
+    prune_back_off: int
+    unsubscribe_back_off: int
+
     def __init__(
         self,
         protocols: Sequence[TProtocol],
@@ -100,6 +108,10 @@ def __init__(
         heartbeat_interval: int = 120,
         direct_connect_initial_delay: float = 0.1,
         direct_connect_interval: int = 300,
+        do_px: bool = False,
+        px_peers_count: int = 16,
+        prune_back_off: int = 60,
+        unsubscribe_back_off: int = 10,
     ) -> None:
         self.protocols = list(protocols)
         self.pubsub = None
@@ -134,6 +146,12 @@ def __init__(
         self.direct_connect_initial_delay = direct_connect_initial_delay
         self.time_since_last_publish = {}
 
+        self.do_px = do_px
+        self.px_peers_count = px_peers_count
+        self.back_off = dict()
+        self.prune_back_off = prune_back_off
+        self.unsubscribe_back_off = unsubscribe_back_off
+
     async def run(self) -> None:
         self.manager.run_daemon_task(self.heartbeat)
         if len(self.direct_peers) > 0:
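With these defaults, peer exchange stays opt-in. A minimal construction sketch, assuming `GossipSub` and `PROTOCOL_ID` are importable from `libp2p.pubsub.gossipsub`; the degree values are illustrative, and only the last four keywords are the options added by this change:

```python
from libp2p.pubsub.gossipsub import PROTOCOL_ID, GossipSub

# Illustrative values; only the last four keyword arguments come from this diff.
router = GossipSub(
    protocols=[PROTOCOL_ID],
    degree=6,                 # D: target mesh size
    degree_low=4,             # D_low: grafting threshold
    degree_high=12,           # D_high: pruning threshold
    do_px=True,               # attach exchanged peers to outgoing PRUNEs
    px_peers_count=16,        # cap on PX peers sent/accepted per PRUNE
    prune_back_off=60,        # seconds a pruned peer must wait to re-GRAFT
    unsubscribe_back_off=10,  # shorter back off when pruning due to leave()
)
```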
@@ -243,8 +261,10 @@ async def publish(self, msg_forwarder: ID, pubsub_msg: rpc_pb2.Message) -> None:
             if peer_id not in self.pubsub.peers:
                 continue
             stream = self.pubsub.peers[peer_id]
+
             # TODO: Go use `sendRPC`, which possibly piggybacks gossip/control messages.
             await self.pubsub.write_msg(stream, rpc_msg)
+
         for topic in pubsub_msg.topicIDs:
             self.time_since_last_publish[topic] = int(time.time())
 
@@ -322,15 +342,22 @@ async def join(self, topic: str) -> None:
         self.mesh[topic] = set()
 
         topic_in_fanout: bool = topic in self.fanout
-        fanout_peers: set[ID] = self.fanout[topic] if topic_in_fanout else set()
+        fanout_peers: set[ID] = set()
+
+        if topic_in_fanout:
+            for peer in self.fanout[topic]:
+                if self._check_back_off(peer, topic):
+                    continue
+                fanout_peers.add(peer)
+
         fanout_size = len(fanout_peers)
         if not topic_in_fanout or (topic_in_fanout and fanout_size < self.degree):
             # There are less than D peers (let this number be x)
             # in the fanout for a topic (or the topic is not in the fanout).
             # Selects the remaining number of peers (D-x) from peers.gossipsub[topic].
-            if topic in self.pubsub.peer_topics:
+            if self.pubsub is not None and topic in self.pubsub.peer_topics:
                 selected_peers = self._get_in_topic_gossipsub_peers_from_minus(
-                    topic, self.degree - fanout_size, fanout_peers
+                    topic, self.degree - fanout_size, fanout_peers, True
                 )
                 # Combine fanout peers with selected peers
                 fanout_peers.update(selected_peers)
@@ -357,7 +384,8 @@ async def leave(self, topic: str) -> None:
             return
         # Notify the peers in mesh[topic] with a PRUNE(topic) message
         for peer in self.mesh[topic]:
-            await self.emit_prune(topic, peer)
+            await self.emit_prune(topic, peer, self.do_px, True)
+            self._add_back_off(peer, topic, True)
 
         # Forget mesh[topic]
         self.mesh.pop(topic, None)
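Note the pairing in `leave`: each mesh peer gets a PRUNE carrying the unsubscribe back off, and the same back off is recorded locally, so a quick re-`join` will not immediately re-select those peers. A rough timeline under the defaults above (a sketch, not library output):

```python
# t=0   leave("news")  -> emit_prune(..., is_unsubscribe=True): PRUNE carries
#                         backoff=10; _add_back_off records expiry t=10 locally
# t=5   join("news")   -> fanout/gossipsub candidates still backed off are
#                         skipped via _check_back_off
# t=11  next selection -> expired entries are lazily evicted, so those peers
#                         become eligible for GRAFT again
```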
@@ -447,8 +475,8 @@ async def heartbeat(self) -> None:
             self.fanout_heartbeat()
             # Get the peers to send IHAVE to
             peers_to_gossip = self.gossip_heartbeat()
-            # Pack GRAFT, PRUNE and IHAVE for the same peer into one control message and
-            # send it
+            # Pack (piggyback) GRAFT, PRUNE and IHAVE for the same peer into
+            # one control message and send it
             await self._emit_control_msgs(
                 peers_to_graft, peers_to_prune, peers_to_gossip
             )
@@ -493,7 +521,7 @@ def mesh_heartbeat(
             if num_mesh_peers_in_topic < self.degree_low:
                 # Select D - |mesh[topic]| peers from peers.gossipsub[topic] - mesh[topic]  # noqa: E501
                 selected_peers = self._get_in_topic_gossipsub_peers_from_minus(
-                    topic, self.degree - num_mesh_peers_in_topic, self.mesh[topic]
+                    topic, self.degree - num_mesh_peers_in_topic, self.mesh[topic], True
                 )
 
                 for peer in selected_peers:
@@ -556,9 +584,7 @@ def _handle_topic_heartbeat(
             if len(in_topic_peers) < self.degree:
                 # Select additional peers from peers.gossipsub[topic]
                 selected_peers = self._get_in_topic_gossipsub_peers_from_minus(
-                    topic,
-                    self.degree - len(in_topic_peers),
-                    in_topic_peers,
+                    topic, self.degree - len(in_topic_peers), in_topic_peers, True
                 )
                 # Add the selected peers
                 in_topic_peers.update(selected_peers)
@@ -569,7 +595,7 @@ def _handle_topic_heartbeat(
         if msg_ids:
             # Select D peers from peers.gossipsub[topic] excluding current peers
             peers_to_emit_ihave_to = self._get_in_topic_gossipsub_peers_from_minus(
-                topic, self.degree, current_peers
+                topic, self.degree, current_peers, True
             )
             msg_id_strs = [str(msg_id) for msg_id in msg_ids]
             for peer in peers_to_emit_ihave_to:
@@ -643,7 +669,11 @@ def select_from_minus(
         return selection
 
     def _get_in_topic_gossipsub_peers_from_minus(
-        self, topic: str, num_to_select: int, minus: Iterable[ID]
+        self,
+        topic: str,
+        num_to_select: int,
+        minus: Iterable[ID],
+        backoff_check: bool = False,
     ) -> list[ID]:
         if self.pubsub is None:
             raise NoPubsubAttached
@@ -652,8 +682,88 @@ def _get_in_topic_gossipsub_peers_from_minus(
             for peer_id in self.pubsub.peer_topics[topic]
             if self.peer_protocol[peer_id] == PROTOCOL_ID
         }
+        if backoff_check:
+            # filter out peers that are in back off for this topic
+            gossipsub_peers_in_topic = {
+                peer_id
+                for peer_id in gossipsub_peers_in_topic
+                if self._check_back_off(peer_id, topic) is False
+            }
         return self.select_from_minus(num_to_select, gossipsub_peers_in_topic, minus)
 
+    def _add_back_off(
+        self, peer: ID, topic: str, is_unsubscribe: bool, backoff_duration: int = 0
+    ) -> None:
+        """
+        Add back off for a peer in a topic.
+        :param peer: peer to add back off for
+        :param topic: topic to add back off for
+        :param is_unsubscribe: whether this is an unsubscribe operation
+        :param backoff_duration: duration of back off in seconds; if 0, use default
+        """
+        if topic not in self.back_off:
+            self.back_off[topic] = dict()
+
+        backoff_till = int(time.time())
+        if backoff_duration > 0:
+            backoff_till += backoff_duration
+        else:
+            if is_unsubscribe:
+                backoff_till += self.unsubscribe_back_off
+            else:
+                backoff_till += self.prune_back_off
+
+        if peer not in self.back_off[topic]:
+            self.back_off[topic][peer] = backoff_till
+        else:
+            self.back_off[topic][peer] = max(self.back_off[topic][peer], backoff_till)
+
+    def _check_back_off(self, peer: ID, topic: str) -> bool:
+        """
+        Check if a peer is in back off for a topic and clean up expired entries.
+        :param peer: peer to check
+        :param topic: topic to check
+        :return: True if the peer is in back off, False otherwise
+        """
+        if topic not in self.back_off or peer not in self.back_off[topic]:
+            return False
+        if self.back_off[topic].get(peer, 0) > int(time.time()):
+            return True
+        else:
+            del self.back_off[topic][peer]
+            return False
+
+    async def _do_px(self, px_peers: list[rpc_pb2.PeerInfo]) -> None:
+        if len(px_peers) > self.px_peers_count:
+            px_peers = px_peers[: self.px_peers_count]
+
+        for peer in px_peers:
+            peer_id: ID = ID(peer.peerID)
+
+            if self.pubsub and peer_id in self.pubsub.peers:
+                continue
+
+            try:
+                peer_info = peer_info_from_bytes(peer.signedPeerRecord)
+                try:
+                    if self.pubsub is None:
+                        raise NoPubsubAttached
+                    await self.pubsub.host.connect(peer_info)
+                except Exception as e:
+                    logger.warning(
+                        "failed to connect to px peer %s: %s",
+                        peer_id,
+                        e,
+                    )
+                    continue
+            except Exception as e:
+                logger.warning(
+                    "failed to parse peer info from px peer %s: %s",
+                    peer_id,
+                    e,
+                )
+                continue
+
     # RPC handlers
 
     async def handle_ihave(
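The two helpers implement a lazily cleaned expiry map: `back_off[topic][peer]` stores an absolute Unix timestamp, writes never shorten an existing entry, and reads evict expired entries. A self-contained model of that bookkeeping (plain `str` peer IDs instead of `ID`, for illustration only):

```python
import time

back_off: dict[str, dict[str, int]] = {}

def add_back_off(peer: str, topic: str, duration: int) -> None:
    # Record an absolute expiry; max() ensures an existing back off is never shortened.
    expiry = int(time.time()) + duration
    topic_map = back_off.setdefault(topic, {})
    topic_map[peer] = max(topic_map.get(peer, 0), expiry)

def check_back_off(peer: str, topic: str) -> bool:
    # True while the entry is in the future; expired entries are evicted on read.
    expiry = back_off.get(topic, {}).get(peer)
    if expiry is None:
        return False
    if expiry > int(time.time()):
        return True
    del back_off[topic][peer]
    return False

add_back_off("QmPeerA", "news", 60)
assert check_back_off("QmPeerA", "news")      # still within the window
assert not check_back_off("QmPeerB", "news")  # never backed off
```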
@@ -737,24 +847,46 @@ async def handle_graft(
                 logger.warning(
                     "GRAFT: ignoring request from direct peer %s", sender_peer_id
                 )
-                await self.emit_prune(topic, sender_peer_id)
+                await self.emit_prune(topic, sender_peer_id, False, False)
                 return
 
+            if self._check_back_off(sender_peer_id, topic):
+                logger.warning(
+                    "GRAFT: ignoring request from %s, back off until %d",
+                    sender_peer_id,
+                    self.back_off[topic][sender_peer_id],
+                )
+                self._add_back_off(sender_peer_id, topic, False)
+                await self.emit_prune(topic, sender_peer_id, False, False)
+                return
+
             if sender_peer_id not in self.mesh[topic]:
                 self.mesh[topic].add(sender_peer_id)
         else:
             # Respond with PRUNE if not subscribed to the topic
-            await self.emit_prune(topic, sender_peer_id)
+            await self.emit_prune(topic, sender_peer_id, self.do_px, False)
 
     async def handle_prune(
         self, prune_msg: rpc_pb2.ControlPrune, sender_peer_id: ID
     ) -> None:
         topic: str = prune_msg.topicID
+        backoff_till: int = prune_msg.backoff
+        px_peers: list[rpc_pb2.PeerInfo] = []
+        for peer in prune_msg.peers:
+            px_peers.append(peer)
 
         # Remove peer from mesh for topic
         if topic in self.mesh:
+            if backoff_till > 0:
+                self._add_back_off(sender_peer_id, topic, False, backoff_till)
+            else:
+                self._add_back_off(sender_peer_id, topic, False)
+
             self.mesh[topic].discard(sender_peer_id)
 
+            if px_peers:
+                await self._do_px(px_peers)
+
     # RPC emitters
 
     def pack_control_msgs(
@@ -803,15 +935,36 @@ async def emit_graft(self, topic: str, id: ID) -> None:
 
         await self.emit_control_message(control_msg, id)
 
-    async def emit_prune(self, topic: str, id: ID) -> None:
+    async def emit_prune(
+        self, topic: str, to_peer: ID, do_px: bool, is_unsubscribe: bool
+    ) -> None:
         """Emit prune message, sent to to_peer, for topic."""
         prune_msg: rpc_pb2.ControlPrune = rpc_pb2.ControlPrune()
         prune_msg.topicID = topic
 
+        back_off_duration = self.prune_back_off
+        if is_unsubscribe:
+            back_off_duration = self.unsubscribe_back_off
+
+        prune_msg.backoff = back_off_duration
+
+        if do_px:
+            exchange_peers = self._get_in_topic_gossipsub_peers_from_minus(
+                topic, self.px_peers_count, [to_peer]
+            )
+            for peer in exchange_peers:
+                if self.pubsub is None:
+                    raise NoPubsubAttached
+                peer_info = self.pubsub.host.get_peerstore().peer_info(peer)
+                signed_peer_record: rpc_pb2.PeerInfo = rpc_pb2.PeerInfo()
+                signed_peer_record.peerID = peer.to_bytes()
+                signed_peer_record.signedPeerRecord = peer_info_to_bytes(peer_info)
+                prune_msg.peers.append(signed_peer_record)
+
         control_msg: rpc_pb2.ControlMessage = rpc_pb2.ControlMessage()
         control_msg.prune.extend([prune_msg])
 
-        await self.emit_control_message(control_msg, id)
+        await self.emit_control_message(control_msg, to_peer)
 
     async def emit_control_message(
         self, control_msg: rpc_pb2.ControlMessage, to_peer: ID
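For reference, the PRUNE payload this emitter builds uses three `rpc_pb2` fields: `topicID`, the new `backoff` hint, and repeated `peers` entries holding a raw peer ID plus a serialized `PeerInfo`. A construction sketch mirroring `emit_prune`, with placeholder bytes and an assumed `rpc_pb2` import path:

```python
from libp2p.pubsub.pb import rpc_pb2

prune = rpc_pb2.ControlPrune()
prune.topicID = "news"
prune.backoff = 60  # receiver should wait this many seconds before re-GRAFTing

record = rpc_pb2.PeerInfo()
record.peerID = b"<peer id bytes>"                # ID.to_bytes() in the diff
record.signedPeerRecord = b"<serialized record>"  # peer_info_to_bytes(...) in the diff
prune.peers.append(record)

control = rpc_pb2.ControlMessage()
control.prune.extend([prune])
```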