@@ -97,10 +97,16 @@ def __init__(self, hs):
         self.state = hs.get_state_handler()

         self.device_handler = hs.get_device_handler()
+        self._federation_ratelimiter = hs.get_federation_ratelimiter()

         self._server_linearizer = Linearizer("fed_server")
         self._transaction_linearizer = Linearizer("fed_txn_handler")

+        # We cache results for transactions with the same ID
+        self._transaction_resp_cache = ResponseCache(
+            hs, "fed_txn_handler", timeout_ms=30000
+        )
+
         self.transaction_actions = TransactionActions(self.store)

         self.registry = hs.get_federation_registry()
@@ -135,22 +141,44 @@ async def on_incoming_transaction(
         request_time = self._clock.time_msec()

         transaction = Transaction(**transaction_data)
+        transaction_id = transaction.transaction_id  # type: ignore

-        if not transaction.transaction_id:  # type: ignore
+        if not transaction_id:
             raise Exception("Transaction missing transaction_id")

-        logger.debug("[%s] Got transaction", transaction.transaction_id)  # type: ignore
+        logger.debug("[%s] Got transaction", transaction_id)

-        # use a linearizer to ensure that we don't process the same transaction
-        # multiple times in parallel.
-        with (
-            await self._transaction_linearizer.queue(
-                (origin, transaction.transaction_id)  # type: ignore
-            )
-        ):
-            result = await self._handle_incoming_transaction(
-                origin, transaction, request_time
-            )
+        # We wrap in a ResponseCache so that we de-duplicate retried
+        # transactions.
+        return await self._transaction_resp_cache.wrap(
+            (origin, transaction_id),
+            self._on_incoming_transaction_inner,
+            origin,
+            transaction,
+            request_time,
+        )
+
+    async def _on_incoming_transaction_inner(
+        self, origin: str, transaction: Transaction, request_time: int
+    ) -> Tuple[int, Dict[str, Any]]:
+        # Use a linearizer to ensure that transactions from a remote are
+        # processed in order.
+        with await self._transaction_linearizer.queue(origin):
+            # We rate limit here *after* we've queued up the incoming requests,
+            # so that we don't fill up the ratelimiter with blocked requests.
+            #
+            # This is important as the ratelimiter allows N concurrent requests
+            # at a time, and only starts ratelimiting if there are more requests
+            # than that being processed at a time. If we queued up requests in
+            # the linearizer/response cache *after* the ratelimiting then those
+            # queued up requests would count as part of the allowed limit of N
+            # concurrent requests.
+            with self._federation_ratelimiter.ratelimit(origin) as d:
+                await d
+
+                result = await self._handle_incoming_transaction(
+                    origin, transaction, request_time
+                )

         return result
156184
0 commit comments