@@ -125,20 +125,6 @@ const
     ## value in milliseconds
   initialLookups = 1 ## Amount of lookups done when populating the routing table
 
-  # These are the concurrent offers per Portal wire protocol that is running.
-  # Using the `offerQueue` allows for limiting the amount of offers send and
-  # thus how many streams can be started.
-  # TODO:
-  # More thought needs to go into this as it is currently on a per network
-  # basis. Keep it simple like that? Or limit it better at the stream transport
-  # level? In the latter case, this might still need to be checked/blocked at
-  # the very start of sending the offer, because blocking/waiting too long
-  # between the received accept message and actually starting the stream and
-  # sending data could give issues due to timeouts on the other side.
-  # And then there are still limits to be applied also for FindContent and the
-  # incoming directions.
-  concurrentOffers = 50
-
 type
   ToContentIdHandler* =
     proc(contentKey: ContentKeyByteList): results.Opt[ContentId] {.raises: [], gcsafe.}
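The comment block removed here (and re-added verbatim in `start` further down in this diff) describes the limiting pattern: `offerQueue` is a bounded chronos `AsyncQueue` and a fixed pool of `offerWorker` futures drains it, so at most that many offers (and thus streams) are in flight per wire protocol instance. A minimal sketch of that pattern, assuming chronos; `OfferRequest` and `sendOffer` below are placeholders, not the project's actual types:

```nim
# Sketch of the bounded-queue / worker-pool pattern described in the comment,
# using only chronos primitives. `OfferRequest` and `sendOffer` are placeholders.
import chronos

type OfferRequest = object
  id: int # placeholder payload

proc sendOffer(req: OfferRequest) {.async.} =
  # Stand-in for sending the offer and streaming the content.
  await sleepAsync(10.milliseconds)

proc offerWorker(queue: AsyncQueue[OfferRequest]) {.async.} =
  # Each worker handles one offer at a time, so the number of spawned
  # workers is the upper bound on concurrent offers.
  while true:
    let req = await queue.popFirst()
    await sendOffer(req)

proc demo() {.async.} =
  const maxConcurrentOffers = 50
  # Bounded queue: addLast blocks once the queue is full, which also
  # back-pressures the code producing offers.
  let queue = newAsyncQueue[OfferRequest](maxConcurrentOffers)
  var workers: seq[Future[void]]
  for _ in 0 ..< maxConcurrentOffers:
    workers.add offerWorker(queue)
  for i in 0 ..< 200:
    await queue.addLast(OfferRequest(id: i))
  for worker in workers:
    await worker.cancelAndWait()

waitFor demo()
```

In the diff itself the worker futures are kept in `p.offerWorkers`, presumably so `stop` can tear them down.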
@@ -591,7 +577,7 @@ proc new*(
     bootstrapRecords: @bootstrapRecords,
     stream: stream,
     radiusCache: RadiusCache.init(256),
-    offerQueue: newAsyncQueue[OfferRequest](concurrentOffers),
+    offerQueue: newAsyncQueue[OfferRequest](config.maxConcurrentOffers),
     pingTimings: Table[NodeId, chronos.Moment](),
     config: config,
   )
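With this change the queue capacity (and, in the `start` hunk below, the number of workers) is taken from the protocol's config rather than a module-wide constant, so the limit can differ per network instance. A rough sketch of what the config side could look like; only the `maxConcurrentOffers` name comes from this diff, while the type name, its shape, and the default (matching the removed constant) are assumptions:

```nim
# Hypothetical config sketch; the real config type has more fields and may be
# shaped differently.
const defaultMaxConcurrentOffers = 50 # same value as the removed constant

type PortalProtocolConfig* = object
  maxConcurrentOffers*: int
  # ... other tuning knobs elided

proc init*(
    T: type PortalProtocolConfig,
    maxConcurrentOffers = defaultMaxConcurrentOffers,
): T =
  T(maxConcurrentOffers: maxConcurrentOffers)

# Usage when constructing the protocol (mirrors the hunk above):
#   offerQueue: newAsyncQueue[OfferRequest](config.maxConcurrentOffers)
```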
@@ -1758,7 +1744,19 @@ proc start*(p: PortalProtocol) =
   p.refreshLoop = refreshLoop(p)
   p.revalidateLoop = revalidateLoop(p)
 
-  for i in 0 ..< concurrentOffers:
+  # These are the concurrent offers per Portal wire protocol that is running.
+  # Using the `offerQueue` allows for limiting the amount of offers send and
+  # thus how many streams can be started.
+  # TODO:
+  # More thought needs to go into this as it is currently on a per network
+  # basis. Keep it simple like that? Or limit it better at the stream transport
+  # level? In the latter case, this might still need to be checked/blocked at
+  # the very start of sending the offer, because blocking/waiting too long
+  # between the received accept message and actually starting the stream and
+  # sending data could give issues due to timeouts on the other side.
+  # And then there are still limits to be applied also for FindContent and the
+  # incoming directions.
+  for i in 0 ..< p.config.maxConcurrentOffers:
     p.offerWorkers.add(offerWorker(p))
 
 proc stop*(p: PortalProtocol) {.async: (raises: []).} =
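The TODO in the re-added comment raises the alternative of enforcing the limit at the stream transport level, shared across networks, rather than per wire protocol instance. Purely as an illustration of that option (not part of this change), a counting-semaphore-style limiter can be built from a pre-filled chronos `AsyncQueue`; every name below is hypothetical:

```nim
import chronos

type OfferLimiter* = ref object
  # Hypothetical shared limiter: one token pool used by all Portal networks,
  # so the cap applies across networks instead of per protocol instance.
  tokens: AsyncQueue[int]

proc newOfferLimiter*(limit: int): OfferLimiter =
  let q = newAsyncQueue[int](limit)
  for i in 0 ..< limit:
    q.addLastNoWait(i) # pre-fill with `limit` tokens
  OfferLimiter(tokens: q)

proc withOfferSlot*(
    limiter: OfferLimiter, work: proc(): Future[void] {.gcsafe.}
) {.async.} =
  # Waits for a free token before running `work`, so at most `limit`
  # bodies execute concurrently, no matter which network asked.
  let token = await limiter.tokens.popFirst()
  try:
    await work()
  finally:
    limiter.tokens.addLastNoWait(token)
```

The comment's concern about timeouts would still apply here: the slot should be acquired before the offer is sent, not between the received accept and the start of the stream.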