Skip to content

Commit b671499

Browse files
authored
fluffy: Make concurrent offers configurable at cli (#2854)
1 parent 6086c29 commit b671499

File tree

5 files changed

+36
-26
lines changed

5 files changed

+36
-26
lines changed

fluffy/conf.nim

Lines changed: 13 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -232,7 +232,7 @@ type
232232
"the same machines. The option might be removed/adjusted in the future",
233233
defaultValue: defaultPortalProtocolConfig.tableIpLimits.tableIpLimit,
234234
defaultValueDesc: $defaultTableIpLimitDesc,
235-
name: "table-ip-limit"
235+
name: "debug-table-ip-limit"
236236
.}: uint
237237

238238
bucketIpLimit* {.
@@ -243,23 +243,30 @@ type
243243
"the same machines. The option might be removed/adjusted in the future",
244244
defaultValue: defaultPortalProtocolConfig.tableIpLimits.bucketIpLimit,
245245
defaultValueDesc: $defaultBucketIpLimitDesc,
246-
name: "bucket-ip-limit"
246+
name: "debug-bucket-ip-limit"
247247
.}: uint
248248

249249
bitsPerHop* {.
250250
hidden,
251251
desc: "Kademlia's b variable, increase for less hops per lookup",
252252
defaultValue: defaultPortalProtocolConfig.bitsPerHop,
253253
defaultValueDesc: $defaultBitsPerHopDesc,
254-
name: "bits-per-hop"
254+
name: "debug-bits-per-hop"
255255
.}: int
256256

257257
maxGossipNodes* {.
258258
hidden,
259259
desc: "The maximum number of nodes to send content to during gossip",
260260
defaultValue: defaultPortalProtocolConfig.maxGossipNodes,
261261
defaultValueDesc: $defaultMaxGossipNodesDesc,
262-
name: "max-gossip-nodes"
262+
name: "debug-max-gossip-nodes"
263+
.}: int
264+
265+
maxConcurrentOffers* {.
266+
hidden,
267+
desc: "The maximum number of offers to send concurrently",
268+
defaultValue: defaultPortalProtocolConfig.maxConcurrentOffers,
269+
name: "debug-max-concurrent-offers"
263270
.}: int
264271

265272
radiusConfig* {.
@@ -316,14 +323,14 @@ type
316323
"Size of the in memory local content cache. This is the max number " &
317324
"of content values that can be stored in the cache.",
318325
defaultValue: defaultPortalProtocolConfig.contentCacheSize,
319-
name: "content-cache-size"
326+
name: "debug-content-cache-size"
320327
.}: int
321328

322329
disableContentCache* {.
323330
hidden,
324331
desc: "Disable the in memory local content cache",
325332
defaultValue: defaultPortalProtocolConfig.disableContentCache,
326-
name: "disable-content-cache"
333+
name: "debug-disable-content-cache"
327334
.}: bool
328335

329336
disablePoke* {.

fluffy/fluffy.nim

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -183,7 +183,7 @@ proc run(
183183
portalProtocolConfig = PortalProtocolConfig.init(
184184
config.tableIpLimit, config.bucketIpLimit, config.bitsPerHop, config.radiusConfig,
185185
config.disablePoke, config.maxGossipNodes, config.contentCacheSize,
186-
config.disableContentCache,
186+
config.disableContentCache, config.maxConcurrentOffers,
187187
)
188188

189189
portalNodeConfig = PortalNodeConfig(

fluffy/network/wire/portal_protocol.nim

Lines changed: 14 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -125,20 +125,6 @@ const
125125
## value in milliseconds
126126
initialLookups = 1 ## Amount of lookups done when populating the routing table
127127

128-
# These are the concurrent offers per Portal wire protocol that is running.
129-
# Using the `offerQueue` allows for limiting the number of offers sent and
130-
# thus how many streams can be started.
131-
# TODO:
132-
# More thought needs to go into this as it is currently on a per network
133-
# basis. Keep it simple like that? Or limit it better at the stream transport
134-
# level? In the latter case, this might still need to be checked/blocked at
135-
# the very start of sending the offer, because blocking/waiting too long
136-
# between the received accept message and actually starting the stream and
137-
# sending data could give issues due to timeouts on the other side.
138-
# And then there are still limits to be applied also for FindContent and the
139-
# incoming directions.
140-
concurrentOffers = 50
141-
142128
type
143129
ToContentIdHandler* =
144130
proc(contentKey: ContentKeyByteList): results.Opt[ContentId] {.raises: [], gcsafe.}
@@ -591,7 +577,7 @@ proc new*(
591577
bootstrapRecords: @bootstrapRecords,
592578
stream: stream,
593579
radiusCache: RadiusCache.init(256),
594-
offerQueue: newAsyncQueue[OfferRequest](concurrentOffers),
580+
offerQueue: newAsyncQueue[OfferRequest](config.maxConcurrentOffers),
595581
pingTimings: Table[NodeId, chronos.Moment](),
596582
config: config,
597583
)
@@ -1758,7 +1744,19 @@ proc start*(p: PortalProtocol) =
17581744
p.refreshLoop = refreshLoop(p)
17591745
p.revalidateLoop = revalidateLoop(p)
17601746

1761-
for i in 0 ..< concurrentOffers:
1747+
# These are the concurrent offers per Portal wire protocol that is running.
1748+
# Using the `offerQueue` allows for limiting the amount of offers send and
1749+
# thus how many streams can be started.
1750+
# TODO:
1751+
# More thought needs to go into this as it is currently on a per network
1752+
# basis. Keep it simple like that? Or limit it better at the stream transport
1753+
# level? In the latter case, this might still need to be checked/blocked at
1754+
# the very start of sending the offer, because blocking/waiting too long
1755+
# between the received accept message and actually starting the stream and
1756+
# sending data could give issues due to timeouts on the other side.
1757+
# And then there are still limits to be applied also for FindContent and the
1758+
# incoming directions.
1759+
for i in 0 ..< p.config.maxConcurrentOffers:
17621760
p.offerWorkers.add(offerWorker(p))
17631761

17641762
proc stop*(p: PortalProtocol) {.async: (raises: []).} =

fluffy/network/wire/portal_protocol_config.nim

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -43,6 +43,7 @@ type
4343
maxGossipNodes*: int
4444
contentCacheSize*: int
4545
disableContentCache*: bool
46+
maxConcurrentOffers*: int
4647

4748
const
4849
defaultRadiusConfig* = RadiusConfig(kind: Dynamic)
@@ -51,6 +52,7 @@ const
5152
defaultMaxGossipNodes* = 4
5253
defaultContentCacheSize* = 100
5354
defaultDisableContentCache* = false
55+
defaultMaxConcurrentOffers* = 50
5456
revalidationTimeout* = chronos.seconds(30)
5557

5658
defaultPortalProtocolConfig* = PortalProtocolConfig(
@@ -61,6 +63,7 @@ const
6163
maxGossipNodes: defaultMaxGossipNodes,
6264
contentCacheSize: defaultContentCacheSize,
6365
disableContentCache: defaultDisableContentCache,
66+
maxConcurrentOffers: defaultMaxConcurrentOffers,
6467
)
6568

6669
proc init*(
@@ -73,6 +76,7 @@ proc init*(
7376
maxGossipNodes: int,
7477
contentCacheSize: int,
7578
disableContentCache: bool,
79+
maxConcurrentOffers: int,
7680
): T =
7781
PortalProtocolConfig(
7882
tableIpLimits:
@@ -83,6 +87,7 @@ proc init*(
8387
maxGossipNodes: maxGossipNodes,
8488
contentCacheSize: contentCacheSize,
8589
disableContentCache: disableContentCache,
90+
maxConcurrentOffers: maxConcurrentOffers,
8691
)
8792

8893
func fromLogRadius*(T: type UInt256, logRadius: uint16): T =

fluffy/scripts/launch_local_testnet.sh

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -342,9 +342,9 @@ for NUM_NODE in $(seq 0 $(( NUM_NODES - 1 ))); do
342342
--metrics \
343343
--metrics-address="127.0.0.1" \
344344
--metrics-port="$(( BASE_METRICS_PORT + NUM_NODE ))" \
345-
--table-ip-limit=1024 \
346-
--bucket-ip-limit=24 \
347-
--bits-per-hop=1 \
345+
--debug-table-ip-limit=1024 \
346+
--debug-bucket-ip-limit=24 \
347+
--debug-bits-per-hop=1 \
348348
--portal-subnetworks="${PORTAL_SUBNETWORKS}" \
349349
--disable-state-root-validation="${DISABLE_STATE_ROOT_VALIDATION}" \
350350
${TRUSTED_BLOCK_ROOT_ARG} \

0 commit comments

Comments
 (0)