From fb5f0b44ccb859673ad759274d74693f16427c81 Mon Sep 17 00:00:00 2001 From: Thomas BAGREL Date: Mon, 29 Sep 2025 15:48:19 +0200 Subject: [PATCH 01/43] Initial commit with copy-pasted TxSubmissionV2 for ObjectDiffusionV2 Co-authored-by: nbacquey --- .../Ouroboros/Consensus/Network/NodeToNode.hs | 4 +- ouroboros-consensus/ouroboros-consensus.cabal | 11 +- .../{Inbound.hs => Inbound/V1.hs} | 2 +- .../ObjectDiffusion/Inbound/{ => V1}/State.hs | 2 +- .../ObjectDiffusion/Inbound/V2.hs | 225 ++++++ .../ObjectDiffusion/Inbound/V2/Decision.hs | 532 +++++++++++++++ .../ObjectDiffusion/Inbound/V2/Policy.hs | 68 ++ .../ObjectDiffusion/Inbound/V2/Registry.hs | 599 ++++++++++++++++ .../ObjectDiffusion/Inbound/V2/State.hs | 646 ++++++++++++++++++ .../ObjectDiffusion/Inbound/V2/Types.hs | 427 ++++++++++++ .../ObjectDiffusion/ObjectPool/API.hs | 4 + .../MiniProtocol/ObjectDiffusion/PerasCert.hs | 4 +- .../MiniProtocol/ObjectDiffusion/Smoke.hs | 2 +- 13 files changed, 2517 insertions(+), 9 deletions(-) rename ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/{Inbound.hs => Inbound/V1.hs} (99%) rename ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/{ => V1}/State.hs (99%) create mode 100644 ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2.hs create mode 100644 ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Decision.hs create mode 100644 ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Policy.hs create mode 100644 ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Registry.hs create mode 100644 ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/State.hs create mode 100644 
ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Types.hs diff --git a/ouroboros-consensus-diffusion/src/ouroboros-consensus-diffusion/Ouroboros/Consensus/Network/NodeToNode.hs b/ouroboros-consensus-diffusion/src/ouroboros-consensus-diffusion/Ouroboros/Consensus/Network/NodeToNode.hs index 8b8f27b7b2..494faa0505 100644 --- a/ouroboros-consensus-diffusion/src/ouroboros-consensus-diffusion/Ouroboros/Consensus/Network/NodeToNode.hs +++ b/ouroboros-consensus-diffusion/src/ouroboros-consensus-diffusion/Ouroboros/Consensus/Network/NodeToNode.hs @@ -70,8 +70,8 @@ import Ouroboros.Consensus.MiniProtocol.ChainSync.Client ) import qualified Ouroboros.Consensus.MiniProtocol.ChainSync.Client as CsClient import Ouroboros.Consensus.MiniProtocol.ChainSync.Server -import Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound (objectDiffusionInbound) -import Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.State +import Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V1 (objectDiffusionInbound) +import Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V1.State ( ObjectDiffusionInboundStateView , bracketObjectDiffusionInbound ) diff --git a/ouroboros-consensus/ouroboros-consensus.cabal b/ouroboros-consensus/ouroboros-consensus.cabal index acbca582c1..880a7f8d94 100644 --- a/ouroboros-consensus/ouroboros-consensus.cabal +++ b/ouroboros-consensus/ouroboros-consensus.cabal @@ -191,8 +191,14 @@ library Ouroboros.Consensus.MiniProtocol.LocalStateQuery.Server Ouroboros.Consensus.MiniProtocol.LocalTxMonitor.Server Ouroboros.Consensus.MiniProtocol.LocalTxSubmission.Server - Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound - Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.State + Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V1 + Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V1.State + Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2 + 
Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.Decision + Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.Policy + Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.Registry + Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.State + Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.Types Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.ObjectPool.API Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.ObjectPool.PerasCert Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Outbound @@ -354,6 +360,7 @@ library primitive, psqueues ^>=0.2.3, quiet ^>=0.2, + random, rawlock ^>=0.1.1, resource-registry ^>=0.1, semialign >=1.1, diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V1.hs similarity index 99% rename from ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound.hs rename to ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V1.hs index a368682c40..51d65c469e 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V1.hs @@ -11,7 +11,7 @@ {-# LANGUAGE RecordWildCards #-} {-# LANGUAGE ScopedTypeVariables #-} -module Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound +module Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V1 ( objectDiffusionInbound , TraceObjectDiffusionInbound (..) , ObjectDiffusionInboundError (..) 
diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/State.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V1/State.hs similarity index 99% rename from ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/State.hs rename to ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V1/State.hs index 58402da64f..3aa84c3915 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/State.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V1/State.hs @@ -6,7 +6,7 @@ {-# LANGUAGE StandaloneDeriving #-} {-# LANGUAGE UndecidableInstances #-} -module Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.State +module Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V1.State ( ObjectDiffusionInboundState (..) , initObjectDiffusionInboundState , ObjectDiffusionInboundHandle (..) 
diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2.hs new file mode 100644 index 0000000000..7a82b0e3d6 --- /dev/null +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2.hs @@ -0,0 +1,225 @@ +{-# LANGUAGE BangPatterns #-} +{-# LANGUAGE DataKinds #-} +{-# LANGUAGE GADTs #-} +{-# LANGUAGE KindSignatures #-} +{-# LANGUAGE LambdaCase #-} +{-# LANGUAGE NamedFieldPuns #-} +{-# LANGUAGE ScopedTypeVariables #-} + +module Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2 + ( -- * ObjectDiffusion Inbound client + objectSubmissionInboundV2 + + -- * PeerObjectAPI + , withPeer + , PeerObjectAPI + + -- * Supporting types + , module V2 + , ObjectChannelsVar + , newObjectChannelsVar + , ObjectObjectPoolSem + , newObjectObjectPoolSem + , SharedObjectStateVar + , newSharedObjectStateVar + , ObjectDecisionPolicy (..) + , defaultObjectDecisionPolicy + ) where + +import Control.Exception (assert) +import Control.Monad (unless, when) +import Control.Monad.Class.MonadThrow +import Control.Monad.Class.MonadTimer.SI +import Control.Tracer (Tracer, traceWith) +import qualified Data.List.NonEmpty as NonEmpty +import qualified Data.Map.Strict as Map +import qualified Data.Sequence.Strict as StrictSeq +import qualified Data.Set as Set +import Network.TypedProtocol +import Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.Policy +import Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.Registry +import Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.State +import Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.Types as V2 +import Ouroboros.Network.Protocol.ObjectDiffusion.Inbound + +-- | A object-submission inbound side (server, sic!). 
+-- +-- The server blocks on receiving `ObjectDecision` from the decision logic. If +-- there are object's to download it pipelines two requests: first for object's second +-- for objectId's. If there are no object's to download, it either sends a blocking or +-- non-blocking request for objectId's. +objectSubmissionInboundV2 :: + forall objectId object idx m. + ( MonadDelay m + , MonadThrow m + , Ord objectId + ) => + Tracer m (TraceObjectDiffusionInbound objectId object) -> + ObjectDiffusionInitDelay -> + ObjectDiffusionObjectPoolWriter objectId object idx m -> + PeerObjectAPI m objectId object -> + ObjectDiffusionServerPipelined objectId object m () +objectSubmissionInboundV2 + tracer + initDelay + ObjectDiffusionObjectPoolWriter{objectId} + PeerObjectAPI + { readObjectDecision + , handleReceivedObjectIds + , handleReceivedObjects + , submitObjectToObjectPool + } = + ObjectDiffusionServerPipelined $ do + case initDelay of + ObjectDiffusionInitDelay delay -> threadDelay delay + NoObjectDiffusionInitDelay -> return () + serverIdle + where + serverIdle :: + m (ServerStIdle Z objectId object m ()) + serverIdle = do + -- Block on next decision. + object@ObjectDecision + { objectsToRequest = objectsToRequest + , objectsToObjectPool = ObjectsToObjectPool{listOfObjectsToObjectPool} + } <- + readObjectDecision + traceWith tracer (TraceObjectInboundDecision object) + + let !collected = length listOfObjectsToObjectPool + + -- Only attempt to add OBJECTs if we have some work to do + when (collected > 0) $ do + -- submitObjectToObjectPool traces: + -- \* `TraceObjectDiffusionProcessed`, + -- \* `TraceObjectInboundAddedToObjectPool`, and + -- \* `TraceObjectInboundRejectedFromObjectPool` + -- events. + mapM_ (uncurry $ submitObjectToObjectPool tracer) listOfObjectsToObjectPool + + -- TODO: + -- We can update the state so that other `object-submission` servers will + -- not try to add these objects to the objectpool. 
+ if Map.null objectsToRequest + then serverReqObjectIds Zero object + else serverReqObjects object + + -- Pipelined request of objects + serverReqObjects :: + ObjectDecision objectId object -> + m (ServerStIdle Z objectId object m ()) + serverReqObjects object@ObjectDecision{objectsToRequest = objectsToRequest} = + pure $ + SendMsgRequestObjectsPipelined + objectsToRequest + (serverReqObjectIds (Succ Zero) object) + + serverReqObjectIds :: + forall (n :: N). + Nat n -> + ObjectDecision objectId object -> + m (ServerStIdle n objectId object m ()) + serverReqObjectIds + n + ObjectDecision{objectIdsToRequest = 0} = + case n of + Zero -> serverIdle + Succ _ -> handleReplies n + serverReqObjectIds + -- if there are no unacknowledged objectIds, the protocol requires sending + -- a blocking `MsgRequestObjectIds` request. This is important, as otherwise + -- the client side wouldn't have a chance to terminate the + -- mini-protocol. + Zero + ObjectDecision + { objectIdsToAcknowledge = objectIdsToAck + , objectPipelineObjectIds = False + , objectIdsToRequest = objectIdsToReq + } = + pure $ + SendMsgRequestObjectIdsBlocking + objectIdsToAck + objectIdsToReq + -- Our result if the client terminates the protocol + (traceWith tracer TraceObjectInboundTerminated) + ( \objectIds -> do + let objectIds' = NonEmpty.toList objectIds + objectIdsSeq = StrictSeq.fromList $ fst <$> objectIds' + objectIdsMap = Map.fromList objectIds' + unless (StrictSeq.length objectIdsSeq <= fromIntegral objectIdsToReq) $ + throwIO ProtocolErrorObjectIdsNotRequested + handleReceivedObjectIds objectIdsToReq objectIdsSeq objectIdsMap + serverIdle + ) + serverReqObjectIds + n@Zero + ObjectDecision + { objectIdsToAcknowledge = objectIdsToAck + , objectPipelineObjectIds = True + , objectIdsToRequest = objectIdsToReq + } = + pure $ + SendMsgRequestObjectIdsPipelined + objectIdsToAck + objectIdsToReq + (handleReplies (Succ n)) + serverReqObjectIds + n@Succ{} + ObjectDecision + { objectIdsToAcknowledge = 
objectIdsToAck + , objectPipelineObjectIds + , objectIdsToRequest = objectIdsToReq + } = + -- it is impossible that we have had `object`'s to request (Succ{} - is an + -- evidence for that), but no unacknowledged `objectId`s. + assert objectPipelineObjectIds $ + pure $ + SendMsgRequestObjectIdsPipelined + objectIdsToAck + objectIdsToReq + (handleReplies (Succ n)) + + handleReplies :: + forall (n :: N). + Nat (S n) -> + m (ServerStIdle (S n) objectId object m ()) + handleReplies (Succ n'@Succ{}) = + pure $ + CollectPipelined + Nothing + (handleReply (handleReplies n')) + handleReplies (Succ Zero) = + pure $ + CollectPipelined + Nothing + (handleReply serverIdle) + + handleReply :: + forall (n :: N). + m (ServerStIdle n objectId object m ()) -> + -- continuation + Collect objectId object -> + m (ServerStIdle n objectId object m ()) + handleReply k = \case + CollectObjectIds objectIdsToReq objectIds -> do + let objectIdsSeq = StrictSeq.fromList $ fst <$> objectIds + objectIdsMap = Map.fromList objectIds + unless (StrictSeq.length objectIdsSeq <= fromIntegral objectIdsToReq) $ + throwIO ProtocolErrorObjectIdsNotRequested + handleReceivedObjectIds objectIdsToReq objectIdsSeq objectIdsMap + k + CollectObjects objectIds objects -> do + let requested = Map.keysSet objectIds + received = Map.fromList [(objectId object, object) | object <- objects] + + unless (Map.keysSet received `Set.isSubsetOf` requested) $ + throwIO ProtocolErrorObjectNotRequested + + mbe <- handleReceivedObjects objectIds received + traceWith tracer $ TraceObjectDiffusionCollected (objectId `map` objects) + case mbe of + -- one of `object`s had a wrong size + Just e -> + traceWith tracer (TraceObjectInboundError e) + >> throwIO e + Nothing -> k diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Decision.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Decision.hs new file mode 
100644 index 0000000000..fcff50520c --- /dev/null +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Decision.hs @@ -0,0 +1,532 @@ +{-# LANGUAGE BangPatterns #-} +{-# LANGUAGE BlockArguments #-} +{-# LANGUAGE DerivingVia #-} +{-# LANGUAGE ImportQualifiedPost #-} +{-# LANGUAGE LambdaCase #-} +{-# LANGUAGE NamedFieldPuns #-} +{-# LANGUAGE ScopedTypeVariables #-} +{-# LANGUAGE TupleSections #-} +{-# OPTIONS_GHC -Wno-unrecognised-pragmas #-} + +module Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.Decision + ( ObjectDecision (..) + , emptyObjectDecision + + -- * Internal API exposed for testing + , makeDecisions + , filterActivePeers + , pickObjectsToDownload + ) where + +import Control.Arrow ((>>>)) +import Control.Exception (assert) +import Data.Bifunctor (second) +import Data.Hashable +import Data.List qualified as List +import Data.Map.Merge.Strict qualified as Map +import Data.Map.Strict (Map) +import Data.Map.Strict qualified as Map +import Data.Maybe (mapMaybe) +import Data.Sequence.Strict qualified as StrictSeq +import Data.Set (Set) +import Data.Set qualified as Set +import GHC.Stack (HasCallStack) +import Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.Policy +import Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.State +import Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.Types +import Ouroboros.Network.Protocol.ObjectDiffusion.Type +import System.Random (random) + +-- | Make download decisions. +makeDecisions :: + forall peeraddr objectId object. + ( Ord peeraddr + , Ord objectId + , Hashable peeraddr + ) => + -- | decision policy + ObjectDecisionPolicy -> + -- | decision context + SharedObjectState peeraddr objectId object -> + -- | list of available peers. + -- + -- This is a subset of `peerObjectStates` of peers which either: + -- * can be used to download a `object`, + -- * can acknowledge some `objectId`s. 
+ Map peeraddr (PeerObjectState objectId object) -> + ( SharedObjectState peeraddr objectId object + , Map peeraddr (ObjectDecision objectId object) + ) +makeDecisions policy st = + let (salt, rng') = random (peerRng st) + st' = st{peerRng = rng'} + in fn + . pickObjectsToDownload policy st' + . orderByRejections salt + where + fn :: + forall a. + (a, [(peeraddr, ObjectDecision objectId object)]) -> + (a, Map peeraddr (ObjectDecision objectId object)) + fn (a, as) = (a, Map.fromList as) + +-- | Order peers by how useful the OBJECTs they have provided are. +-- +-- OBJECTs delivered late will fail to apply because they were included in +-- a recently adopted block. Peers can race against each other by setting +-- `objectInflightMultiplicity` to > 1. In case of a tie a hash of the peeraddr +-- is used as a tie breaker. Since every invocation use a new salt a given +-- peeraddr does not have an advantage over time. +orderByRejections :: + Hashable peeraddr => + Int -> + Map peeraddr (PeerObjectState objectId object) -> + [(peeraddr, PeerObjectState objectId object)] +orderByRejections salt = + List.sortOn (\(peeraddr, ps) -> (score ps, hashWithSalt salt peeraddr)) + . Map.toList + +-- | Internal state of `pickObjectsToDownload` computation. +data St peeraddr objectId object + = St + { stInflightSize :: !SizeInBytes + -- ^ size of all `object`s in-flight. + , stInflight :: !(Map objectId Int) + -- ^ `objectId`s in-flight. + , stAcknowledged :: !(Map objectId Int) + -- ^ acknowledged `objectId` with multiplicities. It is used to update + -- `referenceCounts`. + , stInSubmissionToObjectPoolObjects :: Set objectId + -- ^ OBJECTs on their way to the objectpool. Used to prevent issueing new + -- fetch requests for them. + } + +-- | Distribute `object`'s to download among available peers. Peers are considered +-- in the given order. +-- +-- * pick objects from the set of available object's (in `objectId` order, note these sets +-- might be different for different peers). 
+-- * pick objects until the peers in-flight limit (we can go over the limit by one object) +-- (`objectsSizeInflightPerPeer` limit) +-- * pick objects until the overall in-flight limit (we can go over the limit by one object) +-- (`maxObjectsSizeInflight` limit) +-- * each object can be downloaded simultaneously from at most +-- `objectInflightMultiplicity` peers. +pickObjectsToDownload :: + forall peeraddr objectId object. + ( Ord peeraddr + , Ord objectId + ) => + -- | decision policy + ObjectDecisionPolicy -> + -- | shared state + SharedObjectState peeraddr objectId object -> + [(peeraddr, PeerObjectState objectId object)] -> + ( SharedObjectState peeraddr objectId object + , [(peeraddr, ObjectDecision objectId object)] + ) +pickObjectsToDownload + policy@ObjectDecisionPolicy + { objectsSizeInflightPerPeer + , maxObjectsSizeInflight + , objectInflightMultiplicity + } + sharedState@SharedObjectState + { peerObjectStates + , inflightObjects + , inflightObjectsSize + , bufferedObjects + , inSubmissionToObjectPoolObjects + , referenceCounts + } = + -- outer fold: fold `[(peeraddr, PeerObjectState objectId object)]` + List.mapAccumR + accumFn + -- initial state + St + { stInflight = inflightObjects + , stInflightSize = inflightObjectsSize + , stAcknowledged = Map.empty + , stInSubmissionToObjectPoolObjects = Map.keysSet inSubmissionToObjectPoolObjects + } + >>> gn + where + accumFn :: + St peeraddr objectId object -> + (peeraddr, PeerObjectState objectId object) -> + ( St peeraddr objectId object + , ( (peeraddr, PeerObjectState objectId object) + , ObjectDecision objectId object + ) + ) + accumFn + st@St + { stInflight + , stInflightSize + , stAcknowledged + , stInSubmissionToObjectPoolObjects + } + ( peeraddr + , peerObjectState@PeerObjectState + { availableObjectIds + , unknownObjects + , requestedObjectsInflight + , requestedObjectsInflightSize + } + ) = + let sizeInflightAll :: SizeInBytes + sizeInflightOther :: SizeInBytes + + sizeInflightAll = stInflightSize 
+ sizeInflightOther = sizeInflightAll - requestedObjectsInflightSize + in if sizeInflightAll >= maxObjectsSizeInflight + then + let ( numObjectIdsToAck + , numObjectIdsToReq + , objectsToObjectPool@ObjectsToObjectPool{listOfObjectsToObjectPool} + , RefCountDiff{objectIdsToAck} + , peerObjectState' + ) = acknowledgeObjectIds policy sharedState peerObjectState + + stAcknowledged' = Map.unionWith (+) stAcknowledged objectIdsToAck + stInSubmissionToObjectPoolObjects' = + stInSubmissionToObjectPoolObjects + <> Set.fromList (map fst listOfObjectsToObjectPool) + in if requestedObjectIdsInflight peerObjectState' > 0 + then + -- we have objectIds to request + ( st + { stAcknowledged = stAcknowledged' + , stInSubmissionToObjectPoolObjects = stInSubmissionToObjectPoolObjects' + } + , + ( (peeraddr, peerObjectState') + , ObjectDecision + { objectIdsToAcknowledge = numObjectIdsToAck + , objectIdsToRequest = numObjectIdsToReq + , objectPipelineObjectIds = + not + . StrictSeq.null + . unacknowledgedObjectIds + $ peerObjectState' + , objectsToRequest = Map.empty + , objectsToObjectPool = objectsToObjectPool + } + ) + ) + else + -- there are no `objectId`s to request, nor we can request `object`s due + -- to in-flight size limits + ( st + , + ( (peeraddr, peerObjectState') + , emptyObjectDecision + ) + ) + else + let requestedObjectsInflightSize' :: SizeInBytes + objectsToRequestMap :: Map objectId SizeInBytes + + (requestedObjectsInflightSize', objectsToRequestMap) = + -- inner fold: fold available `objectId`s + -- + -- Note: although `Map.foldrWithKey` could be used here, it + -- does not allow to short circuit the fold, unlike + -- `foldWithState`. + foldWithState + ( \(objectId, (objectSize, inflightMultiplicity)) sizeInflight -> + if -- note that we pick `objectId`'s as long the `s` is + -- smaller or equal to `objectsSizeInflightPerPeer`. 
+ sizeInflight <= objectsSizeInflightPerPeer + -- overall `object`'s in-flight must be smaller than + -- `maxObjectsSizeInflight` + && sizeInflight + sizeInflightOther <= maxObjectsSizeInflight + -- the transaction must not be downloaded from more + -- than `objectInflightMultiplicity` peers simultaneously + && inflightMultiplicity < objectInflightMultiplicity + -- TODO: we must validate that `objectSize` is smaller than + -- maximum objects size + then Just (sizeInflight + objectSize, (objectId, objectSize)) + else Nothing + ) + ( Map.assocs $ + -- merge `availableObjectIds` with `stInflight`, so we don't + -- need to lookup into `stInflight` on every `objectId` which + -- is in `availableObjectIds`. + Map.merge + (Map.mapMaybeMissing \_objectId -> Just . (,0)) + Map.dropMissing + (Map.zipWithMatched \_objectId -> (,)) + availableObjectIds + stInflight + -- remove `object`s which were already downloaded by some + -- other peer or are in-flight or unknown by this peer. + `Map.withoutKeys` ( Map.keysSet bufferedObjects + <> requestedObjectsInflight + <> unknownObjects + <> stInSubmissionToObjectPoolObjects + ) + ) + requestedObjectsInflightSize + -- pick from `objectId`'s which are available from that given + -- peer. Since we are folding a dictionary each `objectId` + -- will be selected only once from a given peer (at least + -- in each round). 
+ + objectsToRequest = Map.keysSet objectsToRequestMap + peerObjectState' = + peerObjectState + { requestedObjectsInflightSize = requestedObjectsInflightSize' + , requestedObjectsInflight = + requestedObjectsInflight + <> objectsToRequest + } + + ( numObjectIdsToAck + , numObjectIdsToReq + , objectsToObjectPool@ObjectsToObjectPool{listOfObjectsToObjectPool} + , RefCountDiff{objectIdsToAck} + , peerObjectState'' + ) = acknowledgeObjectIds policy sharedState peerObjectState' + + stAcknowledged' = Map.unionWith (+) stAcknowledged objectIdsToAck + + stInflightDelta :: Map objectId Int + stInflightDelta = Map.fromSet (\_ -> 1) objectsToRequest + -- note: this is right since every `objectId` + -- could be picked at most once + + stInflight' :: Map objectId Int + stInflight' = Map.unionWith (+) stInflightDelta stInflight + + stInSubmissionToObjectPoolObjects' = + stInSubmissionToObjectPoolObjects + <> Set.fromList (map fst listOfObjectsToObjectPool) + in if requestedObjectIdsInflight peerObjectState'' > 0 + then + -- we can request `objectId`s & `object`s + ( St + { stInflight = stInflight' + , stInflightSize = sizeInflightOther + requestedObjectsInflightSize' + , stAcknowledged = stAcknowledged' + , stInSubmissionToObjectPoolObjects = stInSubmissionToObjectPoolObjects' + } + , + ( (peeraddr, peerObjectState'') + , ObjectDecision + { objectIdsToAcknowledge = numObjectIdsToAck + , objectPipelineObjectIds = + not + . StrictSeq.null + . unacknowledgedObjectIds + $ peerObjectState'' + , objectIdsToRequest = numObjectIdsToReq + , objectsToRequest = objectsToRequestMap + , objectsToObjectPool = objectsToObjectPool + } + ) + ) + else + -- there are no `objectId`s to request, only `object`s. 
+ ( st + { stInflight = stInflight' + , stInflightSize = sizeInflightOther + requestedObjectsInflightSize' + , stInSubmissionToObjectPoolObjects = stInSubmissionToObjectPoolObjects' + } + , + ( (peeraddr, peerObjectState'') + , emptyObjectDecision{objectsToRequest = objectsToRequestMap} + ) + ) + + gn :: + ( St peeraddr objectId object + , [((peeraddr, PeerObjectState objectId object), ObjectDecision objectId object)] + ) -> + ( SharedObjectState peeraddr objectId object + , [(peeraddr, ObjectDecision objectId object)] + ) + gn + ( St + { stInflight + , stInflightSize + , stAcknowledged + } + , as + ) = + let peerObjectStates' = + Map.fromList ((\(a, _) -> a) <$> as) + <> peerObjectStates + + referenceCounts' = + Map.merge + (Map.mapMaybeMissing \_ x -> Just x) + (Map.mapMaybeMissing \_ _ -> assert False Nothing) + ( Map.zipWithMaybeMatched \_ x y -> + if x > y + then Just $! x - y + else Nothing + ) + referenceCounts + stAcknowledged + + liveSet = Map.keysSet referenceCounts' + + bufferedObjects' = + bufferedObjects + `Map.restrictKeys` liveSet + + inSubmissionToObjectPoolObjects' = + List.foldl' updateInSubmissionToObjectPoolObjects inSubmissionToObjectPoolObjects as + in ( sharedState + { peerObjectStates = peerObjectStates' + , inflightObjects = stInflight + , inflightObjectsSize = stInflightSize + , bufferedObjects = bufferedObjects' + , referenceCounts = referenceCounts' + , inSubmissionToObjectPoolObjects = inSubmissionToObjectPoolObjects' + } + , -- exclude empty results + mapMaybe + ( \((a, _), b) -> case b of + ObjectDecision + { objectIdsToAcknowledge = 0 + , objectIdsToRequest = 0 + , objectsToRequest + , objectsToObjectPool = ObjectsToObjectPool{listOfObjectsToObjectPool} + } + | null objectsToRequest + , null listOfObjectsToObjectPool -> + Nothing + _ -> Just (a, b) + ) + as + ) + where + updateInSubmissionToObjectPoolObjects :: + forall a. 
+ Map objectId Int -> + (a, ObjectDecision objectId object) -> + Map objectId Int + updateInSubmissionToObjectPoolObjects m (_, ObjectDecision{objectsToObjectPool}) = + List.foldl' fn m (listOfObjectsToObjectPool objectsToObjectPool) + where + fn :: + Map objectId Int -> + (objectId, object) -> + Map objectId Int + fn x (objectId, _) = + Map.alter + ( \case + Nothing -> Just 1 + Just n -> Just $! succ n + ) + objectId + x + +-- | Filter peers which can either download a `object` or acknowledge `objectId`s. +filterActivePeers :: + forall peeraddr objectId object. + Ord objectId => + HasCallStack => + ObjectDecisionPolicy -> + SharedObjectState peeraddr objectId object -> + Map peeraddr (PeerObjectState objectId object) +filterActivePeers + policy@ObjectDecisionPolicy + { maxUnacknowledgedObjectIds + , objectsSizeInflightPerPeer + , maxObjectsSizeInflight + , objectInflightMultiplicity + } + sharedObjectState@SharedObjectState + { peerObjectStates + , bufferedObjects + , inflightObjects + , inflightObjectsSize + , inSubmissionToObjectPoolObjects + } + | inflightObjectsSize > maxObjectsSizeInflight = + -- we might be able to request objectIds, we cannot download objects + Map.filter fn peerObjectStates + | otherwise = + -- we might be able to request objectIds or objects. + Map.filter gn peerObjectStates + where + unrequestable = + Map.keysSet (Map.filter (>= objectInflightMultiplicity) inflightObjects) + <> Map.keysSet bufferedObjects + + fn :: PeerObjectState objectId object -> Bool + fn + peerObjectState@PeerObjectState + { requestedObjectIdsInflight + } = + requestedObjectIdsInflight == 0 + -- if a peer has objectIds in-flight, we cannot request more objectIds or objects. + && requestedObjectIdsInflight + numOfUnacked <= maxUnacknowledgedObjectIds + && objectIdsToRequest > 0 + where + -- Split `unacknowledgedObjectIds'` into the longest prefix of `objectId`s which + -- can be acknowledged and the unacknowledged `objectId`s. 
+ (objectIdsToRequest, _, unackedObjectIds) = splitAcknowledgedObjectIds policy sharedObjectState peerObjectState + numOfUnacked = fromIntegral (StrictSeq.length unackedObjectIds) + + gn :: PeerObjectState objectId object -> Bool + gn + peerObjectState@PeerObjectState + { unacknowledgedObjectIds + , requestedObjectIdsInflight + , requestedObjectsInflight + , requestedObjectsInflightSize + , availableObjectIds + , unknownObjects + } = + ( requestedObjectIdsInflight == 0 + && requestedObjectIdsInflight + numOfUnacked <= maxUnacknowledgedObjectIds + && objectIdsToRequest > 0 + ) + || (underSizeLimit && not (Map.null downloadable)) + where + numOfUnacked = fromIntegral (StrictSeq.length unacknowledgedObjectIds) + underSizeLimit = requestedObjectsInflightSize <= objectsSizeInflightPerPeer + downloadable = + availableObjectIds + `Map.withoutKeys` requestedObjectsInflight + `Map.withoutKeys` unknownObjects + `Map.withoutKeys` unrequestable + `Map.withoutKeys` Map.keysSet inSubmissionToObjectPoolObjects + + -- Split `unacknowledgedObjectIds'` into the longest prefix of `objectId`s which + -- can be acknowledged and the unacknowledged `objectId`s. + (objectIdsToRequest, _, _) = splitAcknowledgedObjectIds policy sharedObjectState peerObjectState + +-- +-- Auxiliary functions +-- + +-- | A fold with state implemented as a `foldr` to take advantage of fold-build +-- fusion optimisation. +foldWithState :: + forall s a b c. 
+ Ord b => + (a -> s -> Maybe (s, (b, c))) -> + [a] -> + s -> + (s, Map b c) +{-# INLINE foldWithState #-} +foldWithState f = foldr cons nil + where + cons :: + a -> + (s -> (s, Map b c)) -> + (s -> (s, Map b c)) + cons a k = \ !s -> + case f a s of + Nothing -> nil s + Just (!s', (!b, !c)) -> + case Map.insert b c `second` k s' of + r@(!_s, !_bs) -> r + + nil :: s -> (s, Map b c) + nil = \ !s -> (s, Map.empty) diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Policy.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Policy.hs new file mode 100644 index 0000000000..5858845617 --- /dev/null +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Policy.hs @@ -0,0 +1,68 @@ +{-# LANGUAGE ImportQualifiedPost #-} +{-# LANGUAGE NumericUnderscores #-} + +module Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.Policy + ( ObjectDecisionPolicy (..) + , defaultObjectDecisionPolicy + , max_OBJECT_SIZE + + -- * Re-exports + , NumObjectIdsToReq (..) + ) where + +import Control.Monad.Class.MonadTime.SI +import Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.ObjectPool.API (SizeInBytes (..)) +import Ouroboros.Network.Protocol.ObjectDiffusion.Type (NumObjectIdsToReq (..)) + +-- | Maximal object size. +-- +-- Affects: +-- +-- * `ObjectDecisionPolicy` +-- * `maximumIngressQueue` for `object-submission` mini-protocol, see +-- `Ouroboros.Consensus.MiniProtocol.NodeToNode.objectSubmissionProtocolLimits` +max_OBJECT_SIZE :: SizeInBytes +max_OBJECT_SIZE = 65_540 + +-- | Policy for making decisions +data ObjectDecisionPolicy = ObjectDecisionPolicy + { maxNumObjectIdsToRequest :: !NumObjectIdsToReq + -- ^ a maximal number of objectIds requested at once. + , maxUnacknowledgedObjectIds :: !NumObjectIdsToReq + -- ^ maximal number of unacknowledgedObjectIds. 
Measured in `NumObjectIdsToReq`
+  -- since we enforce this policy by requesting not more objectIds than what
+  -- this limit allows.
+  , --
+    -- Configuration of object decision logic.
+    --
+
+    objectsSizeInflightPerPeer :: !SizeInBytes
+  -- ^ a limit of object size in-flight from a single peer.
+  -- It can be exceeded by max object size.
+  , maxObjectsSizeInflight :: !SizeInBytes
+  -- ^ a limit of object size in-flight from all peers.
+  -- It can be exceeded by max object size.
+  , objectInflightMultiplicity :: !Int
+  -- ^ from how many peers download the `objectId` simultaneously
+  , bufferedObjectsMinLifetime :: !DiffTime
+  -- ^ how long OBJECTs that have been added to the objectpool will be
+  -- kept in the `bufferedObjects` cache.
+  , scoreRate :: !Double
+  -- ^ rate at which "rejected" OBJECTs drain. Unit: OBJECT/seconds.
+  , scoreMax :: !Double
+  -- ^ Maximum number of "rejections". Unit: seconds
+  }
+  deriving Show
+
+defaultObjectDecisionPolicy :: ObjectDecisionPolicy
+defaultObjectDecisionPolicy =
+  ObjectDecisionPolicy
+    { maxNumObjectIdsToRequest = 3
+    , maxUnacknowledgedObjectIds = 10 -- must be the same as objectSubmissionMaxUnacked
+    , objectsSizeInflightPerPeer = max_OBJECT_SIZE * 6
+    , maxObjectsSizeInflight = max_OBJECT_SIZE * 20
+    , objectInflightMultiplicity = 2
+    , bufferedObjectsMinLifetime = 2
+    , scoreRate = 0.1
+    , scoreMax = 15 * 60
+    }
diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Registry.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Registry.hs
new file mode 100644
index 0000000000..2a954aa43d
--- /dev/null
+++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Registry.hs
@@ -0,0 +1,599 @@
+{-# LANGUAGE BangPatterns #-}
+{-# LANGUAGE BlockArguments #-}
+{-# LANGUAGE ImportQualifiedPost #-}
+{-# LANGUAGE LambdaCase #-}
+{-# LANGUAGE NamedFieldPuns #-}
+{-# LANGUAGE ScopedTypeVariables #-} + +module Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.Registry + ( ObjectChannels (..) + , ObjectChannelsVar + , ObjectObjectPoolSem + , SharedObjectStateVar + , newSharedObjectStateVar + , newObjectChannelsVar + , newObjectObjectPoolSem + , PeerObjectAPI (..) + , decisionLogicThreads + , withPeer + ) where + +import Control.Concurrent.Class.MonadMVar.Strict +import Control.Concurrent.Class.MonadSTM.Strict +import Control.Concurrent.Class.MonadSTM.TSem +import Control.Monad.Class.MonadAsync +import Control.Monad.Class.MonadFork +import Control.Monad.Class.MonadThrow +import Control.Monad.Class.MonadTime.SI +import Control.Monad.Class.MonadTimer.SI +import Control.Tracer (Tracer, traceWith) +import Data.Foldable as Foldable (foldl', traverse_) +import Data.Hashable +import Data.Map.Strict (Map) +import Data.Map.Strict qualified as Map +import Data.Maybe (fromMaybe) +import Data.Sequence.Strict (StrictSeq) +import Data.Sequence.Strict qualified as StrictSeq +import Data.Set qualified as Set +import Data.Typeable (Typeable) +import Data.Void (Void) +import Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.Decision +import Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.Policy +import Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.State +import Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.Types +import Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.ObjectPool.API +import Ouroboros.Network.Protocol.ObjectDiffusion.Type + +-- | Communication channels between `ObjectDiffusion` client mini-protocol and +-- decision logic. 
+newtype ObjectChannels m peeraddr objectId object = ObjectChannels + { objectChannelMap :: Map peeraddr (StrictMVar m (ObjectDecision objectId object)) + } + +type ObjectChannelsVar m peeraddr objectId object = + StrictMVar m (ObjectChannels m peeraddr objectId object) + +newObjectChannelsVar :: MonadMVar m => m (ObjectChannelsVar m peeraddr objectId object) +newObjectChannelsVar = newMVar (ObjectChannels Map.empty) + +newtype ObjectObjectPoolSem m = ObjectObjectPoolSem (TSem m) + +newObjectObjectPoolSem :: MonadSTM m => m (ObjectObjectPoolSem m) +newObjectObjectPoolSem = ObjectObjectPoolSem <$> atomically (newTSem 1) + +-- | API to access `PeerObjectState` inside `PeerObjectStateVar`. +data PeerObjectAPI m objectId object = PeerObjectAPI + { readObjectDecision :: m (ObjectDecision objectId object) + -- ^ a blocking action which reads `ObjectDecision` + , handleReceivedObjectIds :: + NumObjectIdsToReq -> + StrictSeq objectId -> + -- \^ received objectIds + Map objectId SizeInBytes -> + -- \^ received sizes of advertised object's + m () + -- ^ handle received objectIds + , handleReceivedObjects :: + Map objectId SizeInBytes -> + -- \^ requested objectIds + Map objectId object -> + -- \^ received objects + m (Maybe ObjectDiffusionProtocolError) + -- ^ handle received objects + , submitObjectToObjectPool :: + Tracer m (TraceObjectDiffusionInbound objectId object) -> + objectId -> + object -> + m () + -- ^ submit the given (objectId, object) to the objectpool. + } + +data ObjectObjectPoolResult = ObjectAccepted | ObjectRejected + +-- | A bracket function which registers / de-registers a new peer in +-- `SharedObjectStateVar` and `PeerObjectStateVar`s, which exposes `PeerObjectStateAPI`. +-- `PeerObjectStateAPI` is only safe inside the `withPeer` scope. +withPeer :: + forall object peeraddr objectId idx m a. 
+ ( MonadMask m + , MonadMVar m + , MonadSTM m + , MonadMonotonicTime m + , Ord objectId + , Show objectId + , Typeable objectId + , Ord peeraddr + , Show peeraddr + ) => + Tracer m (TraceObjectLogic peeraddr objectId object) -> + ObjectChannelsVar m peeraddr objectId object -> + ObjectObjectPoolSem m -> + ObjectDecisionPolicy -> + SharedObjectStateVar m peeraddr objectId object -> + ObjectDiffusionObjectPoolReader objectId object idx m -> + ObjectDiffusionObjectPoolWriter objectId object idx m -> + (object -> SizeInBytes) -> + peeraddr -> + -- ^ new peer + + -- | callback which gives access to `PeerObjectStateAPI` + (PeerObjectAPI m objectId object -> m a) -> + m a +withPeer + tracer + channelsVar + (ObjectObjectPoolSem objectpoolSem) + policy@ObjectDecisionPolicy{bufferedObjectsMinLifetime} + sharedStateVar + ObjectDiffusionObjectPoolReader{objectpoolGetSnapshot} + ObjectDiffusionObjectPoolWriter{objectpoolAddObjects} + objectSize + peeraddr + io = + bracket + ( do + -- create a communication channel + !peerObjectAPI <- + modifyMVar + channelsVar + \ObjectChannels{objectChannelMap} -> do + chann <- newEmptyMVar + let (chann', objectChannelMap') = + Map.alterF + ( \mbChann -> + let !chann'' = fromMaybe chann mbChann + in (chann'', Just chann'') + ) + peeraddr + objectChannelMap + return + ( ObjectChannels{objectChannelMap = objectChannelMap'} + , PeerObjectAPI + { readObjectDecision = takeMVar chann' + , handleReceivedObjectIds + , handleReceivedObjects + , submitObjectToObjectPool + } + ) + + atomically $ modifyTVar sharedStateVar registerPeer + return peerObjectAPI + ) + -- the handler is a short blocking operation, thus we need to use + -- `uninterruptibleMask_` + ( \_ -> uninterruptibleMask_ do + atomically $ modifyTVar sharedStateVar unregisterPeer + modifyMVar_ + channelsVar + \ObjectChannels{objectChannelMap} -> + return ObjectChannels{objectChannelMap = Map.delete peeraddr objectChannelMap} + ) + io + where + registerPeer :: + SharedObjectState peeraddr 
objectId object -> + SharedObjectState peeraddr objectId object + registerPeer st@SharedObjectState{peerObjectStates} = + st + { peerObjectStates = + Map.insert + peeraddr + PeerObjectState + { availableObjectIds = Map.empty + , requestedObjectIdsInflight = 0 + , requestedObjectsInflightSize = 0 + , requestedObjectsInflight = Set.empty + , unacknowledgedObjectIds = StrictSeq.empty + , unknownObjects = Set.empty + , score = 0 + , scoreTs = Time 0 + , downloadedObjects = Map.empty + , toObjectPoolObjects = Map.empty + } + peerObjectStates + } + + -- TODO: this function needs to be tested! + -- Issue: https://github.com/IntersectMBO/ouroboros-network/issues/5151 + unregisterPeer :: + SharedObjectState peeraddr objectId object -> + SharedObjectState peeraddr objectId object + unregisterPeer + st@SharedObjectState + { peerObjectStates + , bufferedObjects + , referenceCounts + , inflightObjects + , inflightObjectsSize + , inSubmissionToObjectPoolObjects + } = + st + { peerObjectStates = peerObjectStates' + , bufferedObjects = bufferedObjects' + , referenceCounts = referenceCounts' + , inflightObjects = inflightObjects' + , inflightObjectsSize = inflightObjectsSize' + , inSubmissionToObjectPoolObjects = inSubmissionToObjectPoolObjects' + } + where + ( PeerObjectState + { unacknowledgedObjectIds + , requestedObjectsInflight + , requestedObjectsInflightSize + , toObjectPoolObjects + } + , peerObjectStates' + ) = + Map.alterF + ( \case + Nothing -> error ("ObjectDiffusion.withPeer: invariant violation for peer " ++ show peeraddr) + Just a -> (a, Nothing) + ) + peeraddr + peerObjectStates + + referenceCounts' = + Foldable.foldl' + ( flip $ Map.update \cnt -> + if cnt > 1 + then Just $! 
pred cnt + else Nothing + ) + referenceCounts + unacknowledgedObjectIds + + liveSet = Map.keysSet referenceCounts' + + bufferedObjects' = + bufferedObjects + `Map.restrictKeys` liveSet + + inflightObjects' = Foldable.foldl' purgeInflightObjects inflightObjects requestedObjectsInflight + inflightObjectsSize' = inflightObjectsSize - requestedObjectsInflightSize + + -- When we unregister a peer, we need to subtract all objects in the + -- `toObjectPoolObjects`, as they will not be submitted to the objectpool. + inSubmissionToObjectPoolObjects' = + Foldable.foldl' + ( flip $ Map.update \cnt -> + if cnt > 1 + then Just $! pred cnt + else Nothing + ) + inSubmissionToObjectPoolObjects + (Map.keysSet toObjectPoolObjects) + + purgeInflightObjects m objectId = Map.alter fn objectId m + where + fn (Just n) | n > 1 = Just $! pred n + fn _ = Nothing + + -- + -- PeerObjectAPI + -- + + submitObjectToObjectPool :: + Tracer m (TraceObjectDiffusionInbound objectId object) -> objectId -> object -> m () + submitObjectToObjectPool objectTracer objectId object = + bracket_ + (atomically $ waitTSem objectpoolSem) + (atomically $ signalTSem objectpoolSem) + $ do + start <- getMonotonicTime + res <- addObject + end <- getMonotonicTime + atomically $ modifyTVar sharedStateVar (updateBufferedObject end res) + let duration = end `diffTime` start + case res of + ObjectAccepted -> traceWith objectTracer (TraceObjectInboundAddedToObjectPool [objectId] duration) + ObjectRejected -> traceWith objectTracer (TraceObjectInboundRejectedFromObjectPool [objectId] duration) + where + -- add the object to the objectpool + addObject :: m ObjectObjectPoolResult + addObject = do + mpSnapshot <- atomically objectpoolGetSnapshot + + -- Note that checking if the objectpool contains a OBJECT before + -- spending several ms attempting to add it to the pool has + -- been judged immoral. 
+ if objectpoolHasObject mpSnapshot objectId + then do + !now <- getMonotonicTime + !s <- countRejectedObjects now 1 + traceWith objectTracer $ + TraceObjectDiffusionProcessed + ProcessedObjectCount + { pobjectcAccepted = 0 + , pobjectcRejected = 1 + , pobjectcScore = s + } + return ObjectRejected + else do + acceptedObjects <- objectpoolAddObjects [object] + end <- getMonotonicTime + if null acceptedObjects + then do + !s <- countRejectedObjects end 1 + traceWith objectTracer $ + TraceObjectDiffusionProcessed + ProcessedObjectCount + { pobjectcAccepted = 0 + , pobjectcRejected = 1 + , pobjectcScore = s + } + return ObjectRejected + else do + !s <- countRejectedObjects end 0 + traceWith objectTracer $ + TraceObjectDiffusionProcessed + ProcessedObjectCount + { pobjectcAccepted = 1 + , pobjectcRejected = 0 + , pobjectcScore = s + } + return ObjectAccepted + + updateBufferedObject :: + Time -> + ObjectObjectPoolResult -> + SharedObjectState peeraddr objectId object -> + SharedObjectState peeraddr objectId object + updateBufferedObject + _ + ObjectRejected + st@SharedObjectState + { peerObjectStates + , inSubmissionToObjectPoolObjects + } = + st + { peerObjectStates = peerObjectStates' + , inSubmissionToObjectPoolObjects = inSubmissionToObjectPoolObjects' + } + where + inSubmissionToObjectPoolObjects' = + Map.update + (\case 1 -> Nothing; n -> Just $! pred n) + objectId + inSubmissionToObjectPoolObjects + + peerObjectStates' = Map.update fn peeraddr peerObjectStates + where + fn ps = Just $! 
ps{toObjectPoolObjects = Map.delete objectId (toObjectPoolObjects ps)} + updateBufferedObject + now + ObjectAccepted + st@SharedObjectState + { peerObjectStates + , bufferedObjects + , referenceCounts + , timedObjects + , inSubmissionToObjectPoolObjects + } = + st + { peerObjectStates = peerObjectStates' + , bufferedObjects = bufferedObjects' + , timedObjects = timedObjects' + , referenceCounts = referenceCounts' + , inSubmissionToObjectPoolObjects = inSubmissionToObjectPoolObjects' + } + where + inSubmissionToObjectPoolObjects' = + Map.update + (\case 1 -> Nothing; n -> Just $! pred n) + objectId + inSubmissionToObjectPoolObjects + + timedObjects' = Map.alter fn (addTime bufferedObjectsMinLifetime now) timedObjects + where + fn :: Maybe [objectId] -> Maybe [objectId] + fn Nothing = Just [objectId] + fn (Just objectIds) = Just $! (objectId : objectIds) + + referenceCounts' = Map.alter fn objectId referenceCounts + where + fn :: Maybe Int -> Maybe Int + fn Nothing = Just 1 + fn (Just n) = Just $! succ n + + bufferedObjects' = Map.insert objectId (Just object) bufferedObjects + + peerObjectStates' = Map.update fn peeraddr peerObjectStates + where + fn ps = Just $! ps{toObjectPoolObjects = Map.delete objectId (toObjectPoolObjects ps)} + + handleReceivedObjectIds :: + NumObjectIdsToReq -> + StrictSeq objectId -> + Map objectId SizeInBytes -> + m () + handleReceivedObjectIds numObjectIdsToReq objectIdsSeq objectIdsMap = + receivedObjectIds + tracer + sharedStateVar + objectpoolGetSnapshot + peeraddr + numObjectIdsToReq + objectIdsSeq + objectIdsMap + + handleReceivedObjects :: + Map objectId SizeInBytes -> + -- \^ requested objectIds with their announced size + Map objectId object -> + -- \^ received objects + m (Maybe ObjectDiffusionProtocolError) + handleReceivedObjects objectIds objects = + collectObjects tracer objectSize sharedStateVar peeraddr objectIds objects + + -- Update `score` & `scoreTs` fields of `PeerObjectState`, return the new + -- updated `score`. 
+ -- + -- PRECONDITION: the `Double` argument is non-negative. + countRejectedObjects :: + Time -> + Double -> + m Double + countRejectedObjects _ n + | n < 0 = + error ("ObjectDiffusion.countRejectedObjects: invariant violation for peer " ++ show peeraddr) + countRejectedObjects now n = atomically $ stateTVar sharedStateVar $ \st -> + let (result, peerObjectStates') = Map.alterF fn peeraddr (peerObjectStates st) + in (result, st{peerObjectStates = peerObjectStates'}) + where + fn :: Maybe (PeerObjectState objectId object) -> (Double, Maybe (PeerObjectState objectId object)) + fn Nothing = error ("ObjectDiffusion.withPeer: invariant violation for peer " ++ show peeraddr) + fn (Just ps) = (score ps', Just $! ps') + where + ps' = updateRejects policy now n ps + +updateRejects :: + ObjectDecisionPolicy -> + Time -> + Double -> + PeerObjectState objectId object -> + PeerObjectState objectId object +updateRejects _ now 0 pts | score pts == 0 = pts{scoreTs = now} +updateRejects + ObjectDecisionPolicy{scoreRate, scoreMax} + now + n + pts@PeerObjectState{score, scoreTs} = + let duration = diffTime now scoreTs + !drain = realToFrac duration * scoreRate + !drained = max 0 $ score - drain + in pts + { score = min scoreMax $ drained + n + , scoreTs = now + } + +drainRejectionThread :: + forall m peeraddr objectId object. 
+ ( MonadDelay m + , MonadSTM m + , MonadThread m + , Ord objectId + ) => + Tracer m (TraceObjectLogic peeraddr objectId object) -> + ObjectDecisionPolicy -> + SharedObjectStateVar m peeraddr objectId object -> + m Void +drainRejectionThread tracer policy sharedStateVar = do + labelThisThread "object-rejection-drain" + now <- getMonotonicTime + go $ addTime drainInterval now + where + drainInterval :: DiffTime + drainInterval = 7 + + go :: Time -> m Void + go !nextDrain = do + threadDelay 1 + + !now <- getMonotonicTime + st'' <- atomically $ do + st <- readTVar sharedStateVar + let ptss = + if now > nextDrain + then Map.map (updateRejects policy now 0) (peerObjectStates st) + else peerObjectStates st + st' = + tickTimedObjects + now + st + { peerObjectStates = ptss + } + writeTVar sharedStateVar st' + return st' + traceWith tracer (TraceSharedObjectState "drainRejectionThread" st'') + + if now > nextDrain + then go $ addTime drainInterval now + else go nextDrain + +decisionLogicThread :: + forall m peeraddr objectId object. + ( MonadDelay m + , MonadMVar m + , MonadSTM m + , MonadMask m + , MonadFork m + , Ord peeraddr + , Ord objectId + , Hashable peeraddr + ) => + Tracer m (TraceObjectLogic peeraddr objectId object) -> + Tracer m ObjectDiffusionCounters -> + ObjectDecisionPolicy -> + ObjectChannelsVar m peeraddr objectId object -> + SharedObjectStateVar m peeraddr objectId object -> + m Void +decisionLogicThread tracer counterTracer policy objectChannelsVar sharedStateVar = do + labelThisThread "object-decision" + go + where + go :: m Void + go = do + -- We rate limit the decision making process, it could overwhelm the CPU + -- if there are too many inbound connections. 
+ threadDelay _DECISION_LOOP_DELAY + + (decisions, st) <- atomically do + sharedObjectState <- readTVar sharedStateVar + let activePeers = filterActivePeers policy sharedObjectState + + -- block until at least one peer is active + check (not (Map.null activePeers)) + + let (sharedState, decisions) = makeDecisions policy sharedObjectState activePeers + writeTVar sharedStateVar sharedState + return (decisions, sharedState) + traceWith tracer (TraceSharedObjectState "decisionLogicThread" st) + traceWith tracer (TraceObjectDecisions decisions) + ObjectChannels{objectChannelMap} <- readMVar objectChannelsVar + traverse_ + (\(mvar, d) -> modifyMVarWithDefault_ mvar d (\d' -> pure (d' <> d))) + ( Map.intersectionWith + (,) + objectChannelMap + decisions + ) + traceWith counterTracer (mkObjectDiffusionCounters st) + go + + -- Variant of modifyMVar_ that puts a default value if the MVar is empty. + modifyMVarWithDefault_ :: StrictMVar m a -> a -> (a -> m a) -> m () + modifyMVarWithDefault_ m d io = + mask $ \restore -> do + mbA <- tryTakeMVar m + case mbA of + Just a -> do + a' <- restore (io a) `onException` putMVar m a + putMVar m a' + Nothing -> putMVar m d + +-- | Run `decisionLogicThread` and `drainRejectionThread`. +decisionLogicThreads :: + forall m peeraddr objectId object. 
+ ( MonadDelay m + , MonadMVar m + , MonadMask m + , MonadAsync m + , MonadFork m + , Ord peeraddr + , Ord objectId + , Hashable peeraddr + ) => + Tracer m (TraceObjectLogic peeraddr objectId object) -> + Tracer m ObjectDiffusionCounters -> + ObjectDecisionPolicy -> + ObjectChannelsVar m peeraddr objectId object -> + SharedObjectStateVar m peeraddr objectId object -> + m Void +decisionLogicThreads tracer counterTracer policy objectChannelsVar sharedStateVar = + uncurry (<>) + <$> drainRejectionThread tracer policy sharedStateVar + `concurrently` decisionLogicThread tracer counterTracer policy objectChannelsVar sharedStateVar + +-- `5ms` delay +_DECISION_LOOP_DELAY :: DiffTime +_DECISION_LOOP_DELAY = 0.005 diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/State.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/State.hs new file mode 100644 index 0000000000..b4bce755d4 --- /dev/null +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/State.hs @@ -0,0 +1,646 @@ +{-# LANGUAGE BlockArguments #-} +{-# LANGUAGE ImportQualifiedPost #-} +{-# LANGUAGE LambdaCase #-} +{-# LANGUAGE NamedFieldPuns #-} +{-# LANGUAGE RankNTypes #-} +{-# LANGUAGE ScopedTypeVariables #-} + +module Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.State + ( -- * Core API + SharedObjectState (..) + , PeerObjectState (..) + , SharedObjectStateVar + , newSharedObjectStateVar + , receivedObjectIds + , collectObjects + , acknowledgeObjectIds + , splitAcknowledgedObjectIds + , tickTimedObjects + , const_MAX_OBJECT_SIZE_DISCREPENCY + + -- * Internals, only exported for testing purposes: + , RefCountDiff (..) 
+ , updateRefCounts + , receivedObjectIdsImpl + , collectObjectsImpl + ) where + +import Control.Concurrent.Class.MonadSTM.Strict +import Control.Exception (assert) +import Control.Monad.Class.MonadTime.SI +import Control.Tracer (Tracer, traceWith) +import Data.Foldable (fold, toList) +import Data.Foldable qualified as Foldable +import Data.Functor (($>)) +import Data.Map.Merge.Strict qualified as Map +import Data.Map.Strict (Map) +import Data.Map.Strict qualified as Map +import Data.Maybe (fromJust, maybeToList) +import Data.Sequence.Strict (StrictSeq) +import Data.Sequence.Strict qualified as StrictSeq +import Data.Set qualified as Set +import Data.Typeable (Typeable) +import GHC.Stack (HasCallStack) +import Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.Policy +import Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.Types +import Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.ObjectPool.API (ObjectPoolSnapshot (..), SizeInBytes (..)) +import Ouroboros.Network.Protocol.ObjectDiffusion.Type (NumObjectIdsToAck (..)) +import System.Random (StdGen) + +-- +-- Pure public API +-- + +acknowledgeObjectIds :: + forall peeraddr object objectId. + Ord objectId => + HasCallStack => + ObjectDecisionPolicy -> + SharedObjectState peeraddr objectId object -> + PeerObjectState objectId object -> + -- | number of objectId to acknowledge, requests, objects which we can submit to the + -- objectpool, objectIds to acknowledge with multiplicities, updated PeerObjectState. 
+ ( NumObjectIdsToAck + , NumObjectIdsToReq + , ObjectsToObjectPool objectId object + , RefCountDiff objectId + , PeerObjectState objectId object + ) +{-# INLINE acknowledgeObjectIds #-} +acknowledgeObjectIds + policy + sharedObjectState + ps@PeerObjectState + { availableObjectIds + , unknownObjects + , requestedObjectIdsInflight + , downloadedObjects + , score + , toObjectPoolObjects + } = + -- We can only acknowledge objectIds when we can request new ones, since + -- a `MsgRequestObjectIds` for 0 objectIds is a protocol error. + if objectIdsToRequest > 0 + then + ( objectIdsToAcknowledge + , objectIdsToRequest + , ObjectsToObjectPool objectsToObjectPool + , refCountDiff + , ps + { unacknowledgedObjectIds = unacknowledgedObjectIds' + , availableObjectIds = availableObjectIds' + , unknownObjects = unknownObjects' + , requestedObjectIdsInflight = + requestedObjectIdsInflight + + objectIdsToRequest + , downloadedObjects = downloadedObjects' + , score = score' + , toObjectPoolObjects = toObjectPoolObjects' + } + ) + else + ( 0 + , 0 + , ObjectsToObjectPool objectsToObjectPool + , RefCountDiff Map.empty + , ps{toObjectPoolObjects = toObjectPoolObjects'} + ) + where + -- Split `unacknowledgedObjectIds'` into the longest prefix of `objectId`s which + -- can be acknowledged and the unacknowledged `objectId`s. 
(objectIdsToRequest, acknowledgedObjectIds, unacknowledgedObjectIds') =
+      splitAcknowledgedObjectIds policy sharedObjectState ps
+
+    objectsToObjectPool =
+      [ (objectId, object)
+      | objectId <- toList toObjectPoolObjectIds
+      , objectId `Map.notMember` bufferedObjects sharedObjectState
+      , object <- maybeToList $ objectId `Map.lookup` downloadedObjects
+      ]
+    (toObjectPoolObjectIds, _) =
+      StrictSeq.spanl (`Map.member` downloadedObjects) acknowledgedObjectIds
+
+    objectsToObjectPoolMap = Map.fromList objectsToObjectPool
+
+    toObjectPoolObjects' = toObjectPoolObjects <> objectsToObjectPoolMap
+
+    (downloadedObjects', ackedDownloadedObjects) = Map.partitionWithKey (\objectId _ -> objectId `Set.member` liveSet) downloadedObjects
+    -- lateObjects: objects which were downloaded by another peer before we
+    -- downloaded them; it relies on the fact that `objectsToObjectPool` filters out
+    -- `bufferedObjects`.
+    lateObjects =
+      Map.filterWithKey
+        (\objectId _ -> objectId `Map.notMember` objectsToObjectPoolMap)
+        ackedDownloadedObjects
+    score' = score + fromIntegral (Map.size lateObjects)
+
+    -- the set of live `objectIds`
+    liveSet = Set.fromList (toList unacknowledgedObjectIds')
+
+    availableObjectIds' =
+      availableObjectIds
+        `Map.restrictKeys` liveSet
+
+    -- We remove all acknowledged `objectId`s which are not in
+    -- `unacknowledgedObjectIds''`, but also return the unknown set before any
+    -- modifications (which is used to compute `unacknowledgedObjectIds''`
+    -- above).
+    unknownObjects' = unknownObjects `Set.intersection` liveSet
+
+    refCountDiff =
+      RefCountDiff $
+        foldr
+          (Map.alter fn)
+          Map.empty
+          acknowledgedObjectIds
+      where
+        fn :: Maybe Int -> Maybe Int
+        fn Nothing = Just 1
+        fn (Just n) = Just $!
n + 1 + + objectIdsToAcknowledge :: NumObjectIdsToAck + objectIdsToAcknowledge = fromIntegral $ StrictSeq.length acknowledgedObjectIds + +-- | Split unacknowledged objectIds into acknowledged and unacknowledged parts, also +-- return number of objectIds which can be requested. +splitAcknowledgedObjectIds :: + Ord objectId => + HasCallStack => + ObjectDecisionPolicy -> + SharedObjectState peer objectId object -> + PeerObjectState objectId object -> + -- | number of objectIds to request, acknowledged objectIds, unacknowledged objectIds + (NumObjectIdsToReq, StrictSeq.StrictSeq objectId, StrictSeq.StrictSeq objectId) +splitAcknowledgedObjectIds + ObjectDecisionPolicy + { maxUnacknowledgedObjectIds + , maxNumObjectIdsToRequest + } + SharedObjectState + { bufferedObjects + } + PeerObjectState + { unacknowledgedObjectIds + , unknownObjects + , downloadedObjects + , requestedObjectsInflight + , requestedObjectIdsInflight + } = + (objectIdsToRequest, acknowledgedObjectIds', unacknowledgedObjectIds') + where + (acknowledgedObjectIds', unacknowledgedObjectIds') = + StrictSeq.spanl + ( \objectId -> + ( objectId `Map.member` bufferedObjects + || objectId `Set.member` unknownObjects + || objectId `Map.member` downloadedObjects + ) + && objectId `Set.notMember` requestedObjectsInflight + ) + unacknowledgedObjectIds + numOfUnacked = StrictSeq.length unacknowledgedObjectIds + numOfAcked = StrictSeq.length acknowledgedObjectIds' + unackedAndRequested = fromIntegral numOfUnacked + requestedObjectIdsInflight + + objectIdsToRequest = + assert (unackedAndRequested <= maxUnacknowledgedObjectIds) $ + assert (requestedObjectIdsInflight <= maxNumObjectIdsToRequest) $ + (maxUnacknowledgedObjectIds - unackedAndRequested + fromIntegral numOfAcked) + `min` (maxNumObjectIdsToRequest - requestedObjectIdsInflight) + +-- | `RefCountDiff` represents a map of `objectId` which can be acknowledged +-- together with their multiplicities. 
+newtype RefCountDiff objectId = RefCountDiff + { objectIdsToAck :: Map objectId Int + } + +updateRefCounts :: + Ord objectId => + Map objectId Int -> + RefCountDiff objectId -> + Map objectId Int +updateRefCounts referenceCounts (RefCountDiff diff) = + Map.merge + (Map.mapMaybeMissing \_ x -> Just x) + (Map.mapMaybeMissing \_ _ -> Nothing) + ( Map.zipWithMaybeMatched \_ x y -> + assert + (x >= y) + if x > y + then Just $! x - y + else Nothing + ) + referenceCounts + diff + +tickTimedObjects :: + forall peeraddr object objectId. + Ord objectId => + Time -> + SharedObjectState peeraddr objectId object -> + SharedObjectState peeraddr objectId object +tickTimedObjects + now + st@SharedObjectState + { timedObjects + , referenceCounts + , bufferedObjects + } = + let (expiredObjects', timedObjects') = + case Map.splitLookup now timedObjects of + (expired, Just objectIds, timed) -> + ( expired -- Map.split doesn't include the `now` entry in the map + , Map.insert now objectIds timed + ) + (expired, Nothing, timed) -> + (expired, timed) + refDiff = Map.foldl' fn Map.empty expiredObjects' + referenceCounts' = updateRefCounts referenceCounts (RefCountDiff refDiff) + liveSet = Map.keysSet referenceCounts' + bufferedObjects' = bufferedObjects `Map.restrictKeys` liveSet + in st + { timedObjects = timedObjects' + , referenceCounts = referenceCounts' + , bufferedObjects = bufferedObjects' + } + where + fn :: + Map objectId Int -> + [objectId] -> + Map objectId Int + fn m objectIds = Foldable.foldl' gn m objectIds + + gn :: + Map objectId Int -> + objectId -> + Map objectId Int + gn m objectId = Map.alter af objectId m + + af :: + Maybe Int -> + Maybe Int + af Nothing = Just 1 + af (Just n) = Just $! succ n + +-- +-- Pure internal API +-- + +-- | Insert received `objectId`s and return the number of objectIds to be acknowledged +-- and the updated `SharedObjectState`. +receivedObjectIdsImpl :: + forall peeraddr object objectId. 
+ (Ord objectId, Ord peeraddr, HasCallStack) => + -- | check if objectId is in the objectpool, ref + -- 'objectpoolHasObject' + (objectId -> Bool) -> + peeraddr -> + -- | number of requests to subtract from + -- `requestedObjectIdsInflight` + NumObjectIdsToReq -> + -- | sequence of received `objectIds` + StrictSeq objectId -> + -- | received `objectId`s with sizes + Map objectId SizeInBytes -> + SharedObjectState peeraddr objectId object -> + SharedObjectState peeraddr objectId object +receivedObjectIdsImpl + objectpoolHasObject + peeraddr + reqNo + objectIdsSeq + objectIdsMap + st@SharedObjectState + { peerObjectStates + , bufferedObjects + , referenceCounts + } = + -- using `alterF` so the update of `PeerObjectState` is done in one lookup + case Map.alterF + (fmap Just . fn . fromJust) + peeraddr + peerObjectStates of + (st', peerObjectStates') -> + st'{peerObjectStates = peerObjectStates'} + where + -- update `PeerObjectState` and return number of `objectId`s to acknowledged and + -- updated `SharedObjectState`. + fn :: + PeerObjectState objectId object -> + ( SharedObjectState peeraddr objectId object + , PeerObjectState objectId object + ) + fn + ps@PeerObjectState + { availableObjectIds + , requestedObjectIdsInflight + , unacknowledgedObjectIds + } = + (st', ps') + where + -- + -- Handle new `objectId`s + -- + + -- Divide the new objectIds in two: those that are already in the objectpool + -- and those that are not. We'll request some objects from the latter. + (ignoredObjectIds, availableObjectIdsMap) = + Map.partitionWithKey + (\objectId _ -> objectpoolHasObject objectId) + objectIdsMap + + -- Add all `objectIds` from `availableObjectIdsMap` which are not + -- unacknowledged or already buffered. Unacknowledged objectIds must have + -- already been added to `availableObjectIds` map before. 
+ availableObjectIds' = + Map.foldlWithKey + (\m objectId sizeInBytes -> Map.insert objectId sizeInBytes m) + availableObjectIds + ( Map.filterWithKey + ( \objectId _ -> + objectId `notElem` unacknowledgedObjectIds + && objectId `Map.notMember` bufferedObjects + ) + availableObjectIdsMap + ) + + -- Add received objectIds to `unacknowledgedObjectIds`. + unacknowledgedObjectIds' = unacknowledgedObjectIds <> objectIdsSeq + + -- Add ignored `objects` to buffered ones. + -- Note: we prefer to keep the `object` if it's already in `bufferedObjects`. + bufferedObjects' = + bufferedObjects + <> Map.map (const Nothing) ignoredObjectIds + + referenceCounts' = + Foldable.foldl' + ( flip $ + Map.alter + ( \case + Nothing -> Just $! 1 + Just cnt -> Just $! succ cnt + ) + ) + referenceCounts + objectIdsSeq + + st' = + st + { bufferedObjects = bufferedObjects' + , referenceCounts = referenceCounts' + } + ps' = + assert + (requestedObjectIdsInflight >= reqNo) + ps + { availableObjectIds = availableObjectIds' + , unacknowledgedObjectIds = unacknowledgedObjectIds' + , requestedObjectIdsInflight = requestedObjectIdsInflight - reqNo + } + +-- | We check advertised sizes up in a fuzzy way. The advertised and received +-- sizes need to agree up to `const_MAX_OBJECT_SIZE_DISCREPENCY`. +const_MAX_OBJECT_SIZE_DISCREPENCY :: SizeInBytes +const_MAX_OBJECT_SIZE_DISCREPENCY = 32 + +collectObjectsImpl :: + forall peeraddr object objectId. + ( Ord peeraddr + , Ord objectId + , Show objectId + , Typeable objectId + ) => + -- | compute object size + (object -> SizeInBytes) -> + peeraddr -> + -- | requested objectIds + Map objectId SizeInBytes -> + -- | received objects + Map objectId object -> + SharedObjectState peeraddr objectId object -> + -- | Return list of `objectId` which sizes didn't match or a new state. + -- If one of the `object` has wrong size, we return an error. The + -- mini-protocol will throw, which will clean the state map from this peer. 
+ Either + ObjectDiffusionProtocolError + (SharedObjectState peeraddr objectId object) +collectObjectsImpl + objectSize + peeraddr + requestedObjectIdsMap + receivedObjects + st@SharedObjectState{peerObjectStates} = + -- using `alterF` so the update of `PeerObjectState` is done in one lookup + case Map.alterF + (fmap Just . fn . fromJust) + peeraddr + peerObjectStates of + (Right st', peerObjectStates') -> + Right st'{peerObjectStates = peerObjectStates'} + (Left e, _) -> + Left $ ProtocolErrorObjectSizeError e + where + -- Update `PeerObjectState` and partially update `SharedObjectState` (except of + -- `peerObjectStates`). + fn :: + PeerObjectState objectId object -> + ( Either + [(objectId, SizeInBytes, SizeInBytes)] + (SharedObjectState peeraddr objectId object) + , PeerObjectState objectId object + ) + fn ps = + case wrongSizedObjects of + [] -> + ( Right st' + , ps'' + ) + _ -> + ( Left wrongSizedObjects + , ps + ) + where + wrongSizedObjects :: [(objectId, SizeInBytes, SizeInBytes)] + wrongSizedObjects = + map (\(a, (b, c)) -> (a, b, c)) + . Map.toList + $ Map.merge + Map.dropMissing + Map.dropMissing + ( Map.zipWithMaybeMatched \_ receivedSize advertisedSize -> + if receivedSize `checkObjectSize` advertisedSize + then Nothing + else Just (receivedSize, advertisedSize) + ) + (objectSize `Map.map` receivedObjects) + requestedObjectIdsMap + + checkObjectSize :: + SizeInBytes -> + SizeInBytes -> + Bool + checkObjectSize received advertised + | received > advertised = + received - advertised <= const_MAX_OBJECT_SIZE_DISCREPENCY + | otherwise = + advertised - received <= const_MAX_OBJECT_SIZE_DISCREPENCY + + requestedObjectIds = Map.keysSet requestedObjectIdsMap + notReceived = requestedObjectIds Set.\\ Map.keysSet receivedObjects + downloadedObjects' = downloadedObjects ps <> receivedObjects + -- Add not received objects to `unknownObjects` before acknowledging objectIds. 
+ unknownObjects' = unknownObjects ps <> notReceived + + requestedObjectsInflight' = + assert (requestedObjectIds `Set.isSubsetOf` requestedObjectsInflight ps) $ + requestedObjectsInflight ps Set.\\ requestedObjectIds + + requestedSize = fold $ availableObjectIds ps `Map.restrictKeys` requestedObjectIds + requestedObjectsInflightSize' = + assert (requestedObjectsInflightSize ps >= requestedSize) $ + requestedObjectsInflightSize ps - requestedSize + + -- subtract requested from in-flight + inflightObjects'' = + Map.merge + (Map.mapMaybeMissing \_ x -> Just x) + (Map.mapMaybeMissing \_ _ -> assert False Nothing) + ( Map.zipWithMaybeMatched \_ x y -> + assert + (x >= y) + let z = x - y + in if z > 0 + then Just z + else Nothing + ) + (inflightObjects st) + (Map.fromSet (const 1) requestedObjectIds) + + inflightObjectsSize'' = + assert (inflightObjectsSize st >= requestedSize) $ + inflightObjectsSize st - requestedSize + + st' = + st + { inflightObjects = inflightObjects'' + , inflightObjectsSize = inflightObjectsSize'' + } + + -- + -- Update PeerObjectState + -- + + -- Remove the downloaded `objectId`s from the availableObjectIds map, this + -- guarantees that we won't attempt to download the `objectIds` from this peer + -- once we collect the `objectId`s. Also restrict keys to `liveSet`. + -- + -- NOTE: we could remove `notReceived` from `availableObjectIds`; and + -- possibly avoid using `unknownObjects` field at all. + -- + availableObjectIds'' = + availableObjectIds ps + `Map.withoutKeys` requestedObjectIds + + -- Remove all acknowledged `objectId`s from unknown set, but only those + -- which are not present in `unacknowledgedObjectIds'` + unknownObjects'' = + unknownObjects' + `Set.intersection` live + where + -- We cannot use `liveSet` as `unknown <> notReceived` might + -- contain `objectIds` which are in `liveSet` but are not `live`. 
+ live = Set.fromList (toList (unacknowledgedObjectIds ps)) + + ps'' = + ps + { availableObjectIds = availableObjectIds'' + , unknownObjects = unknownObjects'' + , requestedObjectsInflightSize = requestedObjectsInflightSize' + , requestedObjectsInflight = requestedObjectsInflight' + , downloadedObjects = downloadedObjects' + } + +-- +-- Monadic public API +-- + +type SharedObjectStateVar m peeraddr objectId object = + StrictTVar m (SharedObjectState peeraddr objectId object) + +newSharedObjectStateVar :: + MonadSTM m => + StdGen -> + m (SharedObjectStateVar m peeraddr objectId object) +newSharedObjectStateVar rng = + newTVarIO + SharedObjectState + { peerObjectStates = Map.empty + , inflightObjects = Map.empty + , inflightObjectsSize = 0 + , bufferedObjects = Map.empty + , referenceCounts = Map.empty + , timedObjects = Map.empty + , inSubmissionToObjectPoolObjects = Map.empty + , peerRng = rng + } + +-- | Acknowledge `objectId`s, return the number of `objectIds` to be acknowledged to the +-- remote side. +receivedObjectIds :: + forall m peeraddr idx object objectId. + (MonadSTM m, Ord objectId, Ord peeraddr) => + Tracer m (TraceObjectLogic peeraddr objectId object) -> + SharedObjectStateVar m peeraddr objectId object -> + STM m (ObjectPoolSnapshot objectId object idx) -> + peeraddr -> + -- | number of requests to subtract from + -- `requestedObjectIdsInflight` + NumObjectIdsToReq -> + -- | sequence of received `objectIds` + StrictSeq objectId -> + -- | received `objectId`s with sizes + Map objectId SizeInBytes -> + m () +receivedObjectIds tracer sharedVar getObjectPoolSnapshot peeraddr reqNo objectIdsSeq objectIdsMap = do + st <- atomically $ do + ObjectPoolSnapshot{objectpoolHasObject} <- getObjectPoolSnapshot + stateTVar + sharedVar + ((\a -> (a, a)) . receivedObjectIdsImpl objectpoolHasObject peeraddr reqNo objectIdsSeq objectIdsMap) + traceWith tracer (TraceSharedObjectState "receivedObjectIds" st) + +-- | Include received `object`s in `SharedObjectState`. 
Return number of `objectIds` +-- to be acknowledged and list of `object` to be added to the objectpool. +collectObjects :: + forall m peeraddr object objectId. + ( MonadSTM m + , Ord objectId + , Ord peeraddr + , Show objectId + , Typeable objectId + ) => + Tracer m (TraceObjectLogic peeraddr objectId object) -> + (object -> SizeInBytes) -> + SharedObjectStateVar m peeraddr objectId object -> + peeraddr -> + -- | set of requested objectIds with their announced size + Map objectId SizeInBytes -> + -- | received objects + Map objectId object -> + -- | number of objectIds to be acknowledged and objects to be added to the + -- objectpool + m (Maybe ObjectDiffusionProtocolError) +collectObjects tracer objectSize sharedVar peeraddr objectIdsRequested objectsMap = do + r <- atomically $ do + st <- readTVar sharedVar + case collectObjectsImpl objectSize peeraddr objectIdsRequested objectsMap st of + r@(Right st') -> + writeTVar sharedVar st' + $> r + r@Left{} -> pure r + case r of + Right st -> + traceWith tracer (TraceSharedObjectState "collectObjects" st) + $> Nothing + Left e -> return (Just e) diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Types.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Types.hs new file mode 100644 index 0000000000..4a6da67f43 --- /dev/null +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Types.hs @@ -0,0 +1,427 @@ +{-# LANGUAGE DeriveGeneric #-} +{-# LANGUAGE DerivingStrategies #-} +{-# LANGUAGE ExistentialQuantification #-} +{-# LANGUAGE FlexibleContexts #-} +{-# LANGUAGE GADTs #-} +{-# LANGUAGE GeneralisedNewtypeDeriving #-} +{-# LANGUAGE ImportQualifiedPost #-} +{-# LANGUAGE NamedFieldPuns #-} +{-# LANGUAGE ScopedTypeVariables #-} +{-# LANGUAGE StandaloneDeriving #-} +{-# LANGUAGE TypeApplications #-} + +module 
Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.Types + ( -- * PeerObjectState + PeerObjectState (..) + + -- * SharedObjectState + , SharedObjectState (..) + + -- * Decisions + , ObjectsToObjectPool (..) + , ObjectDecision (..) + , emptyObjectDecision + , TraceObjectLogic (..) + , ObjectDiffusionInitDelay (..) + , defaultObjectDiffusionInitDelay + + -- * Types shared with V1 + + -- ** Various + , ProcessedObjectCount (..) + , ObjectDiffusionLogicVersion (..) + + -- ** ObjectPool API + , ObjectDiffusionObjectPoolWriter (..) + + -- ** Traces + , TraceObjectDiffusionInbound (..) + , ObjectDiffusionCounters (..) + , mkObjectDiffusionCounters + + -- ** Protocol Error + , ObjectDiffusionProtocolError (..) + ) where + +import Control.Exception (Exception (..)) +import Control.Monad.Class.MonadTime.SI +import Data.Map.Strict (Map) +import Data.Map.Strict qualified as Map +import Data.Monoid (Sum (..)) +import Data.Sequence.Strict (StrictSeq) +import Data.Set (Set) +import Data.Set qualified as Set +import Data.Typeable (Typeable, eqT, (:~:) (Refl)) +import GHC.Generics (Generic) +import NoThunks.Class (NoThunks (..)) +import Ouroboros.Network.Protocol.ObjectDiffusion.Type +import System.Random (StdGen) + +-- | Flag to enable/disable the usage of the new object-submission logic. +data ObjectDiffusionLogicVersion + = -- | the legacy `Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V1` + ObjectDiffusionLogicV1 + | -- | the new `Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2` + ObjectDiffusionLogicV2 + deriving (Eq, Show, Enum, Bounded) + +-- +-- PeerObjectState, SharedObjectState +-- + +data PeerObjectState objectId object = PeerObjectState + { unacknowledgedObjectIds :: !(StrictSeq objectId) + -- ^ Those transactions (by their identifier) that the client has told + -- us about, and which we have not yet acknowledged. This is kept in + -- the order in which the client gave them to us. 
This is the same order
+ -- in which we submit them to the objectpool (or for this example, the final
+ -- result order). It is also the order we acknowledge in.
+ , availableObjectIds :: !(Map objectId SizeInBytes)
+ -- ^ Set of known transaction ids which can be requested from this peer.
+ , requestedObjectIdsInflight :: !NumObjectIdsToReq
+ -- ^ The number of transaction identifiers that we have requested but
+ -- which have not yet been replied to. We need to track this to keep
+ -- our requests within the limit on the number of unacknowledged objectIds.
+ , requestedObjectsInflightSize :: !SizeInBytes
+ -- ^ The size in bytes of transactions that we have requested but which
+ -- have not yet been replied to. We need to track this to keep our
+ -- requests within the `maxObjectsSizeInflight` limit.
+ , requestedObjectsInflight :: !(Set objectId)
+ -- ^ The set of requested `objectId`s.
+ , unknownObjects :: !(Set objectId)
+ -- ^ A subset of `unacknowledgedObjectIds` which were unknown to the peer
+ -- (i.e. requested but not received). We need to track these `objectId`s
+ -- since they need to be acknowledged.
+ --
+ -- We track these `objectId`s per peer, rather than in the `bufferedObjects` map,
+ -- since that could potentially lead to corrupting the node, not being
+ -- able to download an `object` which is needed & available from other nodes.
+ , score :: !Double
+ -- ^ Score is a metric that tracks how useful a peer has been.
+ -- The larger the value, the less useful the peer. It slowly decays towards
+ -- zero.
+ , scoreTs :: !Time
+ -- ^ Timestamp for the last time `score` was drained.
+ , downloadedObjects :: !(Map objectId object)
+ -- ^ A set of OBJECTs downloaded from the peer. They are not yet
+ -- acknowledged and haven't been sent to the objectpool yet. 
+ --
+ -- Life cycle of entries:
+ -- * added when an object is downloaded (see `collectObjectsImpl`)
+ -- * follows `unacknowledgedObjectIds` (see `acknowledgeObjectIds`)
+ , toObjectPoolObjects :: !(Map objectId object)
+ -- ^ A set of OBJECTs on their way to the objectpool.
+ -- Tracked here so that we can cleanup `inSubmissionToObjectPoolObjects` if the
+ -- peer dies.
+ --
+ -- Life cycle of entries:
+ -- * added by `acknowledgeObjectIds` (where we decide which objects can be
+ -- submitted to the objectpool)
+ -- * removed by `withObjectPoolSem`
+ }
+ deriving (Eq, Show, Generic)
+
+instance
+ ( NoThunks objectId
+ , NoThunks object
+ ) =>
+ NoThunks (PeerObjectState objectId object)
+
+-- | Shared state of all `ObjectDiffusion` clients.
+--
+-- When a new `objectId` enters `unacknowledgedObjectIds`, it is also added to `availableObjectIds`
+-- and `referenceCounts` (see `acknowledgeObjectIdsImpl`).
+--
+-- When an `objectId` is selected to be downloaded, it's added to
+-- `requestedObjectsInflightSize` (see
+-- `Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.Decision.pickObjectsToDownload`).
+--
+-- When the request arrives, the `objectId` is removed from `inflightObjects`. It
+-- might be added to `unknownObjects` if the server didn't have that `objectId`, or
+-- it's added to `bufferedObjects` (see `collectObjectsImpl`).
+--
+-- Whenever we choose `objectId`s to acknowledge (either in `acknowledgeObjectIdsImpl`,
+-- `collectObjectsImpl` or
+-- `Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.Decision.pickObjectsToDownload`), we also
+-- recalculate `referenceCounts` and only keep live `objectId`s in other maps (e.g.
+-- `availableObjectIds`, `bufferedObjects`, `unknownObjects`).
+data SharedObjectState peeraddr objectId object = SharedObjectState
+ { peerObjectStates :: !(Map peeraddr (PeerObjectState objectId object))
+ -- ^ Map of peer states. 
+ -- + -- /Invariant:/ for peeraddr's which are registered using `withPeer`, + -- there's always an entry in this map even if the set of `objectId`s is + -- empty. + , inflightObjects :: !(Map objectId Int) + -- ^ Set of transactions which are in-flight (have already been + -- requested) together with multiplicities (from how many peers it is + -- currently in-flight) + -- + -- This set can intersect with `availableObjectIds`. + , inflightObjectsSize :: !SizeInBytes + -- ^ Overall size of all `object`s in-flight. + , bufferedObjects :: !(Map objectId (Maybe object)) + -- ^ Map of `object` which: + -- + -- * were downloaded and added to the objectpool, + -- * are already in the objectpool (`Nothing` is inserted in that case), + -- + -- We only keep live `objectId`, e.g. ones which `objectId` is unacknowledged by + -- at least one peer or has a `timedObjects` entry. + -- + -- /Note:/ `objectId`s which `object` were unknown by a peer are tracked + -- separately in `unknownObjects`. + -- + -- /Note:/ previous implementation also needed to explicitly track + -- `objectId`s which were already acknowledged, but are still unacknowledged. + -- In this implementation, this is done using reference counting. + -- + -- This map is useful to acknowledge `objectId`s, it's basically taking the + -- longest prefix which contains entries in `bufferedObjects` or `unknownObjects`. + , referenceCounts :: !(Map objectId Int) + -- ^ We track reference counts of all unacknowledged and timedObjects objectIds. + -- Once the count reaches 0, a object is removed from `bufferedObjects`. + -- + -- The `bufferedObject` map contains a subset of `objectId` which + -- `referenceCounts` contains. + -- + -- /Invariants:/ + -- + -- * the objectId count is equal to multiplicity of objectId in all + -- `unacknowledgedObjectIds` sequences; + -- * @Map.keysSet bufferedObjects `Set.isSubsetOf` Map.keysSet referenceCounts@; + -- * all counts are positive integers. 
+ , timedObjects :: !(Map Time [objectId]) + -- ^ A set of timeouts for objectIds that have been added to bufferedObjects after being + -- inserted into the objectpool. + -- + -- We need these short timeouts to avoid re-downloading a `object`. We could + -- acknowledge this `objectId` to all peers, when a peer from another + -- continent presents us it again. + -- + -- Every objectId entry has a reference count in `referenceCounts`. + , inSubmissionToObjectPoolObjects :: !(Map objectId Int) + -- ^ A set of objectIds that have been downloaded by a peer and are on their + -- way to the objectpool. We won't issue further fetch-requests for OBJECTs in + -- this state. We track these objects to not re-download them from another + -- peer. + -- + -- * We subtract from the counter when a given object is added or rejected by + -- the objectpool or do that for all objects in `toObjectPoolObjects` when a peer is + -- unregistered. + -- * We add to the counter when a given object is selected to be added to the + -- objectpool in `pickObjectsToDownload`. + , peerRng :: !StdGen + -- ^ Rng used to randomly order peers + } + deriving (Eq, Show, Generic) + +instance + ( NoThunks peeraddr + , NoThunks object + , NoThunks objectId + , NoThunks StdGen + ) => + NoThunks (SharedObjectState peeraddr objectId object) + +-- +-- Decisions +-- + +newtype ObjectsToObjectPool objectId object = ObjectsToObjectPool {listOfObjectsToObjectPool :: [(objectId, object)]} + deriving newtype (Eq, Show, Semigroup, Monoid) + +-- | Decision made by the decision logic. Each peer will receive a 'Decision'. +-- +-- /note:/ it is rather non-standard to represent a choice between requesting +-- `objectId`s and `object`'s as a product rather than a sum type. The client will +-- need to download `object`s first and then send a request for more objectIds (and +-- acknowledge some `objectId`s). 
Due to pipelining each client will request +-- decision from the decision logic quite often (every two pipelined requests), +-- but with this design a decision once taken will make the peer non-active +-- (e.g. it won't be returned by `filterActivePeers`) for longer, and thus the +-- expensive `makeDecision` computation will not need to take that peer into +-- account. +data ObjectDecision objectId object = ObjectDecision + { objectIdsToAcknowledge :: !NumObjectIdsToAck + -- ^ objectId's to acknowledge + , objectIdsToRequest :: !NumObjectIdsToReq + -- ^ number of objectId's to request + , objectPipelineObjectIds :: !Bool + -- ^ the object-submission protocol only allows to pipeline `objectId`'s requests + -- if we have non-acknowledged `objectId`s. + , objectsToRequest :: !(Map objectId SizeInBytes) + -- ^ objectId's to download. + , objectsToObjectPool :: !(ObjectsToObjectPool objectId object) + -- ^ list of `object`s to submit to the objectpool. + } + deriving (Show, Eq) + +-- | A non-commutative semigroup instance. +-- +-- /note:/ this instance must be consistent with `pickObjectsToDownload` and how +-- `PeerObjectState` is updated. It is designed to work with `TMergeVar`s. 
+instance Ord objectId => Semigroup (ObjectDecision objectId object) where + ObjectDecision + { objectIdsToAcknowledge + , objectIdsToRequest + , objectPipelineObjectIds = _ignored + , objectsToRequest + , objectsToObjectPool + } + <> ObjectDecision + { objectIdsToAcknowledge = objectIdsToAcknowledge' + , objectIdsToRequest = objectIdsToRequest' + , objectPipelineObjectIds = objectPipelineObjectIds' + , objectsToRequest = objectsToRequest' + , objectsToObjectPool = objectsToObjectPool' + } = + ObjectDecision + { objectIdsToAcknowledge = objectIdsToAcknowledge + objectIdsToAcknowledge' + , objectIdsToRequest = objectIdsToRequest + objectIdsToRequest' + , objectPipelineObjectIds = objectPipelineObjectIds' + , objectsToRequest = objectsToRequest <> objectsToRequest' + , objectsToObjectPool = objectsToObjectPool <> objectsToObjectPool' + } + +-- | A no-op decision. +emptyObjectDecision :: ObjectDecision objectId object +emptyObjectDecision = + ObjectDecision + { objectIdsToAcknowledge = 0 + , objectIdsToRequest = 0 + , objectPipelineObjectIds = False + , objectsToRequest = Map.empty + , objectsToObjectPool = mempty + } + +-- | ObjectLogic tracer. +data TraceObjectLogic peeraddr objectId object + = TraceSharedObjectState String (SharedObjectState peeraddr objectId object) + | TraceObjectDecisions (Map peeraddr (ObjectDecision objectId object)) + deriving Show + +data ProcessedObjectCount = ProcessedObjectCount + { pobjectcAccepted :: Int + -- ^ Just accepted this many transactions. + , pobjectcRejected :: Int + -- ^ Just rejected this many transactions. + , pobjectcScore :: Double + } + deriving (Eq, Show) + +-- | The consensus layer functionality that the inbound side of the object +-- submission logic requires. +-- +-- This is provided to the object submission logic by the consensus layer. 
+data ObjectDiffusionObjectPoolWriter objectId object idx m
+ = ObjectDiffusionObjectPoolWriter
+ { objectId :: object -> objectId
+ -- ^ Compute the transaction id from a transaction.
+ --
+ -- This is used in the protocol handler to verify a full transaction
+ -- matches a previously given transaction id.
+ , objectpoolAddObjects :: [object] -> m [objectId]
+ -- ^ Supply a batch of transactions to the objectpool. They are either
+ -- accepted or rejected individually, but in the order supplied.
+ --
+ -- The 'objectId's of all transactions that were added successfully are
+ -- returned.
+ }
+
+data TraceObjectDiffusionInbound objectId object
+ = -- | Number of transactions just about to be inserted.
+ TraceObjectDiffusionCollected [objectId]
+ | -- | Just processed transaction pass/fail breakdown.
+ TraceObjectDiffusionProcessed ProcessedObjectCount
+ | TraceObjectInboundCanRequestMoreObjects Int
+ | TraceObjectInboundCannotRequestMoreObjects Int
+ | TraceObjectInboundAddedToObjectPool [objectId] DiffTime
+ | TraceObjectInboundRejectedFromObjectPool [objectId] DiffTime
+ | TraceObjectInboundError ObjectDiffusionProtocolError
+ | --
+ -- messages emitted by the new implementation of the server in
+ -- "Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.Server"; some of them are also
+ -- used in this module.
+ --
+
+ -- | Server received 'MsgDone'
+ TraceObjectInboundTerminated
+ | TraceObjectInboundDecision (ObjectDecision objectId object)
+ deriving (Eq, Show)
+
+data ObjectDiffusionCounters
+ = ObjectDiffusionCounters
+ { numOfOutstandingObjectIds :: Int
+ -- ^ objectIds which are not yet downloaded. This is a diff of key sets of
+ -- `referenceCounts` and a sum of `bufferedObjects` and
+ -- `inSubmissionToObjectPoolObjects` maps. 
+ , numOfBufferedObjects :: Int + -- ^ number of all buffered objects (downloaded or not available) + , numOfInSubmissionToObjectPoolObjects :: Int + -- ^ number of all object's which were submitted to the objectpool + , numOfObjectIdsInflight :: Int + -- ^ number of all in-flight objectId's. + } + deriving (Eq, Show) + +mkObjectDiffusionCounters :: + Ord objectId => + SharedObjectState peeraddr objectId object -> + ObjectDiffusionCounters +mkObjectDiffusionCounters + SharedObjectState + { inflightObjects + , bufferedObjects + , referenceCounts + , inSubmissionToObjectPoolObjects + } = + ObjectDiffusionCounters + { numOfOutstandingObjectIds = + Set.size $ + Map.keysSet referenceCounts + Set.\\ Map.keysSet bufferedObjects + Set.\\ Map.keysSet inSubmissionToObjectPoolObjects + , numOfBufferedObjects = Map.size bufferedObjects + , numOfInSubmissionToObjectPoolObjects = Map.size inSubmissionToObjectPoolObjects + , numOfObjectIdsInflight = getSum $ foldMap Sum inflightObjects + } + +data ObjectDiffusionProtocolError + = ProtocolErrorObjectNotRequested + | ProtocolErrorObjectIdsNotRequested + | -- | a list of objectId for which the received size and advertised size didn't + -- match. + forall objectId. 
+ (Typeable objectId, Show objectId, Eq objectId) =>
+ ProtocolErrorObjectSizeError [(objectId, SizeInBytes, SizeInBytes)]
+
+instance Eq ObjectDiffusionProtocolError where
+ ProtocolErrorObjectNotRequested == ProtocolErrorObjectNotRequested = True
+ ProtocolErrorObjectNotRequested == _ = False
+ ProtocolErrorObjectIdsNotRequested == ProtocolErrorObjectIdsNotRequested = True
+ ProtocolErrorObjectIdsNotRequested == _ = False
+ ProtocolErrorObjectSizeError (as :: [(a, SizeInBytes, SizeInBytes)])
+ == ProtocolErrorObjectSizeError (as' :: [(a', SizeInBytes, SizeInBytes)]) =
+ case eqT @a @a' of
+ Nothing -> False
+ Just Refl -> as == as'
+ ProtocolErrorObjectSizeError{} == _ = False
+
+deriving instance Show ObjectDiffusionProtocolError
+
+instance Exception ObjectDiffusionProtocolError where
+ displayException ProtocolErrorObjectNotRequested =
+ "The peer replied with a transaction we did not ask for."
+ displayException ProtocolErrorObjectIdsNotRequested =
+ "The peer replied with more objectIds than we asked for." 
+ displayException (ProtocolErrorObjectSizeError objectIds) = + "The peer received objects with wrong sizes " ++ show objectIds + +data ObjectDiffusionInitDelay + = ObjectDiffusionInitDelay DiffTime + | NoObjectDiffusionInitDelay + deriving (Eq, Show) + +defaultObjectDiffusionInitDelay :: ObjectDiffusionInitDelay +defaultObjectDiffusionInitDelay = ObjectDiffusionInitDelay 60 diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/ObjectPool/API.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/ObjectPool/API.hs index 2f949d8b3b..37c37743e5 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/ObjectPool/API.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/ObjectPool/API.hs @@ -26,11 +26,15 @@ module Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.ObjectPool.API ( ObjectPoolReader (..) , ObjectPoolWriter (..) + , SizeInBytes -- TODO: remove ) where import Control.Concurrent.Class.MonadSTM.Strict (STM) +import Data.Void (Void) -- TODO: remove import Data.Word (Word64) +type SizeInBytes = Void -- TODO: remove + -- | Interface used by the outbound side of object diffusion as its source of -- objects to give to the remote side. 
data ObjectPoolReader objectId object ticketNo m diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/PerasCert.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/PerasCert.hs index 5c024618b0..878f4630f8 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/PerasCert.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/PerasCert.hs @@ -14,8 +14,8 @@ module Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.PerasCert ) where import Ouroboros.Consensus.Block -import Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound -import Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.State +import Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V1.State +import Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V1 import Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.ObjectPool.API import Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Outbound import Ouroboros.Consensus.Storage.PerasCertDB.API diff --git a/ouroboros-consensus/test/consensus-test/Test/Consensus/MiniProtocol/ObjectDiffusion/Smoke.hs b/ouroboros-consensus/test/consensus-test/Test/Consensus/MiniProtocol/ObjectDiffusion/Smoke.hs index 8e12f01d6d..ab9d34aaee 100644 --- a/ouroboros-consensus/test/consensus-test/Test/Consensus/MiniProtocol/ObjectDiffusion/Smoke.hs +++ b/ouroboros-consensus/test/consensus-test/Test/Consensus/MiniProtocol/ObjectDiffusion/Smoke.hs @@ -28,7 +28,7 @@ import Network.TypedProtocol.Channel (Channel, createConnectedChannels) import Network.TypedProtocol.Codec (AnyMessage) import Network.TypedProtocol.Driver.Simple (runPeer, runPipelinedPeer) import NoThunks.Class (NoThunks) -import Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound +import Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V1 ( objectDiffusionInbound ) import 
Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.State From e8458cbc9ed39f95b02948dda3341113360da59c Mon Sep 17 00:00:00 2001 From: Thomas BAGREL Date: Mon, 29 Sep 2025 15:59:44 +0200 Subject: [PATCH 02/43] Some refactor Co-authored-by: nbacquey --- .../ObjectDiffusion/Inbound/V2.hs | 6 +- .../ObjectDiffusion/Inbound/V2/Decision.hs | 78 ++++++++-------- .../ObjectDiffusion/Inbound/V2/Policy.hs | 4 +- .../ObjectDiffusion/Inbound/V2/Registry.hs | 92 +++++++++---------- .../ObjectDiffusion/Inbound/V2/State.hs | 76 +++++++-------- .../ObjectDiffusion/Inbound/V2/Types.hs | 18 ++-- 6 files changed, 137 insertions(+), 137 deletions(-) diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2.hs index 7a82b0e3d6..5ccae726f3 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2.hs @@ -8,7 +8,7 @@ module Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2 ( -- * ObjectDiffusion Inbound client - objectSubmissionInboundV2 + objectDiffusionInbound -- * PeerObjectAPI , withPeer @@ -48,7 +48,7 @@ import Ouroboros.Network.Protocol.ObjectDiffusion.Inbound -- there are object's to download it pipelines two requests: first for object's second -- for objectId's. If there are no object's to download, it either sends a blocking or -- non-blocking request for objectId's. -objectSubmissionInboundV2 :: +objectDiffusionInbound :: forall objectId object idx m. 
( MonadDelay m , MonadThrow m @@ -59,7 +59,7 @@ objectSubmissionInboundV2 :: ObjectDiffusionObjectPoolWriter objectId object idx m -> PeerObjectAPI m objectId object -> ObjectDiffusionServerPipelined objectId object m () -objectSubmissionInboundV2 +objectDiffusionInbound tracer initDelay ObjectDiffusionObjectPoolWriter{objectId} diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Decision.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Decision.hs index fcff50520c..d4c3527adb 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Decision.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Decision.hs @@ -39,23 +39,23 @@ import System.Random (random) -- | Make download decisions. makeDecisions :: - forall peeraddr objectId object. - ( Ord peeraddr + forall peerAddr objectId object. + ( Ord peerAddr , Ord objectId - , Hashable peeraddr + , Hashable peerAddr ) => -- | decision policy ObjectDecisionPolicy -> -- | decision context - SharedObjectState peeraddr objectId object -> + SharedObjectState peerAddr objectId object -> -- | list of available peers. -- -- This is a subset of `peerObjectStates` of peers which either: -- * can be used to download a `object`, -- * can acknowledge some `objectId`s. - Map peeraddr (PeerObjectState objectId object) -> - ( SharedObjectState peeraddr objectId object - , Map peeraddr (ObjectDecision objectId object) + Map peerAddr (PeerObjectState objectId object) -> + ( SharedObjectState peerAddr objectId object + , Map peerAddr (ObjectDecision objectId object) ) makeDecisions policy st = let (salt, rng') = random (peerRng st) @@ -66,28 +66,28 @@ makeDecisions policy st = where fn :: forall a. 
- (a, [(peeraddr, ObjectDecision objectId object)]) -> - (a, Map peeraddr (ObjectDecision objectId object)) + (a, [(peerAddr, ObjectDecision objectId object)]) -> + (a, Map peerAddr (ObjectDecision objectId object)) fn (a, as) = (a, Map.fromList as) -- | Order peers by how useful the OBJECTs they have provided are. -- -- OBJECTs delivered late will fail to apply because they were included in -- a recently adopted block. Peers can race against each other by setting --- `objectInflightMultiplicity` to > 1. In case of a tie a hash of the peeraddr +-- `objectInflightMultiplicity` to > 1. In case of a tie a hash of the peerAddr -- is used as a tie breaker. Since every invocation use a new salt a given --- peeraddr does not have an advantage over time. +-- peerAddr does not have an advantage over time. orderByRejections :: - Hashable peeraddr => + Hashable peerAddr => Int -> - Map peeraddr (PeerObjectState objectId object) -> - [(peeraddr, PeerObjectState objectId object)] + Map peerAddr (PeerObjectState objectId object) -> + [(peerAddr, PeerObjectState objectId object)] orderByRejections salt = - List.sortOn (\(peeraddr, ps) -> (score ps, hashWithSalt salt peeraddr)) + List.sortOn (\(peerAddr, ps) -> (score ps, hashWithSalt salt peerAddr)) . Map.toList -- | Internal state of `pickObjectsToDownload` computation. -data St peeraddr objectId object +data St peerAddr objectId object = St { stInflightSize :: !SizeInBytes -- ^ size of all `object`s in-flight. @@ -113,17 +113,17 @@ data St peeraddr objectId object -- * each object can be downloaded simultaneously from at most -- `objectInflightMultiplicity` peers. pickObjectsToDownload :: - forall peeraddr objectId object. - ( Ord peeraddr + forall peerAddr objectId object. 
+ ( Ord peerAddr , Ord objectId ) => -- | decision policy ObjectDecisionPolicy -> -- | shared state - SharedObjectState peeraddr objectId object -> - [(peeraddr, PeerObjectState objectId object)] -> - ( SharedObjectState peeraddr objectId object - , [(peeraddr, ObjectDecision objectId object)] + SharedObjectState peerAddr objectId object -> + [(peerAddr, PeerObjectState objectId object)] -> + ( SharedObjectState peerAddr objectId object + , [(peerAddr, ObjectDecision objectId object)] ) pickObjectsToDownload policy@ObjectDecisionPolicy @@ -139,7 +139,7 @@ pickObjectsToDownload , inSubmissionToObjectPoolObjects , referenceCounts } = - -- outer fold: fold `[(peeraddr, PeerObjectState objectId object)]` + -- outer fold: fold `[(peerAddr, PeerObjectState objectId object)]` List.mapAccumR accumFn -- initial state @@ -152,10 +152,10 @@ pickObjectsToDownload >>> gn where accumFn :: - St peeraddr objectId object -> - (peeraddr, PeerObjectState objectId object) -> - ( St peeraddr objectId object - , ( (peeraddr, PeerObjectState objectId object) + St peerAddr objectId object -> + (peerAddr, PeerObjectState objectId object) -> + ( St peerAddr objectId object + , ( (peerAddr, PeerObjectState objectId object) , ObjectDecision objectId object ) ) @@ -166,7 +166,7 @@ pickObjectsToDownload , stAcknowledged , stInSubmissionToObjectPoolObjects } - ( peeraddr + ( peerAddr , peerObjectState@PeerObjectState { availableObjectIds , unknownObjects @@ -200,7 +200,7 @@ pickObjectsToDownload , stInSubmissionToObjectPoolObjects = stInSubmissionToObjectPoolObjects' } , - ( (peeraddr, peerObjectState') + ( (peerAddr, peerObjectState') , ObjectDecision { objectIdsToAcknowledge = numObjectIdsToAck , objectIdsToRequest = numObjectIdsToReq @@ -219,7 +219,7 @@ pickObjectsToDownload -- to in-flight size limits ( st , - ( (peeraddr, peerObjectState') + ( (peerAddr, peerObjectState') , emptyObjectDecision ) ) @@ -312,7 +312,7 @@ pickObjectsToDownload , stInSubmissionToObjectPoolObjects = 
stInSubmissionToObjectPoolObjects' } , - ( (peeraddr, peerObjectState'') + ( (peerAddr, peerObjectState'') , ObjectDecision { objectIdsToAcknowledge = numObjectIdsToAck , objectPipelineObjectIds = @@ -334,17 +334,17 @@ pickObjectsToDownload , stInSubmissionToObjectPoolObjects = stInSubmissionToObjectPoolObjects' } , - ( (peeraddr, peerObjectState'') + ( (peerAddr, peerObjectState'') , emptyObjectDecision{objectsToRequest = objectsToRequestMap} ) ) gn :: - ( St peeraddr objectId object - , [((peeraddr, PeerObjectState objectId object), ObjectDecision objectId object)] + ( St peerAddr objectId object + , [((peerAddr, PeerObjectState objectId object), ObjectDecision objectId object)] ) -> - ( SharedObjectState peeraddr objectId object - , [(peeraddr, ObjectDecision objectId object)] + ( SharedObjectState peerAddr objectId object + , [(peerAddr, ObjectDecision objectId object)] ) gn ( St @@ -426,12 +426,12 @@ pickObjectsToDownload -- | Filter peers which can either download a `object` or acknowledge `objectId`s. filterActivePeers :: - forall peeraddr objectId object. + forall peerAddr objectId object. 
Ord objectId => HasCallStack => ObjectDecisionPolicy -> - SharedObjectState peeraddr objectId object -> - Map peeraddr (PeerObjectState objectId object) + SharedObjectState peerAddr objectId object -> + Map peerAddr (PeerObjectState objectId object) filterActivePeers policy@ObjectDecisionPolicy { maxUnacknowledgedObjectIds diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Policy.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Policy.hs index 5858845617..f85f23f16c 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Policy.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Policy.hs @@ -20,7 +20,7 @@ import Ouroboros.Network.Protocol.ObjectDiffusion.Type (NumObjectIdsToReq (..)) -- -- * `ObjectDecisionPolicy` -- * `maximumIngressQueue` for `object-submission` mini-protocol, see --- `Ouroboros.Consensus.MiniProtocol.NodeToNode.objectSubmissionProtocolLimits` +-- `Ouroboros.Consensus.MiniProtocol.NodeToNode.objectDiffusionProtocolLimits` max_OBJECT_SIZE :: SizeInBytes max_OBJECT_SIZE = 65_540 @@ -58,7 +58,7 @@ defaultObjectDecisionPolicy :: ObjectDecisionPolicy defaultObjectDecisionPolicy = ObjectDecisionPolicy { maxNumObjectIdsToRequest = 3 - , maxUnacknowledgedObjectIds = 10 -- must be the same as objectSubmissionMaxUnacked + , maxUnacknowledgedObjectIds = 10 -- must be the same as objectDiffusionMaxUnacked , objectsSizeInflightPerPeer = max_OBJECT_SIZE * 6 , maxObjectsSizeInflight = max_OBJECT_SIZE * 20 , objectInflightMultiplicity = 2 diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Registry.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Registry.hs index 2a954aa43d..67c3c2494f 100644 
--- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Registry.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Registry.hs @@ -46,14 +46,14 @@ import Ouroboros.Network.Protocol.ObjectDiffusion.Type -- | Communication channels between `ObjectDiffusion` client mini-protocol and -- decision logic. -newtype ObjectChannels m peeraddr objectId object = ObjectChannels - { objectChannelMap :: Map peeraddr (StrictMVar m (ObjectDecision objectId object)) +newtype ObjectChannels m peerAddr objectId object = ObjectChannels + { objectChannelMap :: Map peerAddr (StrictMVar m (ObjectDecision objectId object)) } -type ObjectChannelsVar m peeraddr objectId object = - StrictMVar m (ObjectChannels m peeraddr objectId object) +type ObjectChannelsVar m peerAddr objectId object = + StrictMVar m (ObjectChannels m peerAddr objectId object) -newObjectChannelsVar :: MonadMVar m => m (ObjectChannelsVar m peeraddr objectId object) +newObjectChannelsVar :: MonadMVar m => m (ObjectChannelsVar m peerAddr objectId object) newObjectChannelsVar = newMVar (ObjectChannels Map.empty) newtype ObjectObjectPoolSem m = ObjectObjectPoolSem (TSem m) @@ -94,7 +94,7 @@ data ObjectObjectPoolResult = ObjectAccepted | ObjectRejected -- `SharedObjectStateVar` and `PeerObjectStateVar`s, which exposes `PeerObjectStateAPI`. -- `PeerObjectStateAPI` is only safe inside the `withPeer` scope. withPeer :: - forall object peeraddr objectId idx m a. + forall object peerAddr objectId idx m a. 
( MonadMask m , MonadMVar m , MonadSTM m @@ -102,18 +102,18 @@ withPeer :: , Ord objectId , Show objectId , Typeable objectId - , Ord peeraddr - , Show peeraddr + , Ord peerAddr + , Show peerAddr ) => - Tracer m (TraceObjectLogic peeraddr objectId object) -> - ObjectChannelsVar m peeraddr objectId object -> + Tracer m (TraceObjectLogic peerAddr objectId object) -> + ObjectChannelsVar m peerAddr objectId object -> ObjectObjectPoolSem m -> ObjectDecisionPolicy -> - SharedObjectStateVar m peeraddr objectId object -> + SharedObjectStateVar m peerAddr objectId object -> ObjectDiffusionObjectPoolReader objectId object idx m -> ObjectDiffusionObjectPoolWriter objectId object idx m -> (object -> SizeInBytes) -> - peeraddr -> + peerAddr -> -- ^ new peer -- | callback which gives access to `PeerObjectStateAPI` @@ -128,7 +128,7 @@ withPeer ObjectDiffusionObjectPoolReader{objectpoolGetSnapshot} ObjectDiffusionObjectPoolWriter{objectpoolAddObjects} objectSize - peeraddr + peerAddr io = bracket ( do @@ -144,7 +144,7 @@ withPeer let !chann'' = fromMaybe chann mbChann in (chann'', Just chann'') ) - peeraddr + peerAddr objectChannelMap return ( ObjectChannels{objectChannelMap = objectChannelMap'} @@ -166,18 +166,18 @@ withPeer modifyMVar_ channelsVar \ObjectChannels{objectChannelMap} -> - return ObjectChannels{objectChannelMap = Map.delete peeraddr objectChannelMap} + return ObjectChannels{objectChannelMap = Map.delete peerAddr objectChannelMap} ) io where registerPeer :: - SharedObjectState peeraddr objectId object -> - SharedObjectState peeraddr objectId object + SharedObjectState peerAddr objectId object -> + SharedObjectState peerAddr objectId object registerPeer st@SharedObjectState{peerObjectStates} = st { peerObjectStates = Map.insert - peeraddr + peerAddr PeerObjectState { availableObjectIds = Map.empty , requestedObjectIdsInflight = 0 @@ -196,8 +196,8 @@ withPeer -- TODO: this function needs to be tested! 
-- Issue: https://github.com/IntersectMBO/ouroboros-network/issues/5151 unregisterPeer :: - SharedObjectState peeraddr objectId object -> - SharedObjectState peeraddr objectId object + SharedObjectState peerAddr objectId object -> + SharedObjectState peerAddr objectId object unregisterPeer st@SharedObjectState { peerObjectStates @@ -226,10 +226,10 @@ withPeer ) = Map.alterF ( \case - Nothing -> error ("ObjectDiffusion.withPeer: invariant violation for peer " ++ show peeraddr) + Nothing -> error ("ObjectDiffusion.withPeer: invariant violation for peer " ++ show peerAddr) Just a -> (a, Nothing) ) - peeraddr + peerAddr peerObjectStates referenceCounts' = @@ -336,8 +336,8 @@ withPeer updateBufferedObject :: Time -> ObjectObjectPoolResult -> - SharedObjectState peeraddr objectId object -> - SharedObjectState peeraddr objectId object + SharedObjectState peerAddr objectId object -> + SharedObjectState peerAddr objectId object updateBufferedObject _ ObjectRejected @@ -356,7 +356,7 @@ withPeer objectId inSubmissionToObjectPoolObjects - peerObjectStates' = Map.update fn peeraddr peerObjectStates + peerObjectStates' = Map.update fn peerAddr peerObjectStates where fn ps = Just $! ps{toObjectPoolObjects = Map.delete objectId (toObjectPoolObjects ps)} updateBufferedObject @@ -397,7 +397,7 @@ withPeer bufferedObjects' = Map.insert objectId (Just object) bufferedObjects - peerObjectStates' = Map.update fn peeraddr peerObjectStates + peerObjectStates' = Map.update fn peerAddr peerObjectStates where fn ps = Just $! 
ps{toObjectPoolObjects = Map.delete objectId (toObjectPoolObjects ps)} @@ -411,7 +411,7 @@ withPeer tracer sharedStateVar objectpoolGetSnapshot - peeraddr + peerAddr numObjectIdsToReq objectIdsSeq objectIdsMap @@ -423,7 +423,7 @@ withPeer -- \^ received objects m (Maybe ObjectDiffusionProtocolError) handleReceivedObjects objectIds objects = - collectObjects tracer objectSize sharedStateVar peeraddr objectIds objects + collectObjects tracer objectSize sharedStateVar peerAddr objectIds objects -- Update `score` & `scoreTs` fields of `PeerObjectState`, return the new -- updated `score`. @@ -435,13 +435,13 @@ withPeer m Double countRejectedObjects _ n | n < 0 = - error ("ObjectDiffusion.countRejectedObjects: invariant violation for peer " ++ show peeraddr) + error ("ObjectDiffusion.countRejectedObjects: invariant violation for peer " ++ show peerAddr) countRejectedObjects now n = atomically $ stateTVar sharedStateVar $ \st -> - let (result, peerObjectStates') = Map.alterF fn peeraddr (peerObjectStates st) + let (result, peerObjectStates') = Map.alterF fn peerAddr (peerObjectStates st) in (result, st{peerObjectStates = peerObjectStates'}) where fn :: Maybe (PeerObjectState objectId object) -> (Double, Maybe (PeerObjectState objectId object)) - fn Nothing = error ("ObjectDiffusion.withPeer: invariant violation for peer " ++ show peeraddr) + fn Nothing = error ("ObjectDiffusion.withPeer: invariant violation for peer " ++ show peerAddr) fn (Just ps) = (score ps', Just $! ps') where ps' = updateRejects policy now n ps @@ -467,15 +467,15 @@ updateRejects } drainRejectionThread :: - forall m peeraddr objectId object. + forall m peerAddr objectId object. 
( MonadDelay m , MonadSTM m , MonadThread m , Ord objectId ) => - Tracer m (TraceObjectLogic peeraddr objectId object) -> + Tracer m (TraceObjectLogic peerAddr objectId object) -> ObjectDecisionPolicy -> - SharedObjectStateVar m peeraddr objectId object -> + SharedObjectStateVar m peerAddr objectId object -> m Void drainRejectionThread tracer policy sharedStateVar = do labelThisThread "object-rejection-drain" @@ -511,21 +511,21 @@ drainRejectionThread tracer policy sharedStateVar = do else go nextDrain decisionLogicThread :: - forall m peeraddr objectId object. + forall m peerAddr objectId object. ( MonadDelay m , MonadMVar m , MonadSTM m , MonadMask m , MonadFork m - , Ord peeraddr + , Ord peerAddr , Ord objectId - , Hashable peeraddr + , Hashable peerAddr ) => - Tracer m (TraceObjectLogic peeraddr objectId object) -> + Tracer m (TraceObjectLogic peerAddr objectId object) -> Tracer m ObjectDiffusionCounters -> ObjectDecisionPolicy -> - ObjectChannelsVar m peeraddr objectId object -> - SharedObjectStateVar m peeraddr objectId object -> + ObjectChannelsVar m peerAddr objectId object -> + SharedObjectStateVar m peerAddr objectId object -> m Void decisionLogicThread tracer counterTracer policy objectChannelsVar sharedStateVar = do labelThisThread "object-decision" @@ -573,21 +573,21 @@ decisionLogicThread tracer counterTracer policy objectChannelsVar sharedStateVar -- | Run `decisionLogicThread` and `drainRejectionThread`. decisionLogicThreads :: - forall m peeraddr objectId object. + forall m peerAddr objectId object. 
( MonadDelay m , MonadMVar m , MonadMask m , MonadAsync m , MonadFork m - , Ord peeraddr + , Ord peerAddr , Ord objectId - , Hashable peeraddr + , Hashable peerAddr ) => - Tracer m (TraceObjectLogic peeraddr objectId object) -> + Tracer m (TraceObjectLogic peerAddr objectId object) -> Tracer m ObjectDiffusionCounters -> ObjectDecisionPolicy -> - ObjectChannelsVar m peeraddr objectId object -> - SharedObjectStateVar m peeraddr objectId object -> + ObjectChannelsVar m peerAddr objectId object -> + SharedObjectStateVar m peerAddr objectId object -> m Void decisionLogicThreads tracer counterTracer policy objectChannelsVar sharedStateVar = uncurry (<>) diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/State.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/State.hs index b4bce755d4..0558ad9353 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/State.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/State.hs @@ -52,11 +52,11 @@ import System.Random (StdGen) -- acknowledgeObjectIds :: - forall peeraddr object objectId. + forall peerAddr object objectId. Ord objectId => HasCallStack => ObjectDecisionPolicy -> - SharedObjectState peeraddr objectId object -> + SharedObjectState peerAddr objectId object -> PeerObjectState objectId object -> -- | number of objectId to acknowledge, requests, objects which we can submit to the -- objectpool, objectIds to acknowledge with multiplicities, updated PeerObjectState. @@ -234,11 +234,11 @@ updateRefCounts referenceCounts (RefCountDiff diff) = diff tickTimedObjects :: - forall peeraddr object objectId. + forall peerAddr object objectId. 
Ord objectId => Time -> - SharedObjectState peeraddr objectId object -> - SharedObjectState peeraddr objectId object + SharedObjectState peerAddr objectId object -> + SharedObjectState peerAddr objectId object tickTimedObjects now st@SharedObjectState @@ -289,12 +289,12 @@ tickTimedObjects -- | Insert received `objectId`s and return the number of objectIds to be acknowledged -- and the updated `SharedObjectState`. receivedObjectIdsImpl :: - forall peeraddr object objectId. - (Ord objectId, Ord peeraddr, HasCallStack) => + forall peerAddr object objectId. + (Ord objectId, Ord peerAddr, HasCallStack) => -- | check if objectId is in the objectpool, ref -- 'objectpoolHasObject' (objectId -> Bool) -> - peeraddr -> + peerAddr -> -- | number of requests to subtract from -- `requestedObjectIdsInflight` NumObjectIdsToReq -> @@ -302,11 +302,11 @@ receivedObjectIdsImpl :: StrictSeq objectId -> -- | received `objectId`s with sizes Map objectId SizeInBytes -> - SharedObjectState peeraddr objectId object -> - SharedObjectState peeraddr objectId object + SharedObjectState peerAddr objectId object -> + SharedObjectState peerAddr objectId object receivedObjectIdsImpl objectpoolHasObject - peeraddr + peerAddr reqNo objectIdsSeq objectIdsMap @@ -318,7 +318,7 @@ receivedObjectIdsImpl -- using `alterF` so the update of `PeerObjectState` is done in one lookup case Map.alterF (fmap Just . fn . fromJust) - peeraddr + peerAddr peerObjectStates of (st', peerObjectStates') -> st'{peerObjectStates = peerObjectStates'} @@ -327,7 +327,7 @@ receivedObjectIdsImpl -- updated `SharedObjectState`. fn :: PeerObjectState objectId object -> - ( SharedObjectState peeraddr objectId object + ( SharedObjectState peerAddr objectId object , PeerObjectState objectId object ) fn @@ -405,36 +405,36 @@ const_MAX_OBJECT_SIZE_DISCREPENCY :: SizeInBytes const_MAX_OBJECT_SIZE_DISCREPENCY = 32 collectObjectsImpl :: - forall peeraddr object objectId. - ( Ord peeraddr + forall peerAddr object objectId. 
+ ( Ord peerAddr , Ord objectId , Show objectId , Typeable objectId ) => -- | compute object size (object -> SizeInBytes) -> - peeraddr -> + peerAddr -> -- | requested objectIds Map objectId SizeInBytes -> -- | received objects Map objectId object -> - SharedObjectState peeraddr objectId object -> + SharedObjectState peerAddr objectId object -> -- | Return list of `objectId` which sizes didn't match or a new state. -- If one of the `object` has wrong size, we return an error. The -- mini-protocol will throw, which will clean the state map from this peer. Either ObjectDiffusionProtocolError - (SharedObjectState peeraddr objectId object) + (SharedObjectState peerAddr objectId object) collectObjectsImpl objectSize - peeraddr + peerAddr requestedObjectIdsMap receivedObjects st@SharedObjectState{peerObjectStates} = -- using `alterF` so the update of `PeerObjectState` is done in one lookup case Map.alterF (fmap Just . fn . fromJust) - peeraddr + peerAddr peerObjectStates of (Right st', peerObjectStates') -> Right st'{peerObjectStates = peerObjectStates'} @@ -447,7 +447,7 @@ collectObjectsImpl PeerObjectState objectId object -> ( Either [(objectId, SizeInBytes, SizeInBytes)] - (SharedObjectState peeraddr objectId object) + (SharedObjectState peerAddr objectId object) , PeerObjectState objectId object ) fn ps = @@ -565,13 +565,13 @@ collectObjectsImpl -- Monadic public API -- -type SharedObjectStateVar m peeraddr objectId object = - StrictTVar m (SharedObjectState peeraddr objectId object) +type SharedObjectStateVar m peerAddr objectId object = + StrictTVar m (SharedObjectState peerAddr objectId object) newSharedObjectStateVar :: MonadSTM m => StdGen -> - m (SharedObjectStateVar m peeraddr objectId object) + m (SharedObjectStateVar m peerAddr objectId object) newSharedObjectStateVar rng = newTVarIO SharedObjectState @@ -588,12 +588,12 @@ newSharedObjectStateVar rng = -- | Acknowledge `objectId`s, return the number of `objectIds` to be acknowledged to the -- remote side. 
receivedObjectIds :: - forall m peeraddr idx object objectId. - (MonadSTM m, Ord objectId, Ord peeraddr) => - Tracer m (TraceObjectLogic peeraddr objectId object) -> - SharedObjectStateVar m peeraddr objectId object -> + forall m peerAddr idx object objectId. + (MonadSTM m, Ord objectId, Ord peerAddr) => + Tracer m (TraceObjectLogic peerAddr objectId object) -> + SharedObjectStateVar m peerAddr objectId object -> STM m (ObjectPoolSnapshot objectId object idx) -> - peeraddr -> + peerAddr -> -- | number of requests to subtract from -- `requestedObjectIdsInflight` NumObjectIdsToReq -> @@ -602,28 +602,28 @@ receivedObjectIds :: -- | received `objectId`s with sizes Map objectId SizeInBytes -> m () -receivedObjectIds tracer sharedVar getObjectPoolSnapshot peeraddr reqNo objectIdsSeq objectIdsMap = do +receivedObjectIds tracer sharedVar getObjectPoolSnapshot peerAddr reqNo objectIdsSeq objectIdsMap = do st <- atomically $ do ObjectPoolSnapshot{objectpoolHasObject} <- getObjectPoolSnapshot stateTVar sharedVar - ((\a -> (a, a)) . receivedObjectIdsImpl objectpoolHasObject peeraddr reqNo objectIdsSeq objectIdsMap) + ((\a -> (a, a)) . receivedObjectIdsImpl objectpoolHasObject peerAddr reqNo objectIdsSeq objectIdsMap) traceWith tracer (TraceSharedObjectState "receivedObjectIds" st) -- | Include received `object`s in `SharedObjectState`. Return number of `objectIds` -- to be acknowledged and list of `object` to be added to the objectpool. collectObjects :: - forall m peeraddr object objectId. + forall m peerAddr object objectId. 
( MonadSTM m , Ord objectId - , Ord peeraddr + , Ord peerAddr , Show objectId , Typeable objectId ) => - Tracer m (TraceObjectLogic peeraddr objectId object) -> + Tracer m (TraceObjectLogic peerAddr objectId object) -> (object -> SizeInBytes) -> - SharedObjectStateVar m peeraddr objectId object -> - peeraddr -> + SharedObjectStateVar m peerAddr objectId object -> + peerAddr -> -- | set of requested objectIds with their announced size Map objectId SizeInBytes -> -- | received objects @@ -631,10 +631,10 @@ collectObjects :: -- | number of objectIds to be acknowledged and objects to be added to the -- objectpool m (Maybe ObjectDiffusionProtocolError) -collectObjects tracer objectSize sharedVar peeraddr objectIdsRequested objectsMap = do +collectObjects tracer objectSize sharedVar peerAddr objectIdsRequested objectsMap = do r <- atomically $ do st <- readTVar sharedVar - case collectObjectsImpl objectSize peeraddr objectIdsRequested objectsMap st of + case collectObjectsImpl objectSize peerAddr objectIdsRequested objectsMap st of r@(Right st') -> writeTVar sharedVar st' $> r diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Types.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Types.hs index 4a6da67f43..1f0ab5519c 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Types.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Types.hs @@ -145,11 +145,11 @@ instance -- `Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.Decision.pickObjectsToDownload`, we also -- recalculate `referenceCounts` and only keep live `objectId`s in other maps (e.g. -- `availableObjectIds`, `bufferedObjects`, `unknownObjects`). 
-data SharedObjectState peeraddr objectId object = SharedObjectState - { peerObjectStates :: !(Map peeraddr (PeerObjectState objectId object)) +data SharedObjectState peerAddr objectId object = SharedObjectState + { peerObjectStates :: !(Map peerAddr (PeerObjectState objectId object)) -- ^ Map of peer states. -- - -- /Invariant:/ for peeraddr's which are registered using `withPeer`, + -- /Invariant:/ for peerAddr's which are registered using `withPeer`, -- there's always an entry in this map even if the set of `objectId`s is -- empty. , inflightObjects :: !(Map objectId Int) @@ -217,12 +217,12 @@ data SharedObjectState peeraddr objectId object = SharedObjectState deriving (Eq, Show, Generic) instance - ( NoThunks peeraddr + ( NoThunks peerAddr , NoThunks object , NoThunks objectId , NoThunks StdGen ) => - NoThunks (SharedObjectState peeraddr objectId object) + NoThunks (SharedObjectState peerAddr objectId object) -- -- Decisions @@ -296,9 +296,9 @@ emptyObjectDecision = } -- | ObjectLogic tracer. 
-data TraceObjectLogic peeraddr objectId object - = TraceSharedObjectState String (SharedObjectState peeraddr objectId object) - | TraceObjectDecisions (Map peeraddr (ObjectDecision objectId object)) +data TraceObjectLogic peerAddr objectId object + = TraceSharedObjectState String (SharedObjectState peerAddr objectId object) + | TraceObjectDecisions (Map peerAddr (ObjectDecision objectId object)) deriving Show data ProcessedObjectCount = ProcessedObjectCount @@ -367,7 +367,7 @@ data ObjectDiffusionCounters mkObjectDiffusionCounters :: Ord objectId => - SharedObjectState peeraddr objectId object -> + SharedObjectState peerAddr objectId object -> ObjectDiffusionCounters mkObjectDiffusionCounters SharedObjectState From b22520855d02f2e184504be487dd4d3005f4d8de Mon Sep 17 00:00:00 2001 From: Thomas BAGREL Date: Mon, 29 Sep 2025 16:35:32 +0200 Subject: [PATCH 03/43] WIP: further refactor --- .../ObjectDiffusion/Inbound/V2.hs | 34 +- .../ObjectDiffusion/Inbound/V2/Decision.hs | 220 +++++------ .../ObjectDiffusion/Inbound/V2/Policy.hs | 60 ++- .../ObjectDiffusion/Inbound/V2/Registry.hs | 214 +++++------ .../ObjectDiffusion/Inbound/V2/State.hs | 347 +++++++++--------- .../ObjectDiffusion/Inbound/V2/Types.hs | 144 ++++---- 6 files changed, 496 insertions(+), 523 deletions(-) diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2.hs index 5ccae726f3..1565ab7af1 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2.hs @@ -20,10 +20,10 @@ module Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2 , newObjectChannelsVar , ObjectObjectPoolSem , newObjectObjectPoolSem - , SharedObjectStateVar - , newSharedObjectStateVar - , 
ObjectDecisionPolicy (..) - , defaultObjectDecisionPolicy + , DecisionGlobalStateVar + , newDecisionGlobalStateVar + , PeerDecisionPolicy (..) + , defaultPeerDecisionPolicy ) where import Control.Exception (assert) @@ -44,19 +44,19 @@ import Ouroboros.Network.Protocol.ObjectDiffusion.Inbound -- | A object-submission inbound side (server, sic!). -- --- The server blocks on receiving `ObjectDecision` from the decision logic. If +-- The server blocks on receiving `PeerDecision` from the decision logic. If -- there are object's to download it pipelines two requests: first for object's second -- for objectId's. If there are no object's to download, it either sends a blocking or -- non-blocking request for objectId's. objectDiffusionInbound :: - forall objectId object idx m. + forall objectId object ticketNo m. ( MonadDelay m , MonadThrow m , Ord objectId ) => Tracer m (TraceObjectDiffusionInbound objectId object) -> ObjectDiffusionInitDelay -> - ObjectDiffusionObjectPoolWriter objectId object idx m -> + ObjectDiffusionObjectPoolWriter objectId object ticketNo m -> PeerObjectAPI m objectId object -> ObjectDiffusionServerPipelined objectId object m () objectDiffusionInbound @@ -64,7 +64,7 @@ objectDiffusionInbound initDelay ObjectDiffusionObjectPoolWriter{objectId} PeerObjectAPI - { readObjectDecision + { readPeerDecision , handleReceivedObjectIds , handleReceivedObjects , submitObjectToObjectPool @@ -79,11 +79,11 @@ objectDiffusionInbound m (ServerStIdle Z objectId object m ()) serverIdle = do -- Block on next decision. 
- object@ObjectDecision + object@PeerDecision { objectsToRequest = objectsToRequest , objectsToObjectPool = ObjectsToObjectPool{listOfObjectsToObjectPool} } <- - readObjectDecision + readPeerDecision traceWith tracer (TraceObjectInboundDecision object) let !collected = length listOfObjectsToObjectPool @@ -106,9 +106,9 @@ objectDiffusionInbound -- Pipelined request of objects serverReqObjects :: - ObjectDecision objectId object -> + PeerDecision objectId object -> m (ServerStIdle Z objectId object m ()) - serverReqObjects object@ObjectDecision{objectsToRequest = objectsToRequest} = + serverReqObjects object@PeerDecision{objectsToRequest = objectsToRequest} = pure $ SendMsgRequestObjectsPipelined objectsToRequest @@ -117,11 +117,11 @@ objectDiffusionInbound serverReqObjectIds :: forall (n :: N). Nat n -> - ObjectDecision objectId object -> + PeerDecision objectId object -> m (ServerStIdle n objectId object m ()) serverReqObjectIds n - ObjectDecision{objectIdsToRequest = 0} = + PeerDecision{objectIdsToRequest = 0} = case n of Zero -> serverIdle Succ _ -> handleReplies n @@ -131,7 +131,7 @@ objectDiffusionInbound -- the client side wouldn't have a chance to terminate the -- mini-protocol. 
Zero - ObjectDecision + PeerDecision { objectIdsToAcknowledge = objectIdsToAck , objectPipelineObjectIds = False , objectIdsToRequest = objectIdsToReq @@ -153,7 +153,7 @@ objectDiffusionInbound ) serverReqObjectIds n@Zero - ObjectDecision + PeerDecision { objectIdsToAcknowledge = objectIdsToAck , objectPipelineObjectIds = True , objectIdsToRequest = objectIdsToReq @@ -165,7 +165,7 @@ objectDiffusionInbound (handleReplies (Succ n)) serverReqObjectIds n@Succ{} - ObjectDecision + PeerDecision { objectIdsToAcknowledge = objectIdsToAck , objectPipelineObjectIds , objectIdsToRequest = objectIdsToReq diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Decision.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Decision.hs index d4c3527adb..2af0e48429 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Decision.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Decision.hs @@ -9,8 +9,8 @@ {-# OPTIONS_GHC -Wno-unrecognised-pragmas #-} module Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.Decision - ( ObjectDecision (..) - , emptyObjectDecision + ( PeerDecision (..) + , emptyPeerDecision -- * Internal API exposed for testing , makeDecisions @@ -45,29 +45,29 @@ makeDecisions :: , Hashable peerAddr ) => -- | decision policy - ObjectDecisionPolicy -> + PeerDecisionPolicy -> -- | decision context - SharedObjectState peerAddr objectId object -> + DecisionGlobalState peerAddr objectId object -> -- | list of available peers. -- - -- This is a subset of `peerObjectStates` of peers which either: + -- This is a subset of `peerStates` of peers which either: -- * can be used to download a `object`, -- * can acknowledge some `objectId`s. 
- Map peerAddr (PeerObjectState objectId object) -> - ( SharedObjectState peerAddr objectId object - , Map peerAddr (ObjectDecision objectId object) + Map peerAddr (DecisionPeerState objectId object) -> + ( DecisionGlobalState peerAddr objectId object + , Map peerAddr (PeerDecision objectId object) ) makeDecisions policy st = - let (salt, rng') = random (peerRng st) - st' = st{peerRng = rng'} + let (salt, rng') = random (orderRng st) + st' = st{orderRng = rng'} in fn . pickObjectsToDownload policy st' . orderByRejections salt where fn :: forall a. - (a, [(peerAddr, ObjectDecision objectId object)]) -> - (a, Map peerAddr (ObjectDecision objectId object)) + (a, [(peerAddr, PeerDecision objectId object)]) -> + (a, Map peerAddr (PeerDecision objectId object)) fn (a, as) = (a, Map.fromList as) -- | Order peers by how useful the OBJECTs they have provided are. @@ -80,8 +80,8 @@ makeDecisions policy st = orderByRejections :: Hashable peerAddr => Int -> - Map peerAddr (PeerObjectState objectId object) -> - [(peerAddr, PeerObjectState objectId object)] + Map peerAddr (DecisionPeerState objectId object) -> + [(peerAddr, DecisionPeerState objectId object)] orderByRejections salt = List.sortOn (\(peerAddr, ps) -> (score ps, hashWithSalt salt peerAddr)) . 
Map.toList @@ -118,45 +118,45 @@ pickObjectsToDownload :: , Ord objectId ) => -- | decision policy - ObjectDecisionPolicy -> + PeerDecisionPolicy -> -- | shared state - SharedObjectState peerAddr objectId object -> - [(peerAddr, PeerObjectState objectId object)] -> - ( SharedObjectState peerAddr objectId object - , [(peerAddr, ObjectDecision objectId object)] + DecisionGlobalState peerAddr objectId object -> + [(peerAddr, DecisionPeerState objectId object)] -> + ( DecisionGlobalState peerAddr objectId object + , [(peerAddr, PeerDecision objectId object)] ) pickObjectsToDownload - policy@ObjectDecisionPolicy + policy@PeerDecisionPolicy { objectsSizeInflightPerPeer , maxObjectsSizeInflight , objectInflightMultiplicity } - sharedState@SharedObjectState - { peerObjectStates - , inflightObjects - , inflightObjectsSize - , bufferedObjects - , inSubmissionToObjectPoolObjects + sharedState@DecisionGlobalState + { peerStates + , globalInFlightObjects + , globalInFlightObjectsSize + , globalObtainedButNotAckedObjects + , globalToPoolObjects , referenceCounts } = - -- outer fold: fold `[(peerAddr, PeerObjectState objectId object)]` + -- outer fold: fold `[(peerAddr, DecisionPeerState objectId object)]` List.mapAccumR accumFn -- initial state St - { stInflight = inflightObjects - , stInflightSize = inflightObjectsSize + { stInflight = globalInFlightObjects + , stInflightSize = globalInFlightObjectsSize , stAcknowledged = Map.empty - , stInSubmissionToObjectPoolObjects = Map.keysSet inSubmissionToObjectPoolObjects + , stInSubmissionToObjectPoolObjects = Map.keysSet globalToPoolObjects } >>> gn where accumFn :: St peerAddr objectId object -> - (peerAddr, PeerObjectState objectId object) -> + (peerAddr, DecisionPeerState objectId object) -> ( St peerAddr objectId object - , ( (peerAddr, PeerObjectState objectId object) - , ObjectDecision objectId object + , ( (peerAddr, DecisionPeerState objectId object) + , PeerDecision objectId object ) ) accumFn @@ -167,18 +167,18 @@ 
pickObjectsToDownload , stInSubmissionToObjectPoolObjects } ( peerAddr - , peerObjectState@PeerObjectState + , peerObjectState@DecisionPeerState { availableObjectIds - , unknownObjects - , requestedObjectsInflight - , requestedObjectsInflightSize + , requestedButNotReceived + , inFlight + , inFlightSize } ) = let sizeInflightAll :: SizeInBytes sizeInflightOther :: SizeInBytes sizeInflightAll = stInflightSize - sizeInflightOther = sizeInflightAll - requestedObjectsInflightSize + sizeInflightOther = sizeInflightAll - inFlightSize in if sizeInflightAll >= maxObjectsSizeInflight then let ( numObjectIdsToAck @@ -192,7 +192,7 @@ pickObjectsToDownload stInSubmissionToObjectPoolObjects' = stInSubmissionToObjectPoolObjects <> Set.fromList (map fst listOfObjectsToObjectPool) - in if requestedObjectIdsInflight peerObjectState' > 0 + in if numIdsInFlight peerObjectState' > 0 then -- we have objectIds to request ( st @@ -201,13 +201,13 @@ pickObjectsToDownload } , ( (peerAddr, peerObjectState') - , ObjectDecision + , PeerDecision { objectIdsToAcknowledge = numObjectIdsToAck , objectIdsToRequest = numObjectIdsToReq , objectPipelineObjectIds = not . StrictSeq.null - . unacknowledgedObjectIds + . outstandingFifo $ peerObjectState' , objectsToRequest = Map.empty , objectsToObjectPool = objectsToObjectPool @@ -220,14 +220,14 @@ pickObjectsToDownload ( st , ( (peerAddr, peerObjectState') - , emptyObjectDecision + , emptyPeerDecision ) ) else - let requestedObjectsInflightSize' :: SizeInBytes + let inFlightSize' :: SizeInBytes objectsToRequestMap :: Map objectId SizeInBytes - (requestedObjectsInflightSize', objectsToRequestMap) = + (inFlightSize', objectsToRequestMap) = -- inner fold: fold available `objectId`s -- -- Note: although `Map.foldrWithKey` could be used here, it @@ -261,13 +261,13 @@ pickObjectsToDownload stInflight -- remove `object`s which were already downloaded by some -- other peer or are in-flight or unknown by this peer. 
- `Map.withoutKeys` ( Map.keysSet bufferedObjects - <> requestedObjectsInflight - <> unknownObjects + `Map.withoutKeys` ( Map.keysSet globalObtainedButNotAckedObjects + <> inFlight + <> requestedButNotReceived <> stInSubmissionToObjectPoolObjects ) ) - requestedObjectsInflightSize + inFlightSize -- pick from `objectId`'s which are available from that given -- peer. Since we are folding a dictionary each `objectId` -- will be selected only once from a given peer (at least @@ -276,9 +276,9 @@ pickObjectsToDownload objectsToRequest = Map.keysSet objectsToRequestMap peerObjectState' = peerObjectState - { requestedObjectsInflightSize = requestedObjectsInflightSize' - , requestedObjectsInflight = - requestedObjectsInflight + { inFlightSize = inFlightSize' + , inFlight = + inFlight <> objectsToRequest } @@ -302,23 +302,23 @@ pickObjectsToDownload stInSubmissionToObjectPoolObjects' = stInSubmissionToObjectPoolObjects <> Set.fromList (map fst listOfObjectsToObjectPool) - in if requestedObjectIdsInflight peerObjectState'' > 0 + in if numIdsInFlight peerObjectState'' > 0 then -- we can request `objectId`s & `object`s ( St { stInflight = stInflight' - , stInflightSize = sizeInflightOther + requestedObjectsInflightSize' + , stInflightSize = sizeInflightOther + inFlightSize' , stAcknowledged = stAcknowledged' , stInSubmissionToObjectPoolObjects = stInSubmissionToObjectPoolObjects' } , ( (peerAddr, peerObjectState'') - , ObjectDecision + , PeerDecision { objectIdsToAcknowledge = numObjectIdsToAck , objectPipelineObjectIds = not . StrictSeq.null - . unacknowledgedObjectIds + . outstandingFifo $ peerObjectState'' , objectIdsToRequest = numObjectIdsToReq , objectsToRequest = objectsToRequestMap @@ -330,21 +330,21 @@ pickObjectsToDownload -- there are no `objectId`s to request, only `object`s. 
( st { stInflight = stInflight' - , stInflightSize = sizeInflightOther + requestedObjectsInflightSize' + , stInflightSize = sizeInflightOther + inFlightSize' , stInSubmissionToObjectPoolObjects = stInSubmissionToObjectPoolObjects' } , ( (peerAddr, peerObjectState'') - , emptyObjectDecision{objectsToRequest = objectsToRequestMap} + , emptyPeerDecision{objectsToRequest = objectsToRequestMap} ) ) gn :: ( St peerAddr objectId object - , [((peerAddr, PeerObjectState objectId object), ObjectDecision objectId object)] + , [((peerAddr, DecisionPeerState objectId object), PeerDecision objectId object)] ) -> - ( SharedObjectState peerAddr objectId object - , [(peerAddr, ObjectDecision objectId object)] + ( DecisionGlobalState peerAddr objectId object + , [(peerAddr, PeerDecision objectId object)] ) gn ( St @@ -354,9 +354,9 @@ pickObjectsToDownload } , as ) = - let peerObjectStates' = + let peerStates' = Map.fromList ((\(a, _) -> a) <$> as) - <> peerObjectStates + <> peerStates referenceCounts' = Map.merge @@ -372,24 +372,24 @@ pickObjectsToDownload liveSet = Map.keysSet referenceCounts' - bufferedObjects' = - bufferedObjects + globalObtainedButNotAckedObjects' = + globalObtainedButNotAckedObjects `Map.restrictKeys` liveSet - inSubmissionToObjectPoolObjects' = - List.foldl' updateInSubmissionToObjectPoolObjects inSubmissionToObjectPoolObjects as + globalToPoolObjects' = + List.foldl' updateInSubmissionToObjectPoolObjects globalToPoolObjects as in ( sharedState - { peerObjectStates = peerObjectStates' - , inflightObjects = stInflight - , inflightObjectsSize = stInflightSize - , bufferedObjects = bufferedObjects' + { peerStates = peerStates' + , globalInFlightObjects = stInflight + , globalInFlightObjectsSize = stInflightSize + , globalObtainedButNotAckedObjects = globalObtainedButNotAckedObjects' , referenceCounts = referenceCounts' - , inSubmissionToObjectPoolObjects = inSubmissionToObjectPoolObjects' + , globalToPoolObjects = globalToPoolObjects' } , -- exclude empty results 
mapMaybe ( \((a, _), b) -> case b of - ObjectDecision + PeerDecision { objectIdsToAcknowledge = 0 , objectIdsToRequest = 0 , objectsToRequest @@ -406,9 +406,9 @@ pickObjectsToDownload updateInSubmissionToObjectPoolObjects :: forall a. Map objectId Int -> - (a, ObjectDecision objectId object) -> + (a, PeerDecision objectId object) -> Map objectId Int - updateInSubmissionToObjectPoolObjects m (_, ObjectDecision{objectsToObjectPool}) = + updateInSubmissionToObjectPoolObjects m (_, PeerDecision{objectsToObjectPool}) = List.foldl' fn m (listOfObjectsToObjectPool objectsToObjectPool) where fn :: @@ -429,75 +429,75 @@ filterActivePeers :: forall peerAddr objectId object. Ord objectId => HasCallStack => - ObjectDecisionPolicy -> - SharedObjectState peerAddr objectId object -> - Map peerAddr (PeerObjectState objectId object) + PeerDecisionPolicy -> + DecisionGlobalState peerAddr objectId object -> + Map peerAddr (DecisionPeerState objectId object) filterActivePeers - policy@ObjectDecisionPolicy + policy@PeerDecisionPolicy { maxUnacknowledgedObjectIds , objectsSizeInflightPerPeer , maxObjectsSizeInflight , objectInflightMultiplicity } - sharedObjectState@SharedObjectState - { peerObjectStates - , bufferedObjects - , inflightObjects - , inflightObjectsSize - , inSubmissionToObjectPoolObjects + sharedObjectState@DecisionGlobalState + { peerStates + , globalObtainedButNotAckedObjects + , globalInFlightObjects + , globalInFlightObjectsSize + , globalToPoolObjects } - | inflightObjectsSize > maxObjectsSizeInflight = + | globalInFlightObjectsSize > maxObjectsSizeInflight = -- we might be able to request objectIds, we cannot download objects - Map.filter fn peerObjectStates + Map.filter fn peerStates | otherwise = -- we might be able to request objectIds or objects. 
- Map.filter gn peerObjectStates + Map.filter gn peerStates where unrequestable = - Map.keysSet (Map.filter (>= objectInflightMultiplicity) inflightObjects) - <> Map.keysSet bufferedObjects + Map.keysSet (Map.filter (>= objectInflightMultiplicity) globalInFlightObjects) + <> Map.keysSet globalObtainedButNotAckedObjects - fn :: PeerObjectState objectId object -> Bool + fn :: DecisionPeerState objectId object -> Bool fn - peerObjectState@PeerObjectState - { requestedObjectIdsInflight + peerObjectState@DecisionPeerState + { numIdsInFlight } = - requestedObjectIdsInflight == 0 + numIdsInFlight == 0 -- if a peer has objectIds in-flight, we cannot request more objectIds or objects. - && requestedObjectIdsInflight + numOfUnacked <= maxUnacknowledgedObjectIds + && numIdsInFlight + numOfUnacked <= maxUnacknowledgedObjectIds && objectIdsToRequest > 0 where - -- Split `unacknowledgedObjectIds'` into the longest prefix of `objectId`s which + -- Split `outstandingFifo'` into the longest prefix of `objectId`s which -- can be acknowledged and the unacknowledged `objectId`s. 
(objectIdsToRequest, _, unackedObjectIds) = splitAcknowledgedObjectIds policy sharedObjectState peerObjectState numOfUnacked = fromIntegral (StrictSeq.length unackedObjectIds) - gn :: PeerObjectState objectId object -> Bool + gn :: DecisionPeerState objectId object -> Bool gn - peerObjectState@PeerObjectState - { unacknowledgedObjectIds - , requestedObjectIdsInflight - , requestedObjectsInflight - , requestedObjectsInflightSize + peerObjectState@DecisionPeerState + { outstandingFifo + , numIdsInFlight + , inFlight + , inFlightSize , availableObjectIds - , unknownObjects + , requestedButNotReceived } = - ( requestedObjectIdsInflight == 0 - && requestedObjectIdsInflight + numOfUnacked <= maxUnacknowledgedObjectIds + ( numIdsInFlight == 0 + && numIdsInFlight + numOfUnacked <= maxUnacknowledgedObjectIds && objectIdsToRequest > 0 ) || (underSizeLimit && not (Map.null downloadable)) where - numOfUnacked = fromIntegral (StrictSeq.length unacknowledgedObjectIds) - underSizeLimit = requestedObjectsInflightSize <= objectsSizeInflightPerPeer + numOfUnacked = fromIntegral (StrictSeq.length outstandingFifo) + underSizeLimit = inFlightSize <= objectsSizeInflightPerPeer downloadable = availableObjectIds - `Map.withoutKeys` requestedObjectsInflight - `Map.withoutKeys` unknownObjects + `Map.withoutKeys` inFlight + `Map.withoutKeys` requestedButNotReceived `Map.withoutKeys` unrequestable - `Map.withoutKeys` Map.keysSet inSubmissionToObjectPoolObjects + `Map.withoutKeys` Map.keysSet globalToPoolObjects - -- Split `unacknowledgedObjectIds'` into the longest prefix of `objectId`s which + -- Split `outstandingFifo'` into the longest prefix of `objectId`s which -- can be acknowledged and the unacknowledged `objectId`s. 
(objectIdsToRequest, _, _) = splitAcknowledgedObjectIds policy sharedObjectState peerObjectState diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Policy.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Policy.hs index f85f23f16c..49ef5569ca 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Policy.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Policy.hs @@ -1,68 +1,58 @@ {-# LANGUAGE ImportQualifiedPost #-} -{-# LANGUAGE NumericUnderscores #-} module Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.Policy - ( ObjectDecisionPolicy (..) - , defaultObjectDecisionPolicy - , max_OBJECT_SIZE + ( PeerDecisionPolicy (..) + , defaultPeerDecisionPolicy -- * Re-exports - , NumObjectIdsToReq (..) + , NumObjectIdsReq (..) ) where import Control.Monad.Class.MonadTime.SI -import Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.ObjectPool.API (SizeInBytes (..)) -import Ouroboros.Network.Protocol.ObjectDiffusion.Type (NumObjectIdsToReq (..)) +import Ouroboros.Network.Protocol.ObjectDiffusion.Type (NumObjectIdsReq (..)) --- | Maximal object size. --- --- Affects: --- --- * `ObjectDecisionPolicy` --- * `maximumIngressQueue` for `object-submission` mini-protocol, see --- `Ouroboros.Consensus.MiniProtocol.NodeToNode.objectDiffusionProtocolLimits` -max_OBJECT_SIZE :: SizeInBytes -max_OBJECT_SIZE = 65_540 +newtype NumObjects = NumObjects Int + deriving Show -- | Policy for making decisions -data ObjectDecisionPolicy = ObjectDecisionPolicy - { maxNumObjectIdsToRequest :: !NumObjectIdsToReq +data PeerDecisionPolicy = PeerDecisionPolicy + { maxNumObjectIdsRequest :: !NumObjectIdsReq -- ^ a maximal number of objectIds requested at once. 
- , maxUnacknowledgedObjectIds :: !NumObjectIdsToReq - -- ^ maximal number of unacknowledgedObjectIds. Measured in `NumObjectIdsToReq` + , maxUnacknowledgedObjectIds :: !NumObjectIdsReq + -- ^ maximal number of outstandingFifo. Measured in `NumObjectIdsReq` -- since we enforce this policy by requesting not more objectIds than what -- this limit allows. , -- -- Configuration of object decision logic. -- - objectsSizeInflightPerPeer :: !SizeInBytes - -- ^ a limit of object size in-flight from a single peer. - -- It can be exceed by max object size. - , maxObjectsSizeInflight :: !SizeInBytes - -- ^ a limit of object size in-flight from all peers. - -- It can be exceed by max object size. + objectsSizeInflightPerPeer :: !NumObjects + -- ^ a limit of objects in-flight from a single peer, plus or minus 1. + , maxObjectsSizeInflight :: !NumObjects + -- ^ a limit of object size in-flight from all peers, plus or minus 1 , objectInflightMultiplicity :: !Int -- ^ from how many peers download the `objectId` simultaneously - , bufferedObjectsMinLifetime :: !DiffTime + , globalObtainedButNotAckedObjectsMinLifetime :: !DiffTime -- ^ how long OBJECTs that have been added to the objectpool will be - -- kept in the `bufferedObjects` cache. + -- kept in the `globalObtainedButNotAckedObjects` cache. , scoreRate :: !Double -- ^ rate at which "rejected" OBJECTs drain. Unit: OBJECT/seconds. + -- TODO: still relevant? , scoreMax :: !Double -- ^ Maximum number of "rejections". Unit: seconds + -- TODO: still relevant? 
} deriving Show -defaultObjectDecisionPolicy :: ObjectDecisionPolicy -defaultObjectDecisionPolicy = - ObjectDecisionPolicy - { maxNumObjectIdsToRequest = 3 +defaultPeerDecisionPolicy :: PeerDecisionPolicy +defaultPeerDecisionPolicy = + PeerDecisionPolicy + { maxNumObjectIdsRequest = 3 , maxUnacknowledgedObjectIds = 10 -- must be the same as objectDiffusionMaxUnacked - , objectsSizeInflightPerPeer = max_OBJECT_SIZE * 6 - , maxObjectsSizeInflight = max_OBJECT_SIZE * 20 + , objectsSizeInflightPerPeer = NumObjects 6 + , maxObjectsSizeInflight = NumObjects 20 , objectInflightMultiplicity = 2 - , bufferedObjectsMinLifetime = 2 + , globalObtainedButNotAckedObjectsMinLifetime = 2 , scoreRate = 0.1 , scoreMax = 15 * 60 } diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Registry.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Registry.hs index 67c3c2494f..f5587190f9 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Registry.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Registry.hs @@ -9,8 +9,8 @@ module Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.Registry ( ObjectChannels (..) , ObjectChannelsVar , ObjectObjectPoolSem - , SharedObjectStateVar - , newSharedObjectStateVar + , DecisionGlobalStateVar + , newDecisionGlobalStateVar , newObjectChannelsVar , newObjectObjectPoolSem , PeerObjectAPI (..) @@ -47,7 +47,7 @@ import Ouroboros.Network.Protocol.ObjectDiffusion.Type -- | Communication channels between `ObjectDiffusion` client mini-protocol and -- decision logic. 
newtype ObjectChannels m peerAddr objectId object = ObjectChannels - { objectChannelMap :: Map peerAddr (StrictMVar m (ObjectDecision objectId object)) + { objectChannelMap :: Map peerAddr (StrictMVar m (PeerDecision objectId object)) } type ObjectChannelsVar m peerAddr objectId object = @@ -61,12 +61,12 @@ newtype ObjectObjectPoolSem m = ObjectObjectPoolSem (TSem m) newObjectObjectPoolSem :: MonadSTM m => m (ObjectObjectPoolSem m) newObjectObjectPoolSem = ObjectObjectPoolSem <$> atomically (newTSem 1) --- | API to access `PeerObjectState` inside `PeerObjectStateVar`. +-- | API to access `DecisionPeerState` inside `DecisionPeerStateVar`. data PeerObjectAPI m objectId object = PeerObjectAPI - { readObjectDecision :: m (ObjectDecision objectId object) - -- ^ a blocking action which reads `ObjectDecision` + { readPeerDecision :: m (PeerDecision objectId object) + -- ^ a blocking action which reads `PeerDecision` , handleReceivedObjectIds :: - NumObjectIdsToReq -> + NumObjectIdsReq -> StrictSeq objectId -> -- \^ received objectIds Map objectId SizeInBytes -> @@ -91,10 +91,10 @@ data PeerObjectAPI m objectId object = PeerObjectAPI data ObjectObjectPoolResult = ObjectAccepted | ObjectRejected -- | A bracket function which registers / de-registers a new peer in --- `SharedObjectStateVar` and `PeerObjectStateVar`s, which exposes `PeerObjectStateAPI`. --- `PeerObjectStateAPI` is only safe inside the `withPeer` scope. +-- `DecisionGlobalStateVar` and `DecisionPeerStateVar`s, which exposes `DecisionPeerStateAPI`. +-- `DecisionPeerStateAPI` is only safe inside the `withPeer` scope. withPeer :: - forall object peerAddr objectId idx m a. + forall object peerAddr objectId ticketNo m a. 
( MonadMask m , MonadMVar m , MonadSTM m @@ -108,22 +108,22 @@ withPeer :: Tracer m (TraceObjectLogic peerAddr objectId object) -> ObjectChannelsVar m peerAddr objectId object -> ObjectObjectPoolSem m -> - ObjectDecisionPolicy -> - SharedObjectStateVar m peerAddr objectId object -> - ObjectDiffusionObjectPoolReader objectId object idx m -> - ObjectDiffusionObjectPoolWriter objectId object idx m -> + PeerDecisionPolicy -> + DecisionGlobalStateVar m peerAddr objectId object -> + ObjectDiffusionObjectPoolReader objectId object ticketNo m -> + ObjectDiffusionObjectPoolWriter objectId object ticketNo m -> (object -> SizeInBytes) -> peerAddr -> -- ^ new peer - -- | callback which gives access to `PeerObjectStateAPI` + -- | callback which gives access to `DecisionPeerStateAPI` (PeerObjectAPI m objectId object -> m a) -> m a withPeer tracer channelsVar (ObjectObjectPoolSem objectpoolSem) - policy@ObjectDecisionPolicy{bufferedObjectsMinLifetime} + policy@PeerDecisionPolicy{globalObtainedButNotAckedObjectsMinLifetime} sharedStateVar ObjectDiffusionObjectPoolReader{objectpoolGetSnapshot} ObjectDiffusionObjectPoolWriter{objectpoolAddObjects} @@ -149,7 +149,7 @@ withPeer return ( ObjectChannels{objectChannelMap = objectChannelMap'} , PeerObjectAPI - { readObjectDecision = takeMVar chann' + { readPeerDecision = takeMVar chann' , handleReceivedObjectIds , handleReceivedObjects , submitObjectToObjectPool @@ -171,58 +171,58 @@ withPeer io where registerPeer :: - SharedObjectState peerAddr objectId object -> - SharedObjectState peerAddr objectId object - registerPeer st@SharedObjectState{peerObjectStates} = + DecisionGlobalState peerAddr objectId object -> + DecisionGlobalState peerAddr objectId object + registerPeer st@DecisionGlobalState{peerStates} = st - { peerObjectStates = + { peerStates = Map.insert peerAddr - PeerObjectState + DecisionPeerState { availableObjectIds = Map.empty - , requestedObjectIdsInflight = 0 - , requestedObjectsInflightSize = 0 - , 
requestedObjectsInflight = Set.empty - , unacknowledgedObjectIds = StrictSeq.empty - , unknownObjects = Set.empty + , numIdsInFlight = 0 + , inFlightSize = 0 + , inFlight = Set.empty + , outstandingFifo = StrictSeq.empty + , requestedButNotReceived = Set.empty , score = 0 , scoreTs = Time 0 - , downloadedObjects = Map.empty - , toObjectPoolObjects = Map.empty + , pendingObjects = Map.empty + , toPoolObjects = Map.empty } - peerObjectStates + peerStates } -- TODO: this function needs to be tested! -- Issue: https://github.com/IntersectMBO/ouroboros-network/issues/5151 unregisterPeer :: - SharedObjectState peerAddr objectId object -> - SharedObjectState peerAddr objectId object + DecisionGlobalState peerAddr objectId object -> + DecisionGlobalState peerAddr objectId object unregisterPeer - st@SharedObjectState - { peerObjectStates - , bufferedObjects + st@DecisionGlobalState + { peerStates + , globalObtainedButNotAckedObjects , referenceCounts - , inflightObjects - , inflightObjectsSize - , inSubmissionToObjectPoolObjects + , globalInFlightObjects + , globalInFlightObjectsSize + , globalToPoolObjects } = st - { peerObjectStates = peerObjectStates' - , bufferedObjects = bufferedObjects' + { peerStates = peerStates' + , globalObtainedButNotAckedObjects = globalObtainedButNotAckedObjects' , referenceCounts = referenceCounts' - , inflightObjects = inflightObjects' - , inflightObjectsSize = inflightObjectsSize' - , inSubmissionToObjectPoolObjects = inSubmissionToObjectPoolObjects' + , globalInFlightObjects = globalInFlightObjects' + , globalInFlightObjectsSize = globalInFlightObjectsSize' + , globalToPoolObjects = globalToPoolObjects' } where - ( PeerObjectState - { unacknowledgedObjectIds - , requestedObjectsInflight - , requestedObjectsInflightSize - , toObjectPoolObjects + ( DecisionPeerState + { outstandingFifo + , inFlight + , inFlightSize + , toPoolObjects } - , peerObjectStates' + , peerStates' ) = Map.alterF ( \case @@ -230,7 +230,7 @@ withPeer Just a -> (a, 
Nothing) ) peerAddr - peerObjectStates + peerStates referenceCounts' = Foldable.foldl' @@ -240,28 +240,28 @@ withPeer else Nothing ) referenceCounts - unacknowledgedObjectIds + outstandingFifo liveSet = Map.keysSet referenceCounts' - bufferedObjects' = - bufferedObjects + globalObtainedButNotAckedObjects' = + globalObtainedButNotAckedObjects `Map.restrictKeys` liveSet - inflightObjects' = Foldable.foldl' purgeInflightObjects inflightObjects requestedObjectsInflight - inflightObjectsSize' = inflightObjectsSize - requestedObjectsInflightSize + globalInFlightObjects' = Foldable.foldl' purgeInflightObjects globalInFlightObjects inFlight + globalInFlightObjectsSize' = globalInFlightObjectsSize - inFlightSize -- When we unregister a peer, we need to subtract all objects in the - -- `toObjectPoolObjects`, as they will not be submitted to the objectpool. - inSubmissionToObjectPoolObjects' = + -- `toPoolObjects`, as they will not be submitted to the objectpool. + globalToPoolObjects' = Foldable.foldl' ( flip $ Map.update \cnt -> if cnt > 1 then Just $! 
pred cnt else Nothing ) - inSubmissionToObjectPoolObjects - (Map.keysSet toObjectPoolObjects) + globalToPoolObjects + (Map.keysSet toPoolObjects) purgeInflightObjects m objectId = Map.alter fn objectId m where @@ -336,54 +336,54 @@ withPeer updateBufferedObject :: Time -> ObjectObjectPoolResult -> - SharedObjectState peerAddr objectId object -> - SharedObjectState peerAddr objectId object + DecisionGlobalState peerAddr objectId object -> + DecisionGlobalState peerAddr objectId object updateBufferedObject _ ObjectRejected - st@SharedObjectState - { peerObjectStates - , inSubmissionToObjectPoolObjects + st@DecisionGlobalState + { peerStates + , globalToPoolObjects } = st - { peerObjectStates = peerObjectStates' - , inSubmissionToObjectPoolObjects = inSubmissionToObjectPoolObjects' + { peerStates = peerStates' + , globalToPoolObjects = globalToPoolObjects' } where - inSubmissionToObjectPoolObjects' = + globalToPoolObjects' = Map.update (\case 1 -> Nothing; n -> Just $! pred n) objectId - inSubmissionToObjectPoolObjects + globalToPoolObjects - peerObjectStates' = Map.update fn peerAddr peerObjectStates + peerStates' = Map.update fn peerAddr peerStates where - fn ps = Just $! ps{toObjectPoolObjects = Map.delete objectId (toObjectPoolObjects ps)} + fn ps = Just $! 
ps{toPoolObjects = Map.delete objectId (toPoolObjects ps)} updateBufferedObject now ObjectAccepted - st@SharedObjectState - { peerObjectStates - , bufferedObjects + st@DecisionGlobalState + { peerStates + , globalObtainedButNotAckedObjects , referenceCounts - , timedObjects - , inSubmissionToObjectPoolObjects + , globalRententionTimeouts + , globalToPoolObjects } = st - { peerObjectStates = peerObjectStates' - , bufferedObjects = bufferedObjects' - , timedObjects = timedObjects' + { peerStates = peerStates' + , globalObtainedButNotAckedObjects = globalObtainedButNotAckedObjects' + , globalRententionTimeouts = globalRententionTimeouts' , referenceCounts = referenceCounts' - , inSubmissionToObjectPoolObjects = inSubmissionToObjectPoolObjects' + , globalToPoolObjects = globalToPoolObjects' } where - inSubmissionToObjectPoolObjects' = + globalToPoolObjects' = Map.update (\case 1 -> Nothing; n -> Just $! pred n) objectId - inSubmissionToObjectPoolObjects + globalToPoolObjects - timedObjects' = Map.alter fn (addTime bufferedObjectsMinLifetime now) timedObjects + globalRententionTimeouts' = Map.alter fn (addTime globalObtainedButNotAckedObjectsMinLifetime now) globalRententionTimeouts where fn :: Maybe [objectId] -> Maybe [objectId] fn Nothing = Just [objectId] @@ -395,14 +395,14 @@ withPeer fn Nothing = Just 1 fn (Just n) = Just $! succ n - bufferedObjects' = Map.insert objectId (Just object) bufferedObjects + globalObtainedButNotAckedObjects' = Map.insert objectId (Just object) globalObtainedButNotAckedObjects - peerObjectStates' = Map.update fn peerAddr peerObjectStates + peerStates' = Map.update fn peerAddr peerStates where - fn ps = Just $! ps{toObjectPoolObjects = Map.delete objectId (toObjectPoolObjects ps)} + fn ps = Just $! 
ps{toPoolObjects = Map.delete objectId (toPoolObjects ps)} handleReceivedObjectIds :: - NumObjectIdsToReq -> + NumObjectIdsReq -> StrictSeq objectId -> Map objectId SizeInBytes -> m () @@ -425,7 +425,7 @@ withPeer handleReceivedObjects objectIds objects = collectObjects tracer objectSize sharedStateVar peerAddr objectIds objects - -- Update `score` & `scoreTs` fields of `PeerObjectState`, return the new + -- Update `score` & `scoreTs` fields of `DecisionPeerState`, return the new -- updated `score`. -- -- PRECONDITION: the `Double` argument is non-negative. @@ -437,27 +437,27 @@ withPeer | n < 0 = error ("ObjectDiffusion.countRejectedObjects: invariant violation for peer " ++ show peerAddr) countRejectedObjects now n = atomically $ stateTVar sharedStateVar $ \st -> - let (result, peerObjectStates') = Map.alterF fn peerAddr (peerObjectStates st) - in (result, st{peerObjectStates = peerObjectStates'}) + let (result, peerStates') = Map.alterF fn peerAddr (peerStates st) + in (result, st{peerStates = peerStates'}) where - fn :: Maybe (PeerObjectState objectId object) -> (Double, Maybe (PeerObjectState objectId object)) + fn :: Maybe (DecisionPeerState objectId object) -> (Double, Maybe (DecisionPeerState objectId object)) fn Nothing = error ("ObjectDiffusion.withPeer: invariant violation for peer " ++ show peerAddr) fn (Just ps) = (score ps', Just $! 
ps') where ps' = updateRejects policy now n ps updateRejects :: - ObjectDecisionPolicy -> + PeerDecisionPolicy -> Time -> Double -> - PeerObjectState objectId object -> - PeerObjectState objectId object + DecisionPeerState objectId object -> + DecisionPeerState objectId object updateRejects _ now 0 pts | score pts == 0 = pts{scoreTs = now} updateRejects - ObjectDecisionPolicy{scoreRate, scoreMax} + PeerDecisionPolicy{scoreRate, scoreMax} now n - pts@PeerObjectState{score, scoreTs} = + pts@DecisionPeerState{score, scoreTs} = let duration = diffTime now scoreTs !drain = realToFrac duration * scoreRate !drained = max 0 $ score - drain @@ -474,8 +474,8 @@ drainRejectionThread :: , Ord objectId ) => Tracer m (TraceObjectLogic peerAddr objectId object) -> - ObjectDecisionPolicy -> - SharedObjectStateVar m peerAddr objectId object -> + PeerDecisionPolicy -> + DecisionGlobalStateVar m peerAddr objectId object -> m Void drainRejectionThread tracer policy sharedStateVar = do labelThisThread "object-rejection-drain" @@ -494,17 +494,17 @@ drainRejectionThread tracer policy sharedStateVar = do st <- readTVar sharedStateVar let ptss = if now > nextDrain - then Map.map (updateRejects policy now 0) (peerObjectStates st) - else peerObjectStates st + then Map.map (updateRejects policy now 0) (peerStates st) + else peerStates st st' = tickTimedObjects now st - { peerObjectStates = ptss + { peerStates = ptss } writeTVar sharedStateVar st' return st' - traceWith tracer (TraceSharedObjectState "drainRejectionThread" st'') + traceWith tracer (TraceDecisionGlobalState "drainRejectionThread" st'') if now > nextDrain then go $ addTime drainInterval now @@ -523,9 +523,9 @@ decisionLogicThread :: ) => Tracer m (TraceObjectLogic peerAddr objectId object) -> Tracer m ObjectDiffusionCounters -> - ObjectDecisionPolicy -> + PeerDecisionPolicy -> ObjectChannelsVar m peerAddr objectId object -> - SharedObjectStateVar m peerAddr objectId object -> + DecisionGlobalStateVar m peerAddr objectId object 
-> m Void decisionLogicThread tracer counterTracer policy objectChannelsVar sharedStateVar = do labelThisThread "object-decision" @@ -547,8 +547,8 @@ decisionLogicThread tracer counterTracer policy objectChannelsVar sharedStateVar let (sharedState, decisions) = makeDecisions policy sharedObjectState activePeers writeTVar sharedStateVar sharedState return (decisions, sharedState) - traceWith tracer (TraceSharedObjectState "decisionLogicThread" st) - traceWith tracer (TraceObjectDecisions decisions) + traceWith tracer (TraceDecisionGlobalState "decisionLogicThread" st) + traceWith tracer (TracePeerDecisions decisions) ObjectChannels{objectChannelMap} <- readMVar objectChannelsVar traverse_ (\(mvar, d) -> modifyMVarWithDefault_ mvar d (\d' -> pure (d' <> d))) @@ -585,9 +585,9 @@ decisionLogicThreads :: ) => Tracer m (TraceObjectLogic peerAddr objectId object) -> Tracer m ObjectDiffusionCounters -> - ObjectDecisionPolicy -> + PeerDecisionPolicy -> ObjectChannelsVar m peerAddr objectId object -> - SharedObjectStateVar m peerAddr objectId object -> + DecisionGlobalStateVar m peerAddr objectId object -> m Void decisionLogicThreads tracer counterTracer policy objectChannelsVar sharedStateVar = uncurry (<>) diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/State.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/State.hs index 0558ad9353..db362dc350 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/State.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/State.hs @@ -7,10 +7,10 @@ module Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.State ( -- * Core API - SharedObjectState (..) - , PeerObjectState (..) - , SharedObjectStateVar - , newSharedObjectStateVar + DecisionGlobalState (..) + , DecisionPeerState (..) 
+ , DecisionGlobalStateVar + , newDecisionGlobalStateVar , receivedObjectIds , collectObjects , acknowledgeObjectIds @@ -38,13 +38,14 @@ import Data.Map.Strict qualified as Map import Data.Maybe (fromJust, maybeToList) import Data.Sequence.Strict (StrictSeq) import Data.Sequence.Strict qualified as StrictSeq +import Data.Set (Set) import Data.Set qualified as Set import Data.Typeable (Typeable) import GHC.Stack (HasCallStack) import Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.Policy import Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.Types -import Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.ObjectPool.API (ObjectPoolSnapshot (..), SizeInBytes (..)) -import Ouroboros.Network.Protocol.ObjectDiffusion.Type (NumObjectIdsToAck (..)) +import Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.ObjectPool.API (SizeInBytes (..), ObjectPoolWriter (opwHasObject)) +import Ouroboros.Network.Protocol.ObjectDiffusion.Type (NumObjectIdsAck (..)) import System.Random (StdGen) -- @@ -55,28 +56,28 @@ acknowledgeObjectIds :: forall peerAddr object objectId. Ord objectId => HasCallStack => - ObjectDecisionPolicy -> - SharedObjectState peerAddr objectId object -> - PeerObjectState objectId object -> + PeerDecisionPolicy -> + DecisionGlobalState peerAddr objectId object -> + DecisionPeerState objectId object -> -- | number of objectId to acknowledge, requests, objects which we can submit to the - -- objectpool, objectIds to acknowledge with multiplicities, updated PeerObjectState. - ( NumObjectIdsToAck - , NumObjectIdsToReq + -- objectpool, objectIds to acknowledge with multiplicities, updated DecisionPeerState. 
+ ( NumObjectIdsAck + , NumObjectIdsReq , ObjectsToObjectPool objectId object , RefCountDiff objectId - , PeerObjectState objectId object + , DecisionPeerState objectId object ) {-# INLINE acknowledgeObjectIds #-} acknowledgeObjectIds policy sharedObjectState - ps@PeerObjectState + ps@DecisionPeerState { availableObjectIds - , unknownObjects - , requestedObjectIdsInflight - , downloadedObjects + , requestedButNotReceived + , numIdsInFlight + , pendingObjects , score - , toObjectPoolObjects + , toPoolObjects } = -- We can only acknowledge objectIds when we can request new ones, since -- a `MsgRequestObjectIds` for 0 objectIds is a protocol error. @@ -87,15 +88,15 @@ acknowledgeObjectIds , ObjectsToObjectPool objectsToObjectPool , refCountDiff , ps - { unacknowledgedObjectIds = unacknowledgedObjectIds' + { outstandingFifo = outstandingFifo' , availableObjectIds = availableObjectIds' - , unknownObjects = unknownObjects' - , requestedObjectIdsInflight = - requestedObjectIdsInflight + , requestedButNotReceived = requestedButNotReceived' + , numIdsInFlight = + numIdsInFlight + objectIdsToRequest - , downloadedObjects = downloadedObjects' + , pendingObjects = pendingObjects' , score = score' - , toObjectPoolObjects = toObjectPoolObjects' + , toPoolObjects = toPoolObjects' } ) else @@ -103,31 +104,31 @@ acknowledgeObjectIds , 0 , ObjectsToObjectPool objectsToObjectPool , RefCountDiff Map.empty - , ps{toObjectPoolObjects = toObjectPoolObjects'} + , ps{toPoolObjects = toPoolObjects'} ) where - -- Split `unacknowledgedObjectIds'` into the longest prefix of `objectId`s which + -- Split `outstandingFifo'` into the longest prefix of `objectId`s which -- can be acknowledged and the unacknowledged `objectId`s. 
- (objectIdsToRequest, acknowledgedObjectIds, unacknowledgedObjectIds') = + (objectIdsToRequest, acknowledgedObjectIds, outstandingFifo') = splitAcknowledgedObjectIds policy sharedObjectState ps objectsToObjectPool = [ (objectId, object) | objectId <- toList toObjectPoolObjectIds - , objectId `Map.notMember` bufferedObjects sharedObjectState - , object <- maybeToList $ objectId `Map.lookup` downloadedObjects + , objectId `Map.notMember` globalObtainedButNotAckedObjects sharedObjectState + , object <- maybeToList $ objectId `Map.lookup` pendingObjects ] (toObjectPoolObjectIds, _) = - StrictSeq.spanl (`Map.member` downloadedObjects) acknowledgedObjectIds + StrictSeq.spanl (`Map.member` pendingObjects) acknowledgedObjectIds objectsToObjectPoolMap = Map.fromList objectsToObjectPool - toObjectPoolObjects' = toObjectPoolObjects <> objectsToObjectPoolMap + toPoolObjects' = toPoolObjects <> objectsToObjectPoolMap - (downloadedObjects', ackedDownloadedObjects) = Map.partitionWithKey (\objectId _ -> objectId `Set.member` liveSet) downloadedObjects + (pendingObjects', ackedDownloadedObjects) = Map.partitionWithKey (\objectId _ -> objectId `Set.member` liveSet) pendingObjects -- latexObjects: transactions which were downloaded by another peer before we -- downloaded them; it relies on that `objectToObjectPool` filters out - -- `bufferedObjects`. + -- `globalObtainedButNotAckedObjects`. 
lateObjects = Map.filterWithKey (\objectId _ -> objectId `Map.notMember` objectsToObjectPoolMap) @@ -135,17 +136,17 @@ acknowledgeObjectIds score' = score + fromIntegral (Map.size lateObjects) -- the set of live `objectIds` - liveSet = Set.fromList (toList unacknowledgedObjectIds') + liveSet = Set.fromList (toList outstandingFifo') availableObjectIds' = availableObjectIds `Map.restrictKeys` liveSet -- We remove all acknowledged `objectId`s which are not in - -- `unacknowledgedObjectIds''`, but also return the unknown set before any - -- modifications (which is used to compute `unacknowledgedObjectIds''` + -- `outstandingFifo''`, but also return the unknown set before any + -- modifications (which is used to compute `outstandingFifo''` -- above). - unknownObjects' = unknownObjects `Set.intersection` liveSet + requestedButNotReceived' = requestedButNotReceived `Set.intersection` liveSet refCountDiff = RefCountDiff $ @@ -158,7 +159,7 @@ acknowledgeObjectIds fn Nothing = Just 1 fn (Just n) = Just $! 
n + 1 - objectIdsToAcknowledge :: NumObjectIdsToAck + objectIdsToAcknowledge :: NumObjectIdsAck objectIdsToAcknowledge = fromIntegral $ StrictSeq.length acknowledgedObjectIds -- | Split unacknowledged objectIds into acknowledged and unacknowledged parts, also @@ -166,47 +167,47 @@ acknowledgeObjectIds splitAcknowledgedObjectIds :: Ord objectId => HasCallStack => - ObjectDecisionPolicy -> - SharedObjectState peer objectId object -> - PeerObjectState objectId object -> + PeerDecisionPolicy -> + DecisionGlobalState peer objectId object -> + DecisionPeerState objectId object -> -- | number of objectIds to request, acknowledged objectIds, unacknowledged objectIds - (NumObjectIdsToReq, StrictSeq.StrictSeq objectId, StrictSeq.StrictSeq objectId) + (NumObjectIdsReq, StrictSeq.StrictSeq objectId, StrictSeq.StrictSeq objectId) splitAcknowledgedObjectIds - ObjectDecisionPolicy + PeerDecisionPolicy { maxUnacknowledgedObjectIds - , maxNumObjectIdsToRequest + , maxNumObjectIdsRequest } - SharedObjectState - { bufferedObjects + DecisionGlobalState + { globalObtainedButNotAckedObjects } - PeerObjectState - { unacknowledgedObjectIds - , unknownObjects - , downloadedObjects - , requestedObjectsInflight - , requestedObjectIdsInflight + DecisionPeerState + { outstandingFifo + , requestedButNotReceived + , pendingObjects + , inFlight + , numIdsInFlight } = - (objectIdsToRequest, acknowledgedObjectIds', unacknowledgedObjectIds') + (objectIdsToRequest, acknowledgedObjectIds', outstandingFifo') where - (acknowledgedObjectIds', unacknowledgedObjectIds') = + (acknowledgedObjectIds', outstandingFifo') = StrictSeq.spanl ( \objectId -> - ( objectId `Map.member` bufferedObjects - || objectId `Set.member` unknownObjects - || objectId `Map.member` downloadedObjects + ( objectId `Map.member` globalObtainedButNotAckedObjects + || objectId `Set.member` requestedButNotReceived + || objectId `Map.member` pendingObjects ) - && objectId `Set.notMember` requestedObjectsInflight + && objectId 
`Set.notMember` inFlight ) - unacknowledgedObjectIds - numOfUnacked = StrictSeq.length unacknowledgedObjectIds + outstandingFifo + numOfUnacked = StrictSeq.length outstandingFifo numOfAcked = StrictSeq.length acknowledgedObjectIds' - unackedAndRequested = fromIntegral numOfUnacked + requestedObjectIdsInflight + unackedAndRequested = fromIntegral numOfUnacked + numIdsInFlight objectIdsToRequest = assert (unackedAndRequested <= maxUnacknowledgedObjectIds) $ - assert (requestedObjectIdsInflight <= maxNumObjectIdsToRequest) $ + assert (numIdsInFlight <= maxNumObjectIdsRequest) $ (maxUnacknowledgedObjectIds - unackedAndRequested + fromIntegral numOfAcked) - `min` (maxNumObjectIdsToRequest - requestedObjectIdsInflight) + `min` (maxNumObjectIdsRequest - numIdsInFlight) -- | `RefCountDiff` represents a map of `objectId` which can be acknowledged -- together with their multiplicities. @@ -237,17 +238,17 @@ tickTimedObjects :: forall peerAddr object objectId. Ord objectId => Time -> - SharedObjectState peerAddr objectId object -> - SharedObjectState peerAddr objectId object + DecisionGlobalState peerAddr objectId object -> + DecisionGlobalState peerAddr objectId object tickTimedObjects now - st@SharedObjectState - { timedObjects + st@DecisionGlobalState + { globalRententionTimeouts , referenceCounts - , bufferedObjects + , globalObtainedButNotAckedObjects } = - let (expiredObjects', timedObjects') = - case Map.splitLookup now timedObjects of + let (expiredObjects', globalRententionTimeouts') = + case Map.splitLookup now globalRententionTimeouts of (expired, Just objectIds, timed) -> ( expired -- Map.split doesn't include the `now` entry in the map , Map.insert now objectIds timed @@ -257,11 +258,11 @@ tickTimedObjects refDiff = Map.foldl' fn Map.empty expiredObjects' referenceCounts' = updateRefCounts referenceCounts (RefCountDiff refDiff) liveSet = Map.keysSet referenceCounts' - bufferedObjects' = bufferedObjects `Map.restrictKeys` liveSet + 
globalObtainedButNotAckedObjects' = globalObtainedButNotAckedObjects `Map.restrictKeys` liveSet in st - { timedObjects = timedObjects' + { globalRententionTimeouts = globalRententionTimeouts' , referenceCounts = referenceCounts' - , bufferedObjects = bufferedObjects' + , globalObtainedButNotAckedObjects = globalObtainedButNotAckedObjects' } where fn :: @@ -287,7 +288,7 @@ tickTimedObjects -- -- | Insert received `objectId`s and return the number of objectIds to be acknowledged --- and the updated `SharedObjectState`. +-- and the updated `DecisionGlobalState`. receivedObjectIdsImpl :: forall peerAddr object objectId. (Ord objectId, Ord peerAddr, HasCallStack) => @@ -296,45 +297,45 @@ receivedObjectIdsImpl :: (objectId -> Bool) -> peerAddr -> -- | number of requests to subtract from - -- `requestedObjectIdsInflight` - NumObjectIdsToReq -> + -- `numIdsInFlight` + NumObjectIdsReq -> -- | sequence of received `objectIds` StrictSeq objectId -> -- | received `objectId`s with sizes - Map objectId SizeInBytes -> - SharedObjectState peerAddr objectId object -> - SharedObjectState peerAddr objectId object + Set objectId -> + DecisionGlobalState peerAddr objectId object -> + DecisionGlobalState peerAddr objectId object receivedObjectIdsImpl objectpoolHasObject peerAddr reqNo objectIdsSeq - objectIdsMap - st@SharedObjectState - { peerObjectStates - , bufferedObjects + objectIdsSet + st@DecisionGlobalState + { peerStates + , globalObtainedButNotAckedObjects , referenceCounts } = - -- using `alterF` so the update of `PeerObjectState` is done in one lookup + -- using `alterF` so the update of `DecisionPeerState` is done in one lookup case Map.alterF (fmap Just . fn . fromJust) peerAddr - peerObjectStates of - (st', peerObjectStates') -> - st'{peerObjectStates = peerObjectStates'} + peerStates of + (st', peerStates') -> + st'{peerStates = peerStates'} where - -- update `PeerObjectState` and return number of `objectId`s to acknowledged and - -- updated `SharedObjectState`. 
+ -- update `DecisionPeerState` and return number of `objectId`s to be acknowledged and + -- updated `DecisionGlobalState`. fn :: - PeerObjectState objectId object -> - ( SharedObjectState peerAddr objectId object - , PeerObjectState objectId object + DecisionPeerState objectId object -> + ( DecisionGlobalState peerAddr objectId object + , DecisionPeerState objectId object ) fn - ps@PeerObjectState + ps@DecisionPeerState { availableObjectIds - , requestedObjectIdsInflight - , unacknowledgedObjectIds + , numIdsInFlight + , outstandingFifo } = (st', ps') where @@ -344,33 +345,31 @@ receivedObjectIdsImpl -- Divide the new objectIds in two: those that are already in the objectpool -- and those that are not. We'll request some objects from the latter. - (ignoredObjectIds, availableObjectIdsMap) = - Map.partitionWithKey - (\objectId _ -> objectpoolHasObject objectId) - objectIdsMap + (ignoredObjectIds, availableObjectIdsSet) = + Set.partition objectpoolHasObject objectIdsSet -- Add all `objectIds` from `availableObjectIdsMap` which are not -- unacknowledged or already buffered. Unacknowledged objectIds must have -- already been added to `availableObjectIds` map before. availableObjectIds' = - Map.foldlWithKey - (\m objectId sizeInBytes -> Map.insert objectId sizeInBytes m) + Set.foldl + (\m objectId -> Set.insert objectId m) availableObjectIds - ( Map.filterWithKey - ( \objectId _ -> - objectId `notElem` unacknowledgedObjectIds - && objectId `Map.notMember` bufferedObjects + ( Set.filter + ( \objectId -> + objectId `notElem` outstandingFifo + && objectId `Map.notMember` globalObtainedButNotAckedObjects ) - availableObjectIdsMap + availableObjectIdsSet ) - -- Add received objectIds to `unacknowledgedObjectIds`. - unacknowledgedObjectIds' = unacknowledgedObjectIds <> objectIdsSeq + -- Add received objectIds to `outstandingFifo`. + outstandingFifo' = outstandingFifo <> objectIdsSeq -- Add ignored `objects` to buffered ones.
- -- Note: we prefer to keep the `object` if it's already in `bufferedObjects`. - bufferedObjects' = - bufferedObjects + -- Note: we prefer to keep the `object` if it's already in `globalObtainedButNotAckedObjects`. + globalObtainedButNotAckedObjects' = + globalObtainedButNotAckedObjects <> Map.map (const Nothing) ignoredObjectIds referenceCounts' = @@ -387,16 +386,16 @@ receivedObjectIdsImpl st' = st - { bufferedObjects = bufferedObjects' + { globalObtainedButNotAckedObjects = globalObtainedButNotAckedObjects' , referenceCounts = referenceCounts' } ps' = assert - (requestedObjectIdsInflight >= reqNo) + (numIdsInFlight >= reqNo) ps { availableObjectIds = availableObjectIds' - , unacknowledgedObjectIds = unacknowledgedObjectIds' - , requestedObjectIdsInflight = requestedObjectIdsInflight - reqNo + , outstandingFifo = outstandingFifo' + , numIdsInFlight = numIdsInFlight - reqNo } -- | We check advertised sizes up in a fuzzy way. The advertised and received @@ -418,37 +417,37 @@ collectObjectsImpl :: Map objectId SizeInBytes -> -- | received objects Map objectId object -> - SharedObjectState peerAddr objectId object -> + DecisionGlobalState peerAddr objectId object -> -- | Return list of `objectId` which sizes didn't match or a new state. -- If one of the `object` has wrong size, we return an error. The -- mini-protocol will throw, which will clean the state map from this peer. Either ObjectDiffusionProtocolError - (SharedObjectState peerAddr objectId object) + (DecisionGlobalState peerAddr objectId object) collectObjectsImpl objectSize peerAddr requestedObjectIdsMap receivedObjects - st@SharedObjectState{peerObjectStates} = - -- using `alterF` so the update of `PeerObjectState` is done in one lookup + st@DecisionGlobalState{peerStates} = + -- using `alterF` so the update of `DecisionPeerState` is done in one lookup case Map.alterF (fmap Just . fn . 
fromJust) peerAddr - peerObjectStates of - (Right st', peerObjectStates') -> - Right st'{peerObjectStates = peerObjectStates'} + peerStates of + (Right st', peerStates') -> + Right st'{peerStates = peerStates'} (Left e, _) -> Left $ ProtocolErrorObjectSizeError e where - -- Update `PeerObjectState` and partially update `SharedObjectState` (except of - -- `peerObjectStates`). + -- Update `DecisionPeerState` and partially update `DecisionGlobalState` (except for + -- `peerStates`). fn :: - PeerObjectState objectId object -> + DecisionPeerState objectId object -> ( Either [(objectId, SizeInBytes, SizeInBytes)] - (SharedObjectState peerAddr objectId object) - , PeerObjectState objectId object + (DecisionGlobalState peerAddr objectId object) + , DecisionPeerState objectId object ) fn ps = case wrongSizedObjects of @@ -488,21 +487,18 @@ collectObjectsImpl requestedObjectIds = Map.keysSet requestedObjectIdsMap notReceived = requestedObjectIds Set.\\ Map.keysSet receivedObjects - downloadedObjects' = downloadedObjects ps <> receivedObjects - -- Add not received objects to `unknownObjects` before acknowledging objectIds. + pendingObjects' = pendingObjects ps <> receivedObjects + -- Add not received objects to `requestedButNotReceived` before acknowledging objectIds.
+ requestedButNotReceived' = requestedButNotReceived ps <> notReceived - requestedObjectsInflight' = - assert (requestedObjectIds `Set.isSubsetOf` requestedObjectsInflight ps) $ - requestedObjectsInflight ps Set.\\ requestedObjectIds + inFlight' = + assert (requestedObjectIds `Set.isSubsetOf` inFlight ps) $ + inFlight ps Set.\\ requestedObjectIds requestedSize = fold $ availableObjectIds ps `Map.restrictKeys` requestedObjectIds - requestedObjectsInflightSize' = - assert (requestedObjectsInflightSize ps >= requestedSize) $ - requestedObjectsInflightSize ps - requestedSize -- subtract requested from in-flight - inflightObjects'' = + globalInFlightObjects'' = Map.merge (Map.mapMaybeMissing \_ x -> Just x) (Map.mapMaybeMissing \_ _ -> assert False Nothing) @@ -514,21 +510,16 @@ collectObjectsImpl then Just z else Nothing ) - (inflightObjects st) + (globalInFlightObjects st) (Map.fromSet (const 1) requestedObjectIds) - inflightObjectsSize'' = - assert (inflightObjectsSize st >= requestedSize) $ - inflightObjectsSize st - requestedSize - st' = st - { inflightObjects = inflightObjects'' - , inflightObjectsSize = inflightObjectsSize'' + { globalInFlightObjects = globalInFlightObjects'' } -- - -- Update PeerObjectState + -- Update DecisionPeerState -- -- Remove the downloaded `objectId`s from the availableObjectIds map, this @@ -536,81 +527,79 @@ collectObjectsImpl -- once we collect the `objectId`s. Also restrict keys to `liveSet`. -- -- NOTE: we could remove `notReceived` from `availableObjectIds`; and - -- possibly avoid using `unknownObjects` field at all. + -- possibly avoid using `requestedButNotReceived` field at all. 
-- availableObjectIds'' = availableObjectIds ps `Map.withoutKeys` requestedObjectIds -- Remove all acknowledged `objectId`s from unknown set, but only those - -- which are not present in `unacknowledgedObjectIds'` - unknownObjects'' = - unknownObjects' + -- which are not present in `outstandingFifo'` + requestedButNotReceived'' = + requestedButNotReceived' `Set.intersection` live where -- We cannot use `liveSet` as `unknown <> notReceived` might -- contain `objectIds` which are in `liveSet` but are not `live`. - live = Set.fromList (toList (unacknowledgedObjectIds ps)) + live = Set.fromList (toList (outstandingFifo ps)) ps'' = ps { availableObjectIds = availableObjectIds'' - , unknownObjects = unknownObjects'' - , requestedObjectsInflightSize = requestedObjectsInflightSize' - , requestedObjectsInflight = requestedObjectsInflight' - , downloadedObjects = downloadedObjects' + , requestedButNotReceived = requestedButNotReceived'' + , inFlight = inFlight' + , pendingObjects = pendingObjects' } -- -- Monadic public API -- -type SharedObjectStateVar m peerAddr objectId object = - StrictTVar m (SharedObjectState peerAddr objectId object) +type DecisionGlobalStateVar m peerAddr objectId object = + StrictTVar m (DecisionGlobalState peerAddr objectId object) -newSharedObjectStateVar :: +newDecisionGlobalStateVar :: MonadSTM m => StdGen -> - m (SharedObjectStateVar m peerAddr objectId object) -newSharedObjectStateVar rng = + m (DecisionGlobalStateVar m peerAddr objectId object) +newDecisionGlobalStateVar rng = newTVarIO - SharedObjectState - { peerObjectStates = Map.empty - , inflightObjects = Map.empty - , inflightObjectsSize = 0 - , bufferedObjects = Map.empty + DecisionGlobalState + { peerStates = Map.empty + , globalInFlightObjects = Map.empty + , globalObtainedButNotAckedObjects = Map.empty , referenceCounts = Map.empty - , timedObjects = Map.empty - , inSubmissionToObjectPoolObjects = Map.empty - , peerRng = rng + , globalRententionTimeouts = Map.empty + , 
globalToPoolObjects = Map.empty + , orderRng = rng } -- | Acknowledge `objectId`s, return the number of `objectIds` to be acknowledged to the -- remote side. receivedObjectIds :: - forall m peerAddr idx object objectId. + forall m peerAddr ticketNo object objectId. (MonadSTM m, Ord objectId, Ord peerAddr) => Tracer m (TraceObjectLogic peerAddr objectId object) -> - SharedObjectStateVar m peerAddr objectId object -> - STM m (ObjectPoolSnapshot objectId object idx) -> + DecisionGlobalStateVar m peerAddr objectId object -> + ObjectPoolWriter objectId object m -> peerAddr -> -- | number of requests to subtract from - -- `requestedObjectIdsInflight` - NumObjectIdsToReq -> + -- `numIdsInFlight` + NumObjectIdsReq -> -- | sequence of received `objectIds` StrictSeq objectId -> -- | received `objectId`s with sizes Map objectId SizeInBytes -> m () -receivedObjectIds tracer sharedVar getObjectPoolSnapshot peerAddr reqNo objectIdsSeq objectIdsMap = do +receivedObjectIds tracer sharedVar objectPoolWriter peerAddr reqNo objectIdsSeq objectIdsMap = do st <- atomically $ do - ObjectPoolSnapshot{objectpoolHasObject} <- getObjectPoolSnapshot + hasObject <- opwHasObject objectPoolWriter stateTVar sharedVar - ((\a -> (a, a)) . receivedObjectIdsImpl objectpoolHasObject peerAddr reqNo objectIdsSeq objectIdsMap) - traceWith tracer (TraceSharedObjectState "receivedObjectIds" st) + ((\a -> (a, a)) . receivedObjectIdsImpl hasObject peerAddr reqNo objectIdsSeq objectIdsMap) + traceWith tracer (TraceDecisionGlobalState "receivedObjectIds" st) --- | Include received `object`s in `SharedObjectState`. Return number of `objectIds` +-- | Include received `object`s in `DecisionGlobalState`. Return number of `objectIds` -- to be acknowledged and list of `object` to be added to the objectpool. collectObjects :: forall m peerAddr object objectId. 
@@ -622,7 +611,7 @@ collectObjects :: ) => Tracer m (TraceObjectLogic peerAddr objectId object) -> (object -> SizeInBytes) -> - SharedObjectStateVar m peerAddr objectId object -> + DecisionGlobalStateVar m peerAddr objectId object -> peerAddr -> -- | set of requested objectIds with their announced size Map objectId SizeInBytes -> @@ -641,6 +630,6 @@ collectObjects tracer objectSize sharedVar peerAddr objectIdsRequested objectsMa r@Left{} -> pure r case r of Right st -> - traceWith tracer (TraceSharedObjectState "collectObjects" st) + traceWith tracer (TraceDecisionGlobalState "collectObjects" st) $> Nothing Left e -> return (Just e) diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Types.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Types.hs index 1f0ab5519c..cc5f805ccf 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Types.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Types.hs @@ -11,16 +11,16 @@ {-# LANGUAGE TypeApplications #-} module Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.Types - ( -- * PeerObjectState - PeerObjectState (..) + ( -- * DecisionPeerState + DecisionPeerState (..) - -- * SharedObjectState - , SharedObjectState (..) + -- * DecisionGlobalState + , DecisionGlobalState (..) -- * Decisions , ObjectsToObjectPool (..) - , ObjectDecision (..) - , emptyObjectDecision + , PeerDecision (..) + , emptyPeerDecision , TraceObjectLogic (..) , ObjectDiffusionInitDelay (..) 
, defaultObjectDiffusionInitDelay @@ -66,34 +66,30 @@ data ObjectDiffusionLogicVersion deriving (Eq, Show, Enum, Bounded) -- --- PeerObjectState, SharedObjectState +-- DecisionPeerState, DecisionGlobalState -- -data PeerObjectState objectId object = PeerObjectState - { unacknowledgedObjectIds :: !(StrictSeq objectId) +data DecisionPeerState objectId object = DecisionPeerState + { outstandingFifo :: !(StrictSeq objectId) -- ^ Those transactions (by their identifier) that the client has told -- us about, and which we have not yet acknowledged. This is kept in -- the order in which the client gave them to us. This is the same order -- in which we submit them to the objectpool (or for this example, the final -- result order). It is also the order we acknowledge in. - , availableObjectIds :: !(Map objectId SizeInBytes) + , availableObjectIds :: !(Set objectId) -- ^ Set of known transaction ids which can be requested from this peer. - , requestedObjectIdsInflight :: !NumObjectIdsToReq + , numIdsInFlight :: !NumObjectIdsReq -- ^ The number of transaction identifiers that we have requested but -- which have not yet been replied to. We need to track this it keep -- our requests within the limit on the number of unacknowledged objectIds. - , requestedObjectsInflightSize :: !SizeInBytes - -- ^ The size in bytes of transactions that we have requested but which - -- have not yet been replied to. We need to track this to keep our - -- requests within the `maxObjectsSizeInflight` limit. - , requestedObjectsInflight :: !(Set objectId) + , inFlight :: !(Set objectId) -- ^ The set of requested `objectId`s. - , unknownObjects :: !(Set objectId) - -- ^ A subset of `unacknowledgedObjectIds` which were unknown to the peer + , requestedButNotReceived :: !(Set objectId) + -- ^ A subset of `outstandingFifo` which were unknown to the peer -- (i.e. requested but not received). We need to track these `objectId`s -- since they need to be acknowledged. 
-- - -- We track these `objectId` per peer, rather than in `bufferedObjects` map, + -- We track these `objectId` per peer, rather than in `globalObtainedButNotAckedObjects` map, -- since that could potentially lead to corrupting the node, not being -- able to download a `object` which is needed & available from other nodes. , score :: !Double @@ -102,16 +98,16 @@ data PeerObjectState objectId object = PeerObjectState -- zero. , scoreTs :: !Time -- ^ Timestamp for the last time `score` was drained. - , downloadedObjects :: !(Map objectId object) + , pendingObjects :: !(Map objectId object) -- ^ A set of OBJECTs downloaded from the peer. They are not yet -- acknowledged and haven't been sent to the objectpool yet. -- -- Life cycle of entries: -- * added when a object is downloaded (see `collectObjectsImpl`) - -- * follows `unacknowledgedObjectIds` (see `acknowledgeObjectIds`) - , toObjectPoolObjects :: !(Map objectId object) + -- * follows `outstandingFifo` (see `acknowledgeObjectIds`) + , toPoolObjects :: !(Map objectId object) -- ^ A set of OBJECTs on their way to the objectpool. - -- Tracked here so that we can cleanup `inSubmissionToObjectPoolObjects` if the + -- Tracked here so that we can cleanup `globalToPoolObjects` if the -- peer dies. -- -- Life cycle of entries: @@ -125,62 +121,60 @@ instance ( NoThunks objectId , NoThunks object ) => - NoThunks (PeerObjectState objectId object) + NoThunks (DecisionPeerState objectId object) -- | Shared state of all `ObjectDiffusion` clients. -- --- New `objectId` enters `unacknowledgedObjectIds` it is also added to `availableObjectIds` +-- New `objectId` enters `outstandingFifo` it is also added to `availableObjectIds` -- and `referenceCounts` (see `acknowledgeObjectIdsImpl`). -- -- When a `objectId` id is selected to be downloaded, it's added to --- `requestedObjectsInflightSize` (see +-- `inFlightSize` (see -- `Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.Decision.pickObjectsToDownload`). 
-- --- When the request arrives, the `objectId` is removed from `inflightObjects`. It --- might be added to `unknownObjects` if the server didn't have that `objectId`, or --- it's added to `bufferedObjects` (see `collectObjectsImpl`). +-- When the request arrives, the `objectId` is removed from `globalInFlightObjects`. It +-- might be added to `requestedButNotReceived` if the server didn't have that `objectId`, or +-- it's added to `globalObtainedButNotAckedObjects` (see `collectObjectsImpl`). -- -- Whenever we choose `objectId` to acknowledge (either in `acknowledobjectsIdsImpl`, -- `collectObjectsImpl` or -- `Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.Decision.pickObjectsToDownload`, we also -- recalculate `referenceCounts` and only keep live `objectId`s in other maps (e.g. --- `availableObjectIds`, `bufferedObjects`, `unknownObjects`). -data SharedObjectState peerAddr objectId object = SharedObjectState - { peerObjectStates :: !(Map peerAddr (PeerObjectState objectId object)) +-- `availableObjectIds`, `globalObtainedButNotAckedObjects`, `requestedButNotReceived`). +data DecisionGlobalState peerAddr objectId object = DecisionGlobalState + { peerStates :: !(Map peerAddr (DecisionPeerState objectId object)) -- ^ Map of peer states. -- -- /Invariant:/ for peerAddr's which are registered using `withPeer`, -- there's always an entry in this map even if the set of `objectId`s is -- empty. - , inflightObjects :: !(Map objectId Int) + , globalInFlightObjects :: !(Map objectId Int) -- ^ Set of transactions which are in-flight (have already been -- requested) together with multiplicities (from how many peers it is -- currently in-flight) -- -- This set can intersect with `availableObjectIds`. - , inflightObjectsSize :: !SizeInBytes - -- ^ Overall size of all `object`s in-flight. 
- , bufferedObjects :: !(Map objectId (Maybe object)) + , globalObtainedButNotAckedObjects :: !(Map objectId (Maybe object)) -- ^ Map of `object` which: -- -- * were downloaded and added to the objectpool, -- * are already in the objectpool (`Nothing` is inserted in that case), -- -- We only keep live `objectId`, e.g. ones which `objectId` is unacknowledged by - -- at least one peer or has a `timedObjects` entry. + -- at least one peer or has a `globalRententionTimeouts` entry. -- -- /Note:/ `objectId`s which `object` were unknown by a peer are tracked - -- separately in `unknownObjects`. + -- separately in `requestedButNotReceived`. -- -- /Note:/ previous implementation also needed to explicitly track -- `objectId`s which were already acknowledged, but are still unacknowledged. -- In this implementation, this is done using reference counting. -- -- This map is useful to acknowledge `objectId`s, it's basically taking the - -- longest prefix which contains entries in `bufferedObjects` or `unknownObjects`. + -- longest prefix which contains entries in `globalObtainedButNotAckedObjects` or `requestedButNotReceived`. , referenceCounts :: !(Map objectId Int) - -- ^ We track reference counts of all unacknowledged and timedObjects objectIds. - -- Once the count reaches 0, a object is removed from `bufferedObjects`. + -- ^ We track reference counts of all unacknowledged and globalRententionTimeouts objectIds. + -- Once the count reaches 0, a object is removed from `globalObtainedButNotAckedObjects`. -- -- The `bufferedObject` map contains a subset of `objectId` which -- `referenceCounts` contains. 
@@ -188,11 +182,11 @@ data SharedObjectState peerAddr objectId object = SharedObjectState -- /Invariants:/ -- -- * the objectId count is equal to multiplicity of objectId in all - -- `unacknowledgedObjectIds` sequences; - -- * @Map.keysSet bufferedObjects `Set.isSubsetOf` Map.keysSet referenceCounts@; + -- `outstandingFifo` sequences; + -- * @Map.keysSet globalObtainedButNotAckedObjects `Set.isSubsetOf` Map.keysSet referenceCounts@; -- * all counts are positive integers. - , timedObjects :: !(Map Time [objectId]) - -- ^ A set of timeouts for objectIds that have been added to bufferedObjects after being + , globalRententionTimeouts :: !(Map Time [objectId]) + -- ^ A set of timeouts for objectIds that have been added to globalObtainedButNotAckedObjects after being -- inserted into the objectpool. -- -- We need these short timeouts to avoid re-downloading a `object`. We could @@ -200,18 +194,18 @@ data SharedObjectState peerAddr objectId object = SharedObjectState -- continent presents us it again. -- -- Every objectId entry has a reference count in `referenceCounts`. - , inSubmissionToObjectPoolObjects :: !(Map objectId Int) + , globalToPoolObjects :: !(Map objectId Int) -- ^ A set of objectIds that have been downloaded by a peer and are on their -- way to the objectpool. We won't issue further fetch-requests for OBJECTs in -- this state. We track these objects to not re-download them from another -- peer. -- -- * We subtract from the counter when a given object is added or rejected by - -- the objectpool or do that for all objects in `toObjectPoolObjects` when a peer is + -- the objectpool or do that for all objects in `toPoolObjects` when a peer is -- unregistered. -- * We add to the counter when a given object is selected to be added to the -- objectpool in `pickObjectsToDownload`. 
- , peerRng :: !StdGen + , orderRng :: !StdGen -- ^ Rng used to randomly order peers } deriving (Eq, Show, Generic) @@ -222,7 +216,7 @@ instance , NoThunks objectId , NoThunks StdGen ) => - NoThunks (SharedObjectState peerAddr objectId object) + NoThunks (DecisionGlobalState peerAddr objectId object) -- -- Decisions @@ -242,10 +236,10 @@ newtype ObjectsToObjectPool objectId object = ObjectsToObjectPool {listOfObjects -- (e.g. it won't be returned by `filterActivePeers`) for longer, and thus the -- expensive `makeDecision` computation will not need to take that peer into -- account. -data ObjectDecision objectId object = ObjectDecision - { objectIdsToAcknowledge :: !NumObjectIdsToAck +data PeerDecision objectId object = PeerDecision + { objectIdsToAcknowledge :: !NumObjectIdsAck -- ^ objectId's to acknowledge - , objectIdsToRequest :: !NumObjectIdsToReq + , objectIdsToRequest :: !NumObjectIdsReq -- ^ number of objectId's to request , objectPipelineObjectIds :: !Bool -- ^ the object-submission protocol only allows to pipeline `objectId`'s requests @@ -260,23 +254,23 @@ data ObjectDecision objectId object = ObjectDecision -- | A non-commutative semigroup instance. -- -- /note:/ this instance must be consistent with `pickObjectsToDownload` and how --- `PeerObjectState` is updated. It is designed to work with `TMergeVar`s. -instance Ord objectId => Semigroup (ObjectDecision objectId object) where - ObjectDecision +-- `DecisionPeerState` is updated. It is designed to work with `TMergeVar`s. 
+instance Ord objectId => Semigroup (PeerDecision objectId object) where + PeerDecision { objectIdsToAcknowledge , objectIdsToRequest , objectPipelineObjectIds = _ignored , objectsToRequest , objectsToObjectPool } - <> ObjectDecision + <> PeerDecision { objectIdsToAcknowledge = objectIdsToAcknowledge' , objectIdsToRequest = objectIdsToRequest' , objectPipelineObjectIds = objectPipelineObjectIds' , objectsToRequest = objectsToRequest' , objectsToObjectPool = objectsToObjectPool' } = - ObjectDecision + PeerDecision { objectIdsToAcknowledge = objectIdsToAcknowledge + objectIdsToAcknowledge' , objectIdsToRequest = objectIdsToRequest + objectIdsToRequest' , objectPipelineObjectIds = objectPipelineObjectIds' @@ -285,9 +279,9 @@ instance Ord objectId => Semigroup (ObjectDecision objectId object) where } -- | A no-op decision. -emptyObjectDecision :: ObjectDecision objectId object -emptyObjectDecision = - ObjectDecision +emptyPeerDecision :: PeerDecision objectId object +emptyPeerDecision = + PeerDecision { objectIdsToAcknowledge = 0 , objectIdsToRequest = 0 , objectPipelineObjectIds = False @@ -297,8 +291,8 @@ emptyObjectDecision = -- | ObjectLogic tracer. data TraceObjectLogic peerAddr objectId object - = TraceSharedObjectState String (SharedObjectState peerAddr objectId object) - | TraceObjectDecisions (Map peerAddr (ObjectDecision objectId object)) + = TraceDecisionGlobalState String (DecisionGlobalState peerAddr objectId object) + | TracePeerDecisions (Map peerAddr (PeerDecision objectId object)) deriving Show data ProcessedObjectCount = ProcessedObjectCount @@ -314,7 +308,7 @@ data ProcessedObjectCount = ProcessedObjectCount -- submission logic requires. -- -- This is provided to the object submission logic by the consensus layer. 
-data ObjectDiffusionObjectPoolWriter objectId object idx m +data ObjectDiffusionObjectPoolWriter objectId object ticketNo m = ObjectDiffusionObjectPoolWriter { objectId :: object -> objectId -- ^ Compute the transaction id from a transaction. @@ -347,14 +341,14 @@ data TraceObjectDiffusionInbound objectId object -- | Server received 'MsgDone' TraceObjectInboundTerminated - | TraceObjectInboundDecision (ObjectDecision objectId object) + | TraceObjectInboundDecision (PeerDecision objectId object) deriving (Eq, Show) data ObjectDiffusionCounters = ObjectDiffusionCounters { numOfOutstandingObjectIds :: Int -- ^ objectIds which are not yet downloaded. This is a diff of keys sets of - -- `referenceCounts` and a sum of `bufferedObjects` and + -- `referenceCounts` and a sum of `globalObtainedButNotAckedObjects` and -- `inbubmissionToObjectPoolObjects` maps. , numOfBufferedObjects :: Int -- ^ number of all buffered objects (downloaded or not available) @@ -367,24 +361,24 @@ data ObjectDiffusionCounters mkObjectDiffusionCounters :: Ord objectId => - SharedObjectState peerAddr objectId object -> + DecisionGlobalState peerAddr objectId object -> ObjectDiffusionCounters mkObjectDiffusionCounters - SharedObjectState - { inflightObjects - , bufferedObjects + DecisionGlobalState + { globalInFlightObjects + , globalObtainedButNotAckedObjects , referenceCounts - , inSubmissionToObjectPoolObjects + , globalToPoolObjects } = ObjectDiffusionCounters { numOfOutstandingObjectIds = Set.size $ Map.keysSet referenceCounts - Set.\\ Map.keysSet bufferedObjects - Set.\\ Map.keysSet inSubmissionToObjectPoolObjects - , numOfBufferedObjects = Map.size bufferedObjects - , numOfInSubmissionToObjectPoolObjects = Map.size inSubmissionToObjectPoolObjects - , numOfObjectIdsInflight = getSum $ foldMap Sum inflightObjects + Set.\\ Map.keysSet globalObtainedButNotAckedObjects + Set.\\ Map.keysSet globalToPoolObjects + , numOfBufferedObjects = Map.size globalObtainedButNotAckedObjects + , 
numOfInSubmissionToObjectPoolObjects = Map.size globalToPoolObjects + , numOfObjectIdsInflight = getSum $ foldMap Sum globalInFlightObjects } data ObjectDiffusionProtocolError From 511ed4f0bbdcb4a375754e1eab33fcdb1f3043ec Mon Sep 17 00:00:00 2001 From: Nicolas BACQUEY Date: Wed, 1 Oct 2025 17:18:24 +0200 Subject: [PATCH 04/43] wipper --- .../ObjectDiffusion/Inbound/V2/Decision.hs | 85 +++++++------------ .../ObjectDiffusion/Inbound/V2/Policy.hs | 9 +- .../ObjectDiffusion/Inbound/V2/State.hs | 22 ++--- 3 files changed, 45 insertions(+), 71 deletions(-) diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Decision.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Decision.hs index 2af0e48429..107b7989fd 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Decision.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Decision.hs @@ -89,8 +89,8 @@ orderByRejections salt = -- | Internal state of `pickObjectsToDownload` computation. data St peerAddr objectId object = St - { stInflightSize :: !SizeInBytes - -- ^ size of all `object`s in-flight. + { stInflightNum :: !NumObjects + -- ^ number of all `object`s in-flight. , stInflight :: !(Map objectId Int) -- ^ `objectId`s in-flight. , stAcknowledged :: !(Map objectId Int) @@ -107,9 +107,9 @@ data St peerAddr objectId object -- * pick objects from the set of available object's (in `objectId` order, note these sets -- might be different for different peers). 
-- * pick objects until the peers in-flight limit (we can go over the limit by one object) --- (`objectsSizeInflightPerPeer` limit) +-- (`objectsNumInflightPerPeer` limit) -- * pick objects until the overall in-flight limit (we can go over the limit by one object) --- (`maxObjectsSizeInflight` limit) +-- (`maxObjectsNumInflight` limit) -- * each object can be downloaded simultaneously from at most -- `objectInflightMultiplicity` peers. pickObjectsToDownload :: @@ -127,14 +127,13 @@ pickObjectsToDownload :: ) pickObjectsToDownload policy@PeerDecisionPolicy - { objectsSizeInflightPerPeer - , maxObjectsSizeInflight + { objectsNumInflightPerPeer + , maxObjectsNumInflight , objectInflightMultiplicity } sharedState@DecisionGlobalState { peerStates , globalInFlightObjects - , globalInFlightObjectsSize , globalObtainedButNotAckedObjects , globalToPoolObjects , referenceCounts @@ -145,7 +144,7 @@ pickObjectsToDownload -- initial state St { stInflight = globalInFlightObjects - , stInflightSize = globalInFlightObjectsSize + , stInflightNum = globalInFlightObjectsNum , stAcknowledged = Map.empty , stInSubmissionToObjectPoolObjects = Map.keysSet globalToPoolObjects } @@ -162,7 +161,7 @@ pickObjectsToDownload accumFn st@St { stInflight - , stInflightSize + , stInflightNum , stAcknowledged , stInSubmissionToObjectPoolObjects } @@ -171,15 +170,14 @@ pickObjectsToDownload { availableObjectIds , requestedButNotReceived , inFlight - , inFlightSize } ) = - let sizeInflightAll :: SizeInBytes - sizeInflightOther :: SizeInBytes + let sizeInflightAll :: NumObjects + sizeInflightOther :: NumObjects - sizeInflightAll = stInflightSize - sizeInflightOther = sizeInflightAll - inFlightSize - in if sizeInflightAll >= maxObjectsSizeInflight + sizeInflightAll = stInflightNum + sizeInflightOther = sizeInflightAll - Set.size inFlight + in if sizeInflightAll >= maxObjectsNumInflight then let ( numObjectIdsToAck , numObjectIdsToReq @@ -224,23 +222,23 @@ pickObjectsToDownload ) ) else - let 
inFlightSize' :: SizeInBytes - objectsToRequestMap :: Map objectId SizeInBytes + let inFlightNum' :: NumObjects + objectsToRequestMap :: Set objectId - (inFlightSize', objectsToRequestMap) = + (inFlightNum', objectsToRequestMap) = -- inner fold: fold available `objectId`s -- -- Note: although `Map.foldrWithKey` could be used here, it -- does not allow to short circuit the fold, unlike -- `foldWithState`. foldWithState - ( \(objectId, (objectSize, inflightMultiplicity)) sizeInflight -> + ( \(objectId, (_objectSize, inflightMultiplicity)) sizeInflight -> if -- note that we pick `objectId`'s as long the `s` is - -- smaller or equal to `objectsSizeInflightPerPeer`. - sizeInflight <= objectsSizeInflightPerPeer + -- smaller or equal to `objectsNumInflightPerPeer`. + sizeInflight <= objectsNumInflightPerPeer -- overall `object`'s in-flight must be smaller than - -- `maxObjectsSizeInflight` - && sizeInflight + sizeInflightOther <= maxObjectsSizeInflight + -- `maxObjectsNumInflight` + && sizeInflight + sizeInflightOther <= maxObjectsNumInflight -- the transaction must not be downloaded from more -- than `objectInflightMultiplicity` peers simultaneously && inflightMultiplicity < objectInflightMultiplicity @@ -261,26 +259,20 @@ pickObjectsToDownload stInflight -- remove `object`s which were already downloaded by some -- other peer or are in-flight or unknown by this peer. - `Map.withoutKeys` ( Map.keysSet globalObtainedButNotAckedObjects + `Set.unions` ( Map.keysSet globalObtainedButNotAckedObjects <> inFlight <> requestedButNotReceived <> stInSubmissionToObjectPoolObjects ) ) - inFlightSize + inFlightNum -- pick from `objectId`'s which are available from that given -- peer. Since we are folding a dictionary each `objectId` -- will be selected only once from a given peer (at least -- in each round). 
objectsToRequest = Map.keysSet objectsToRequestMap - peerObjectState' = - peerObjectState - { inFlightSize = inFlightSize' - , inFlight = - inFlight - <> objectsToRequest - } + peerObjectState' = peerObjectState {inFlight = inFlight <> objectsToRequest} ( numObjectIdsToAck , numObjectIdsToReq @@ -307,7 +299,7 @@ pickObjectsToDownload -- we can request `objectId`s & `object`s ( St { stInflight = stInflight' - , stInflightSize = sizeInflightOther + inFlightSize' + , stInflightNum = undefined , stAcknowledged = stAcknowledged' , stInSubmissionToObjectPoolObjects = stInSubmissionToObjectPoolObjects' } @@ -330,7 +322,6 @@ pickObjectsToDownload -- there are no `objectId`s to request, only `object`s. ( st { stInflight = stInflight' - , stInflightSize = sizeInflightOther + inFlightSize' , stInSubmissionToObjectPoolObjects = stInSubmissionToObjectPoolObjects' } , @@ -349,7 +340,6 @@ pickObjectsToDownload gn ( St { stInflight - , stInflightSize , stAcknowledged } , as @@ -381,7 +371,6 @@ pickObjectsToDownload in ( sharedState { peerStates = peerStates' , globalInFlightObjects = stInflight - , globalInFlightObjectsSize = stInflightSize , globalObtainedButNotAckedObjects = globalObtainedButNotAckedObjects' , referenceCounts = referenceCounts' , globalToPoolObjects = globalToPoolObjects' @@ -424,7 +413,7 @@ pickObjectsToDownload objectId x --- | Filter peers which can either download a `object` or acknowledge `objectId`s. +-- | Filter peers which can either download an `object` or acknowledge `objectId`s. filterActivePeers :: forall peerAddr objectId object. 
Ord objectId => @@ -435,23 +424,15 @@ filterActivePeers :: filterActivePeers policy@PeerDecisionPolicy { maxUnacknowledgedObjectIds - , objectsSizeInflightPerPeer - , maxObjectsSizeInflight , objectInflightMultiplicity } sharedObjectState@DecisionGlobalState { peerStates , globalObtainedButNotAckedObjects , globalInFlightObjects - , globalInFlightObjectsSize , globalToPoolObjects } - | globalInFlightObjectsSize > maxObjectsSizeInflight = - -- we might be able to request objectIds, we cannot download objects - Map.filter fn peerStates - | otherwise = - -- we might be able to request objectIds or objects. - Map.filter gn peerStates + = Map.filter gn peerStates where unrequestable = Map.keysSet (Map.filter (>= objectInflightMultiplicity) globalInFlightObjects) @@ -478,7 +459,6 @@ filterActivePeers { outstandingFifo , numIdsInFlight , inFlight - , inFlightSize , availableObjectIds , requestedButNotReceived } = @@ -486,16 +466,15 @@ filterActivePeers && numIdsInFlight + numOfUnacked <= maxUnacknowledgedObjectIds && objectIdsToRequest > 0 ) - || (underSizeLimit && not (Map.null downloadable)) + || (not (Set.null downloadable)) where numOfUnacked = fromIntegral (StrictSeq.length outstandingFifo) - underSizeLimit = inFlightSize <= objectsSizeInflightPerPeer downloadable = availableObjectIds - `Map.withoutKeys` inFlight - `Map.withoutKeys` requestedButNotReceived - `Map.withoutKeys` unrequestable - `Map.withoutKeys` Map.keysSet globalToPoolObjects + `Set.difference` inFlight + `Set.difference` requestedButNotReceived + `Set.difference` unrequestable + `Set.difference` Map.keysSet globalToPoolObjects -- Split `outstandingFifo'` into the longest prefix of `objectId`s which -- can be acknowledged and the unacknowledged `objectId`s. 
diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Policy.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Policy.hs index 49ef5569ca..244aa618d8 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Policy.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Policy.hs @@ -2,6 +2,7 @@ module Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.Policy ( PeerDecisionPolicy (..) + , NumObjects , defaultPeerDecisionPolicy -- * Re-exports @@ -26,9 +27,9 @@ data PeerDecisionPolicy = PeerDecisionPolicy -- Configuration of object decision logic. -- - objectsSizeInflightPerPeer :: !NumObjects + objectsNumInflightPerPeer :: !NumObjects -- ^ a limit of objects in-flight from a single peer, plus or minus 1. - , maxObjectsSizeInflight :: !NumObjects + , maxObjectsNumInflight :: !NumObjects -- ^ a limit of object size in-flight from all peers, plus or minus 1 , objectInflightMultiplicity :: !Int -- ^ from how many peers download the `objectId` simultaneously @@ -49,8 +50,8 @@ defaultPeerDecisionPolicy = PeerDecisionPolicy { maxNumObjectIdsRequest = 3 , maxUnacknowledgedObjectIds = 10 -- must be the same as objectDiffusionMaxUnacked - , objectsSizeInflightPerPeer = NumObjects 6 - , maxObjectsSizeInflight = NumObjects 20 + , objectsNumInflightPerPeer = NumObjects 6 + , maxObjectsNumInflight = NumObjects 20 , objectInflightMultiplicity = 2 , globalObtainedButNotAckedObjectsMinLifetime = 2 , scoreRate = 0.1 diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/State.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/State.hs index db362dc350..9b6c2e104e 100644 --- 
a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/State.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/State.hs @@ -4,6 +4,7 @@ {-# LANGUAGE NamedFieldPuns #-} {-# LANGUAGE RankNTypes #-} {-# LANGUAGE ScopedTypeVariables #-} +{-# LANGUAGE TupleSections #-} module Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.State ( -- * Core API @@ -137,10 +138,7 @@ acknowledgeObjectIds -- the set of live `objectIds` liveSet = Set.fromList (toList outstandingFifo') - - availableObjectIds' = - availableObjectIds - `Map.restrictKeys` liveSet + availableObjectIds' = availableObjectIds `Set.intersection` liveSet -- We remove all acknowledged `objectId`s which are not in -- `outstandingFifo''`, but also return the unknown set before any @@ -370,7 +368,7 @@ receivedObjectIdsImpl -- Note: we prefer to keep the `object` if it's already in `globalObtainedButNotAckedObjects`. globalObtainedButNotAckedObjects' = globalObtainedButNotAckedObjects - <> Map.map (const Nothing) ignoredObjectIds + <> Map.fromList ((, Nothing) <$> Set.toList ignoredObjectIds) referenceCounts' = Foldable.foldl' @@ -495,8 +493,6 @@ collectObjectsImpl assert (requestedObjectIds `Set.isSubsetOf` inFlight ps) $ inFlight ps Set.\\ requestedObjectIds - requestedSize = fold $ availableObjectIds ps `Map.restrictKeys` requestedObjectIds - -- subtract requested from in-flight globalInFlightObjects'' = Map.merge @@ -529,9 +525,7 @@ collectObjectsImpl -- NOTE: we could remove `notReceived` from `availableObjectIds`; and -- possibly avoid using `requestedButNotReceived` field at all. 
-- - availableObjectIds'' = - availableObjectIds ps - `Map.withoutKeys` requestedObjectIds + availableObjectIds'' = availableObjectIds ps `Set.difference` requestedObjectIds -- Remove all acknowledged `objectId`s from unknown set, but only those -- which are not present in `outstandingFifo'` @@ -588,15 +582,15 @@ receivedObjectIds :: NumObjectIdsReq -> -- | sequence of received `objectIds` StrictSeq objectId -> - -- | received `objectId`s with sizes - Map objectId SizeInBytes -> + -- | received `objectId`s + Set objectId -> m () -receivedObjectIds tracer sharedVar objectPoolWriter peerAddr reqNo objectIdsSeq objectIdsMap = do +receivedObjectIds tracer sharedVar objectPoolWriter peerAddr reqNo objectIdsSeq objectIds = do st <- atomically $ do hasObject <- opwHasObject objectPoolWriter stateTVar sharedVar - ((\a -> (a, a)) . receivedObjectIdsImpl hasObject peerAddr reqNo objectIdsSeq objectIdsMap) + ((\a -> (a, a)) . receivedObjectIdsImpl hasObject peerAddr reqNo objectIdsSeq objectIds) traceWith tracer (TraceDecisionGlobalState "receivedObjectIds" st) -- | Include received `object`s in `DecisionGlobalState`. 
Return number of `objectIds` From 6ee28bf7151a2727102216c23bbf693ac983b20f Mon Sep 17 00:00:00 2001 From: Thomas BAGREL Date: Fri, 3 Oct 2025 15:43:20 +0200 Subject: [PATCH 05/43] Finish most refactor on Types.hs --- .../ObjectDiffusion/Inbound/V2.hs | 42 +- .../ObjectDiffusion/Inbound/V2/Decision.hs | 330 ++++++++-------- .../ObjectDiffusion/Inbound/V2/Policy.hs | 64 ++- .../ObjectDiffusion/Inbound/V2/Registry.hs | 196 +++++----- .../ObjectDiffusion/Inbound/V2/State.hs | 298 +++++++------- .../ObjectDiffusion/Inbound/V2/Types.hs | 369 +++++++----------- 6 files changed, 610 insertions(+), 689 deletions(-) diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2.hs index 1565ab7af1..b27d3a2740 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2.hs @@ -22,8 +22,8 @@ module Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2 , newObjectObjectPoolSem , DecisionGlobalStateVar , newDecisionGlobalStateVar - , PeerDecisionPolicy (..) - , defaultPeerDecisionPolicy + , DecisionPolicy (..) + , defaultDecisionPolicy ) where import Control.Exception (assert) @@ -80,27 +80,27 @@ objectDiffusionInbound serverIdle = do -- Block on next decision. 
object@PeerDecision
-        { objectsToRequest = objectsToRequest
-        , objectsToObjectPool = ObjectsToObjectPool{listOfObjectsToObjectPool}
+        { pdObjectsToReqIds = pdObjectsToReqIds
+        , pdObjectsOwtPool = pdObjectsOwtPool
         } <-
         readPeerDecision
       traceWith tracer (TraceObjectInboundDecision object)
-      let !collected = length listOfObjectsToObjectPool
+      let !collected = length pdObjectsOwtPool
 
-      -- Only attempt to add OBJECTs if we have some work to do
+      -- Only attempt to add objects if we have some work to do
       when (collected > 0) $ do
         -- submitObjectToObjectPool traces:
         -- \* `TraceObjectDiffusionProcessed`,
         -- \* `TraceObjectInboundAddedToObjectPool`, and
         -- \* `TraceObjectInboundRejectedFromObjectPool`
         -- events.
-        mapM_ (uncurry $ submitObjectToObjectPool tracer) listOfObjectsToObjectPool
+        mapM_ (uncurry $ submitObjectToObjectPool tracer) pdObjectsOwtPool
 
       -- TODO:
       -- We can update the state so that other `object-submission` servers will
       -- not try to add these objects to the objectpool.
-      if Map.null objectsToRequest
+      if Map.null pdObjectsToReqIds
         then serverReqObjectIds Zero object
         else serverReqObjects object
 
@@ -108,10 +108,10 @@ objectDiffusionInbound
     serverReqObjects ::
       PeerDecision objectId object ->
       m (ServerStIdle Z objectId object m ())
-    serverReqObjects object@PeerDecision{objectsToRequest = objectsToRequest} =
+    serverReqObjects object@PeerDecision{pdObjectsToReqIds = pdObjectsToReqIds} =
       pure $
         SendMsgRequestObjectsPipelined
-          objectsToRequest
+          pdObjectsToReqIds
           (serverReqObjectIds (Succ Zero) object)
 
     serverReqObjectIds ::
@@ -121,7 +121,7 @@ objectDiffusionInbound
       m (ServerStIdle n objectId object m ())
     serverReqObjectIds
       n
-      PeerDecision{objectIdsToRequest = 0} =
+      PeerDecision{pdIdsToReq = 0} =
         case n of
           Zero -> serverIdle
           Succ _ -> handleReplies n
@@ -132,9 +132,9 @@ objectDiffusionInbound
       -- mini-protocol.
Zero PeerDecision - { objectIdsToAcknowledge = objectIdsToAck - , objectPipelineObjectIds = False - , objectIdsToRequest = objectIdsToReq + { pdIdsToAck = objectIdsToAck + , pdCanPipelineIdsReq = False + , pdIdsToReq = objectIdsToReq } = pure $ SendMsgRequestObjectIdsBlocking @@ -154,9 +154,9 @@ objectDiffusionInbound serverReqObjectIds n@Zero PeerDecision - { objectIdsToAcknowledge = objectIdsToAck - , objectPipelineObjectIds = True - , objectIdsToRequest = objectIdsToReq + { pdIdsToAck = objectIdsToAck + , pdCanPipelineIdsReq = True + , pdIdsToReq = objectIdsToReq } = pure $ SendMsgRequestObjectIdsPipelined @@ -166,13 +166,13 @@ objectDiffusionInbound serverReqObjectIds n@Succ{} PeerDecision - { objectIdsToAcknowledge = objectIdsToAck - , objectPipelineObjectIds - , objectIdsToRequest = objectIdsToReq + { pdIdsToAck = objectIdsToAck + , pdCanPipelineIdsReq + , pdIdsToReq = objectIdsToReq } = -- it is impossible that we have had `object`'s to request (Succ{} - is an -- evidence for that), but no unacknowledged `objectId`s. - assert objectPipelineObjectIds $ + assert pdCanPipelineIdsReq $ pure $ SendMsgRequestObjectIdsPipelined objectIdsToAck diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Decision.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Decision.hs index 107b7989fd..bf638c86b9 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Decision.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Decision.hs @@ -45,12 +45,12 @@ makeDecisions :: , Hashable peerAddr ) => -- | decision policy - PeerDecisionPolicy -> + DecisionPolicy -> -- | decision context DecisionGlobalState peerAddr objectId object -> -- | list of available peers. 
-- - -- This is a subset of `peerStates` of peers which either: + -- This is a subset of `dgsPeerStates` of peers which either: -- * can be used to download a `object`, -- * can acknowledge some `objectId`s. Map peerAddr (DecisionPeerState objectId object) -> @@ -58,8 +58,8 @@ makeDecisions :: , Map peerAddr (PeerDecision objectId object) ) makeDecisions policy st = - let (salt, rng') = random (orderRng st) - st' = st{orderRng = rng'} + let (salt, rng') = random (dgsRng st) + st' = st{dgsRng = rng'} in fn . pickObjectsToDownload policy st' . orderByRejections salt @@ -70,11 +70,11 @@ makeDecisions policy st = (a, Map peerAddr (PeerDecision objectId object)) fn (a, as) = (a, Map.fromList as) --- | Order peers by how useful the OBJECTs they have provided are. +-- | Order peers by how useful the objects they have provided are. -- --- OBJECTs delivered late will fail to apply because they were included in +-- objects delivered late will fail to apply because they were included in -- a recently adopted block. Peers can race against each other by setting --- `objectInflightMultiplicity` to > 1. In case of a tie a hash of the peerAddr +-- `dpMaxObjectInflightMultiplicity` to > 1. In case of a tie a hash of the peerAddr -- is used as a tie breaker. Since every invocation use a new salt a given -- peerAddr does not have an advantage over time. orderByRejections :: @@ -83,21 +83,21 @@ orderByRejections :: Map peerAddr (DecisionPeerState objectId object) -> [(peerAddr, DecisionPeerState objectId object)] orderByRejections salt = - List.sortOn (\(peerAddr, ps) -> (score ps, hashWithSalt salt peerAddr)) + List.sortOn (\(peerAddr, ps) -> (dpsScore ps, hashWithSalt salt peerAddr)) . Map.toList -- | Internal state of `pickObjectsToDownload` computation. 
-data St peerAddr objectId object - = St - { stInflightNum :: !NumObjects +data DecisionInternalState peerAddr objectId object + = DecisionInternalState + { disNumObjectsInflight :: !NumObjectsReq -- ^ number of all `object`s in-flight. - , stInflight :: !(Map objectId Int) + , disObjectsInflightMultiplicities :: !(Map objectId Int) -- ^ `objectId`s in-flight. - , stAcknowledged :: !(Map objectId Int) + , disObjectsAckedMultiplicities :: !(Map objectId Int) -- ^ acknowledged `objectId` with multiplicities. It is used to update - -- `referenceCounts`. - , stInSubmissionToObjectPoolObjects :: Set objectId - -- ^ OBJECTs on their way to the objectpool. Used to prevent issueing new + -- `dgsObjectReferenceCounts`. + , disObjectsOwtPoolds :: Set objectId + -- ^ objects on their way to the objectpool. Used to prevent issueing new -- fetch requests for them. } @@ -107,18 +107,18 @@ data St peerAddr objectId object -- * pick objects from the set of available object's (in `objectId` order, note these sets -- might be different for different peers). -- * pick objects until the peers in-flight limit (we can go over the limit by one object) --- (`objectsNumInflightPerPeer` limit) +-- (`dpMaxNumObjectsInflightPerPeer` limit) -- * pick objects until the overall in-flight limit (we can go over the limit by one object) --- (`maxObjectsNumInflight` limit) +-- (`dpMaxNumObjectsInflightTotal` limit) -- * each object can be downloaded simultaneously from at most --- `objectInflightMultiplicity` peers. +-- `dpMaxObjectInflightMultiplicity` peers. pickObjectsToDownload :: forall peerAddr objectId object. 
( Ord peerAddr , Ord objectId ) => -- | decision policy - PeerDecisionPolicy -> + DecisionPolicy -> -- | shared state DecisionGlobalState peerAddr objectId object -> [(peerAddr, DecisionPeerState objectId object)] -> @@ -126,89 +126,89 @@ pickObjectsToDownload :: , [(peerAddr, PeerDecision objectId object)] ) pickObjectsToDownload - policy@PeerDecisionPolicy - { objectsNumInflightPerPeer - , maxObjectsNumInflight - , objectInflightMultiplicity + policy@DecisionPolicy + { dpMaxNumObjectsInflightPerPeer + , dpMaxNumObjectsInflightTotal + , dpMaxObjectInflightMultiplicity } sharedState@DecisionGlobalState - { peerStates - , globalInFlightObjects - , globalObtainedButNotAckedObjects - , globalToPoolObjects - , referenceCounts + { dgsPeerStates + , dgsObjectsInflightMultiplicities + , dgsObjectsPending + , dgsObjectsOwtPool + , dgsObjectReferenceCounts } = -- outer fold: fold `[(peerAddr, DecisionPeerState objectId object)]` List.mapAccumR accumFn -- initial state - St - { stInflight = globalInFlightObjects - , stInflightNum = globalInFlightObjectsNum - , stAcknowledged = Map.empty - , stInSubmissionToObjectPoolObjects = Map.keysSet globalToPoolObjects + DecisionInternalState + { disObjectsInflightMultiplicities = dgsObjectsInflightMultiplicities + , disNumObjectsInflight = dgsObjectsInflightMultiplicitiesNum + , disObjectsAckedMultiplicities = Map.empty + , disObjectsOwtPoolds = Map.keysSet dgsObjectsOwtPool } >>> gn where accumFn :: - St peerAddr objectId object -> + DecisionInternalState peerAddr objectId object -> (peerAddr, DecisionPeerState objectId object) -> - ( St peerAddr objectId object + ( DecisionInternalState peerAddr objectId object , ( (peerAddr, DecisionPeerState objectId object) , PeerDecision objectId object ) ) accumFn - st@St - { stInflight - , stInflightNum - , stAcknowledged - , stInSubmissionToObjectPoolObjects + st@DecisionInternalState + { disObjectsInflightMultiplicities + , disNumObjectsInflight + , disObjectsAckedMultiplicities + , 
disObjectsOwtPoolds
        }
      ( peerAddr
      , peerObjectState@DecisionPeerState
-          { availableObjectIds
-          , requestedButNotReceived
-          , inFlight
+          { dpsIdsAvailable
+          , dpsObjectsRequestedButNotReceivedIds
+          , dpsObjectsInflightIds
          }
        ) =
        let sizeInflightAll :: NumObjects
            sizeInflightOther :: NumObjects
 
-            sizeInflightAll = stInflightNum
-            sizeInflightOther = sizeInflightAll - Set.size inFlight
-        in if sizeInflightAll >= maxObjectsNumInflight
+            sizeInflightAll = disNumObjectsInflight
+            sizeInflightOther = sizeInflightAll - Set.size dpsObjectsInflightIds
+        in if sizeInflightAll >= dpMaxNumObjectsInflightTotal
             then
               let ( numObjectIdsToAck
                     , numObjectIdsToReq
-                    , objectsToObjectPool@ObjectsToObjectPool{listOfObjectsToObjectPool}
+                    , pdObjectsOwtPool
                     , RefCountDiff{objectIdsToAck}
                     , peerObjectState'
                     ) =
                       acknowledgeObjectIds policy sharedState peerObjectState
 
-                  stAcknowledged' = Map.unionWith (+) stAcknowledged objectIdsToAck
-                  stInSubmissionToObjectPoolObjects' =
-                    stInSubmissionToObjectPoolObjects
-                      <> Set.fromList (map fst listOfObjectsToObjectPool)
-              in if numIdsInFlight peerObjectState' > 0
+                  disObjectsAckedMultiplicities' = Map.unionWith (+) disObjectsAckedMultiplicities objectIdsToAck
+                  disObjectsOwtPoolds' =
+                    disObjectsOwtPoolds
+                      <> Set.fromList (map fst pdObjectsOwtPool)
+              in if dpsNumIdsInflight peerObjectState' > 0
                     then
                       -- we have objectIds to request
                       ( st
-                          { stAcknowledged = stAcknowledged'
-                          , stInSubmissionToObjectPoolObjects = stInSubmissionToObjectPoolObjects'
+                          { disObjectsAckedMultiplicities = disObjectsAckedMultiplicities'
+                          , disObjectsOwtPoolds = disObjectsOwtPoolds'
                           }
                       ,
                         ( (peerAddr, peerObjectState')
                         , PeerDecision
-                            { objectIdsToAcknowledge = numObjectIdsToAck
-                            , objectIdsToRequest = numObjectIdsToReq
-                            , objectPipelineObjectIds =
+                            { pdIdsToAck = numObjectIdsToAck
+                            , pdIdsToReq = numObjectIdsToReq
+                            , pdCanPipelineIdsReq =
                                 not
                                   . StrictSeq.null
-                                  . outstandingFifo
+                                  . 
dpsOutstandingFifo $ peerObjectState' - , objectsToRequest = Map.empty - , objectsToObjectPool = objectsToObjectPool + , pdObjectsToReqIds = Map.empty + , pdObjectsOwtPool = pdObjectsOwtPool } ) ) @@ -222,10 +222,10 @@ pickObjectsToDownload ) ) else - let inFlightNum' :: NumObjects - objectsToRequestMap :: Set objectId + let dpsObjectsInflightIdsNum' :: NumObjects + pdObjectsToReqIdsMap :: Set objectId - (inFlightNum', objectsToRequestMap) = + (dpsObjectsInflightIdsNum', pdObjectsToReqIdsMap) = -- inner fold: fold available `objectId`s -- -- Note: although `Map.foldrWithKey` could be used here, it @@ -234,121 +234,121 @@ pickObjectsToDownload foldWithState ( \(objectId, (_objectSize, inflightMultiplicity)) sizeInflight -> if -- note that we pick `objectId`'s as long the `s` is - -- smaller or equal to `objectsNumInflightPerPeer`. - sizeInflight <= objectsNumInflightPerPeer + -- smaller or equal to `dpMaxNumObjectsInflightPerPeer`. + sizeInflight <= dpMaxNumObjectsInflightPerPeer -- overall `object`'s in-flight must be smaller than - -- `maxObjectsNumInflight` - && sizeInflight + sizeInflightOther <= maxObjectsNumInflight - -- the transaction must not be downloaded from more - -- than `objectInflightMultiplicity` peers simultaneously - && inflightMultiplicity < objectInflightMultiplicity + -- `dpMaxNumObjectsInflightTotal` + && sizeInflight + sizeInflightOther <= dpMaxNumObjectsInflightTotal + -- the object must not be downloaded from more + -- than `dpMaxObjectInflightMultiplicity` peers simultaneously + && inflightMultiplicity < dpMaxObjectInflightMultiplicity -- TODO: we must validate that `objectSize` is smaller than -- maximum objects size then Just (sizeInflight + objectSize, (objectId, objectSize)) else Nothing ) ( Map.assocs $ - -- merge `availableObjectIds` with `stInflight`, so we don't - -- need to lookup into `stInflight` on every `objectId` which - -- is in `availableObjectIds`. 
+ -- merge `dpsIdsAvailable` with `disObjectsInflightMultiplicities`, so we don't + -- need to lookup into `disObjectsInflightMultiplicities` on every `objectId` which + -- is in `dpsIdsAvailable`. Map.merge (Map.mapMaybeMissing \_objectId -> Just . (,0)) Map.dropMissing (Map.zipWithMatched \_objectId -> (,)) - availableObjectIds - stInflight + dpsIdsAvailable + disObjectsInflightMultiplicities -- remove `object`s which were already downloaded by some -- other peer or are in-flight or unknown by this peer. - `Set.unions` ( Map.keysSet globalObtainedButNotAckedObjects - <> inFlight - <> requestedButNotReceived - <> stInSubmissionToObjectPoolObjects + `Set.unions` ( Map.keysSet dgsObjectsPending + <> dpsObjectsInflightIds + <> dpsObjectsRequestedButNotReceivedIds + <> disObjectsOwtPoolds ) ) - inFlightNum + dpsObjectsInflightIdsNum -- pick from `objectId`'s which are available from that given -- peer. Since we are folding a dictionary each `objectId` -- will be selected only once from a given peer (at least -- in each round). 
- objectsToRequest = Map.keysSet objectsToRequestMap - peerObjectState' = peerObjectState {inFlight = inFlight <> objectsToRequest} + pdObjectsToReqIds = Map.keysSet pdObjectsToReqIdsMap + peerObjectState' = peerObjectState {dpsObjectsInflightIds = dpsObjectsInflightIds <> pdObjectsToReqIds} ( numObjectIdsToAck , numObjectIdsToReq - , objectsToObjectPool@ObjectsToObjectPool{listOfObjectsToObjectPool} + , pdObjectsOwtPool@[(objectId, object)]{listOf[(objectId, object)]} , RefCountDiff{objectIdsToAck} , peerObjectState'' ) = acknowledgeObjectIds policy sharedState peerObjectState' - stAcknowledged' = Map.unionWith (+) stAcknowledged objectIdsToAck + disObjectsAckedMultiplicities' = Map.unionWith (+) disObjectsAckedMultiplicities objectIdsToAck stInflightDelta :: Map objectId Int - stInflightDelta = Map.fromSet (\_ -> 1) objectsToRequest + stInflightDelta = Map.fromSet (\_ -> 1) pdObjectsToReqIds -- note: this is right since every `objectId` -- could be picked at most once - stInflight' :: Map objectId Int - stInflight' = Map.unionWith (+) stInflightDelta stInflight + disObjectsInflightMultiplicities' :: Map objectId Int + disObjectsInflightMultiplicities' = Map.unionWith (+) stInflightDelta disObjectsInflightMultiplicities - stInSubmissionToObjectPoolObjects' = - stInSubmissionToObjectPoolObjects - <> Set.fromList (map fst listOfObjectsToObjectPool) - in if numIdsInFlight peerObjectState'' > 0 + disObjectsOwtPoolds' = + disObjectsOwtPoolds + <> Set.fromList (map fst listOf[(objectId, object)]) + in if dpsNumIdsInflight peerObjectState'' > 0 then -- we can request `objectId`s & `object`s - ( St - { stInflight = stInflight' - , stInflightNum = undefined - , stAcknowledged = stAcknowledged' - , stInSubmissionToObjectPoolObjects = stInSubmissionToObjectPoolObjects' + ( DecisionInternalState + { disObjectsInflightMultiplicities = disObjectsInflightMultiplicities' + , disNumObjectsInflight = undefined + , disObjectsAckedMultiplicities = disObjectsAckedMultiplicities' + , 
disObjectsOwtPoolds = disObjectsOwtPoolds' } , ( (peerAddr, peerObjectState'') , PeerDecision - { objectIdsToAcknowledge = numObjectIdsToAck - , objectPipelineObjectIds = + { pdIdsToAck = numObjectIdsToAck + , pdCanPipelineIdsReq = not . StrictSeq.null - . outstandingFifo + . dpsOutstandingFifo $ peerObjectState'' - , objectIdsToRequest = numObjectIdsToReq - , objectsToRequest = objectsToRequestMap - , objectsToObjectPool = objectsToObjectPool + , pdIdsToReq = numObjectIdsToReq + , pdObjectsToReqIds = pdObjectsToReqIdsMap + , pdObjectsOwtPool = pdObjectsOwtPool } ) ) else -- there are no `objectId`s to request, only `object`s. ( st - { stInflight = stInflight' - , stInSubmissionToObjectPoolObjects = stInSubmissionToObjectPoolObjects' + { disObjectsInflightMultiplicities = disObjectsInflightMultiplicities' + , disObjectsOwtPoolds = disObjectsOwtPoolds' } , ( (peerAddr, peerObjectState'') - , emptyPeerDecision{objectsToRequest = objectsToRequestMap} + , emptyPeerDecision{pdObjectsToReqIds = pdObjectsToReqIdsMap} ) ) gn :: - ( St peerAddr objectId object + ( DecisionInternalState peerAddr objectId object , [((peerAddr, DecisionPeerState objectId object), PeerDecision objectId object)] ) -> ( DecisionGlobalState peerAddr objectId object , [(peerAddr, PeerDecision objectId object)] ) gn - ( St - { stInflight - , stAcknowledged + ( DecisionInternalState + { disObjectsInflightMultiplicities + , disObjectsAckedMultiplicities } , as ) = - let peerStates' = + let dgsPeerStates' = Map.fromList ((\(a, _) -> a) <$> as) - <> peerStates + <> dgsPeerStates - referenceCounts' = + dgsObjectReferenceCounts' = Map.merge (Map.mapMaybeMissing \_ x -> Just x) (Map.mapMaybeMissing \_ _ -> assert False Nothing) @@ -357,35 +357,35 @@ pickObjectsToDownload then Just $! 
x - y else Nothing ) - referenceCounts - stAcknowledged + dgsObjectReferenceCounts + disObjectsAckedMultiplicities - liveSet = Map.keysSet referenceCounts' + liveSet = Map.keysSet dgsObjectReferenceCounts' - globalObtainedButNotAckedObjects' = - globalObtainedButNotAckedObjects + dgsObjectsPending' = + dgsObjectsPending `Map.restrictKeys` liveSet - globalToPoolObjects' = - List.foldl' updateInSubmissionToObjectPoolObjects globalToPoolObjects as + dgsObjectsOwtPool' = + List.foldl' updateInSubmissionToObjectPoolObjects dgsObjectsOwtPool as in ( sharedState - { peerStates = peerStates' - , globalInFlightObjects = stInflight - , globalObtainedButNotAckedObjects = globalObtainedButNotAckedObjects' - , referenceCounts = referenceCounts' - , globalToPoolObjects = globalToPoolObjects' + { dgsPeerStates = dgsPeerStates' + , dgsObjectsInflightMultiplicities = disObjectsInflightMultiplicities + , dgsObjectsPending = dgsObjectsPending' + , dgsObjectReferenceCounts = dgsObjectReferenceCounts' + , dgsObjectsOwtPool = dgsObjectsOwtPool' } , -- exclude empty results mapMaybe ( \((a, _), b) -> case b of PeerDecision - { objectIdsToAcknowledge = 0 - , objectIdsToRequest = 0 - , objectsToRequest - , objectsToObjectPool = ObjectsToObjectPool{listOfObjectsToObjectPool} + { pdIdsToAck = 0 + , pdIdsToReq = 0 + , pdObjectsToReqIds + , pdObjectsOwtPool = [(objectId, object)]{listOf[(objectId, object)]} } - | null objectsToRequest - , null listOfObjectsToObjectPool -> + | null pdObjectsToReqIds + , null listOf[(objectId, object)] -> Nothing _ -> Just (a, b) ) @@ -397,8 +397,8 @@ pickObjectsToDownload Map objectId Int -> (a, PeerDecision objectId object) -> Map objectId Int - updateInSubmissionToObjectPoolObjects m (_, PeerDecision{objectsToObjectPool}) = - List.foldl' fn m (listOfObjectsToObjectPool objectsToObjectPool) + updateInSubmissionToObjectPoolObjects m (_, PeerDecision{pdObjectsOwtPool}) = + List.foldl' fn m (listOf[(objectId, object)] pdObjectsOwtPool) where fn :: Map objectId 
Int -> @@ -418,67 +418,67 @@ filterActivePeers :: forall peerAddr objectId object. Ord objectId => HasCallStack => - PeerDecisionPolicy -> + DecisionPolicy -> DecisionGlobalState peerAddr objectId object -> Map peerAddr (DecisionPeerState objectId object) filterActivePeers - policy@PeerDecisionPolicy - { maxUnacknowledgedObjectIds - , objectInflightMultiplicity + policy@DecisionPolicy + { dpMaxNumObjectsOutstanding + , dpMaxObjectInflightMultiplicity } sharedObjectState@DecisionGlobalState - { peerStates - , globalObtainedButNotAckedObjects - , globalInFlightObjects - , globalToPoolObjects + { dgsPeerStates + , dgsObjectsPending + , dgsObjectsInflightMultiplicities + , dgsObjectsOwtPool } - = Map.filter gn peerStates + = Map.filter gn dgsPeerStates where unrequestable = - Map.keysSet (Map.filter (>= objectInflightMultiplicity) globalInFlightObjects) - <> Map.keysSet globalObtainedButNotAckedObjects + Map.keysSet (Map.filter (>= dpMaxObjectInflightMultiplicity) dgsObjectsInflightMultiplicities) + <> Map.keysSet dgsObjectsPending fn :: DecisionPeerState objectId object -> Bool fn peerObjectState@DecisionPeerState - { numIdsInFlight + { dpsNumIdsInflight } = - numIdsInFlight == 0 + dpsNumIdsInflight == 0 -- if a peer has objectIds in-flight, we cannot request more objectIds or objects. - && numIdsInFlight + numOfUnacked <= maxUnacknowledgedObjectIds - && objectIdsToRequest > 0 + && dpsNumIdsInflight + numOfUnacked <= dpMaxNumObjectsOutstanding + && pdIdsToReq > 0 where - -- Split `outstandingFifo'` into the longest prefix of `objectId`s which + -- Split `dpsOutstandingFifo'` into the longest prefix of `objectId`s which -- can be acknowledged and the unacknowledged `objectId`s. 
- (objectIdsToRequest, _, unackedObjectIds) = splitAcknowledgedObjectIds policy sharedObjectState peerObjectState + (pdIdsToReq, _, unackedObjectIds) = splitAcknowledgedObjectIds policy sharedObjectState peerObjectState numOfUnacked = fromIntegral (StrictSeq.length unackedObjectIds) gn :: DecisionPeerState objectId object -> Bool gn peerObjectState@DecisionPeerState - { outstandingFifo - , numIdsInFlight - , inFlight - , availableObjectIds - , requestedButNotReceived + { dpsOutstandingFifo + , dpsNumIdsInflight + , dpsObjectsInflightIds + , dpsIdsAvailable + , dpsObjectsRequestedButNotReceivedIds } = - ( numIdsInFlight == 0 - && numIdsInFlight + numOfUnacked <= maxUnacknowledgedObjectIds - && objectIdsToRequest > 0 + ( dpsNumIdsInflight == 0 + && dpsNumIdsInflight + numOfUnacked <= dpMaxNumObjectsOutstanding + && pdIdsToReq > 0 ) || (not (Set.null downloadable)) where - numOfUnacked = fromIntegral (StrictSeq.length outstandingFifo) + numOfUnacked = fromIntegral (StrictSeq.length dpsOutstandingFifo) downloadable = - availableObjectIds - `Set.difference` inFlight - `Set.difference` requestedButNotReceived + dpsIdsAvailable + `Set.difference` dpsObjectsInflightIds + `Set.difference` dpsObjectsRequestedButNotReceivedIds `Set.difference` unrequestable - `Set.difference` Map.keysSet globalToPoolObjects + `Set.difference` Map.keysSet dgsObjectsOwtPool - -- Split `outstandingFifo'` into the longest prefix of `objectId`s which + -- Split `dpsOutstandingFifo'` into the longest prefix of `objectId`s which -- can be acknowledged and the unacknowledged `objectId`s. 
- (objectIdsToRequest, _, _) = splitAcknowledgedObjectIds policy sharedObjectState peerObjectState + (pdIdsToReq, _, _) = splitAcknowledgedObjectIds policy sharedObjectState peerObjectState -- -- Auxiliary functions diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Policy.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Policy.hs index 244aa618d8..3d56653a89 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Policy.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Policy.hs @@ -1,59 +1,49 @@ {-# LANGUAGE ImportQualifiedPost #-} module Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.Policy - ( PeerDecisionPolicy (..) - , NumObjects - , defaultPeerDecisionPolicy + ( DecisionPolicy (..) + , defaultDecisionPolicy -- * Re-exports , NumObjectIdsReq (..) ) where import Control.Monad.Class.MonadTime.SI -import Ouroboros.Network.Protocol.ObjectDiffusion.Type (NumObjectIdsReq (..)) - -newtype NumObjects = NumObjects Int - deriving Show +import Ouroboros.Network.Protocol.ObjectDiffusion.Type (NumObjectIdsReq (..), NumObjectsOutstanding, NumObjectsReq (..)) -- | Policy for making decisions -data PeerDecisionPolicy = PeerDecisionPolicy - { maxNumObjectIdsRequest :: !NumObjectIdsReq +data DecisionPolicy = DecisionPolicy + { dpMaxNumObjectIdsReq :: !NumObjectIdsReq -- ^ a maximal number of objectIds requested at once. - , maxUnacknowledgedObjectIds :: !NumObjectIdsReq - -- ^ maximal number of outstandingFifo. Measured in `NumObjectIdsReq` - -- since we enforce this policy by requesting not more objectIds than what - -- this limit allows. - , -- - -- Configuration of object decision logic. 
- -- - - objectsNumInflightPerPeer :: !NumObjects + , dpMaxNumObjectsOutstanding :: !NumObjectsOutstanding + -- ^ maximal number of dpsOutstandingFifo. + , dpMaxNumObjectsInflightPerPeer :: !NumObjectsReq -- ^ a limit of objects in-flight from a single peer, plus or minus 1. - , maxObjectsNumInflight :: !NumObjects + , dpMaxNumObjectsInflightTotal :: !NumObjectsReq -- ^ a limit of object size in-flight from all peers, plus or minus 1 - , objectInflightMultiplicity :: !Int + , dpMaxObjectInflightMultiplicity :: !Int -- ^ from how many peers download the `objectId` simultaneously - , globalObtainedButNotAckedObjectsMinLifetime :: !DiffTime - -- ^ how long OBJECTs that have been added to the objectpool will be - -- kept in the `globalObtainedButNotAckedObjects` cache. - , scoreRate :: !Double - -- ^ rate at which "rejected" OBJECTs drain. Unit: OBJECT/seconds. + , dpMinObtainedButNotAckedObjectsLifetime :: !DiffTime + -- ^ how long objects that have been added to the objectpool will be + -- kept in the `dgsObjectsPending` cache. + , dpScoreDrainRate :: !Double + -- ^ rate at which "rejected" objects drain. Unit: object/seconds. -- TODO: still relevant? - , scoreMax :: !Double + , dpScoreMaxRejections :: !Double -- ^ Maximum number of "rejections". Unit: seconds -- TODO: still relevant? 
} deriving Show -defaultPeerDecisionPolicy :: PeerDecisionPolicy -defaultPeerDecisionPolicy = - PeerDecisionPolicy - { maxNumObjectIdsRequest = 3 - , maxUnacknowledgedObjectIds = 10 -- must be the same as objectDiffusionMaxUnacked - , objectsNumInflightPerPeer = NumObjects 6 - , maxObjectsNumInflight = NumObjects 20 - , objectInflightMultiplicity = 2 - , globalObtainedButNotAckedObjectsMinLifetime = 2 - , scoreRate = 0.1 - , scoreMax = 15 * 60 +defaultDecisionPolicy :: DecisionPolicy +defaultDecisionPolicy = + DecisionPolicy + { dpMaxNumObjectIdsReq = 3 + , dpMaxNumObjectsOutstanding = 10 -- must be the same as objectDiffusionMaxUnacked + , dpMaxNumObjectsInflightPerPeer = NumObjectsReq 6 + , dpMaxNumObjectsInflightTotal = NumObjectsReq 20 + , dpMaxObjectInflightMultiplicity = 2 + , dpMinObtainedButNotAckedObjectsLifetime = 2 + , dpScoreDrainRate = 0.1 + , dpScoreMaxRejections = 15 * 60 } diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Registry.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Registry.hs index f5587190f9..49033f2c43 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Registry.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Registry.hs @@ -78,7 +78,7 @@ data PeerObjectAPI m objectId object = PeerObjectAPI -- \^ requested objectIds Map objectId object -> -- \^ received objects - m (Maybe ObjectDiffusionProtocolError) + m (Maybe ObjectDiffusionInboundError) -- ^ handle received objects , submitObjectToObjectPool :: Tracer m (TraceObjectDiffusionInbound objectId object) -> @@ -105,10 +105,10 @@ withPeer :: , Ord peerAddr , Show peerAddr ) => - Tracer m (TraceObjectLogic peerAddr objectId object) -> + Tracer m (TraceDecisionLogic peerAddr objectId object) -> ObjectChannelsVar m peerAddr objectId 
object -> ObjectObjectPoolSem m -> - PeerDecisionPolicy -> + DecisionPolicy -> DecisionGlobalStateVar m peerAddr objectId object -> ObjectDiffusionObjectPoolReader objectId object ticketNo m -> ObjectDiffusionObjectPoolWriter objectId object ticketNo m -> @@ -123,7 +123,7 @@ withPeer tracer channelsVar (ObjectObjectPoolSem objectpoolSem) - policy@PeerDecisionPolicy{globalObtainedButNotAckedObjectsMinLifetime} + policy@DecisionPolicy{dpMinObtainedButNotAckedObjectsLifetime} sharedStateVar ObjectDiffusionObjectPoolReader{objectpoolGetSnapshot} ObjectDiffusionObjectPoolWriter{objectpoolAddObjects} @@ -173,24 +173,24 @@ withPeer registerPeer :: DecisionGlobalState peerAddr objectId object -> DecisionGlobalState peerAddr objectId object - registerPeer st@DecisionGlobalState{peerStates} = + registerPeer st@DecisionGlobalState{dgsPeerStates} = st - { peerStates = + { dgsPeerStates = Map.insert peerAddr DecisionPeerState - { availableObjectIds = Map.empty - , numIdsInFlight = 0 - , inFlightSize = 0 - , inFlight = Set.empty - , outstandingFifo = StrictSeq.empty - , requestedButNotReceived = Set.empty - , score = 0 - , scoreTs = Time 0 - , pendingObjects = Map.empty - , toPoolObjects = Map.empty + { dpsIdsAvailable = Map.empty + , dpsNumIdsInflight = 0 + , dpsObjectsInflightIdsSize = 0 + , dpsObjectsInflightIds = Set.empty + , dpsOutstandingFifo = StrictSeq.empty + , dpsObjectsRequestedButNotReceivedIds = Set.empty + , dpsScore = 0 + , dpsScoreLastUpdatedAt = Time 0 + , dpsObjectsPending = Map.empty + , dpsObjectsOwtPool = Map.empty } - peerStates + dgsPeerStates } -- TODO: this function needs to be tested! 
@@ -200,29 +200,29 @@ withPeer DecisionGlobalState peerAddr objectId object unregisterPeer st@DecisionGlobalState - { peerStates - , globalObtainedButNotAckedObjects - , referenceCounts - , globalInFlightObjects - , globalInFlightObjectsSize - , globalToPoolObjects + { dgsPeerStates + , dgsObjectsPending + , dgsObjectReferenceCounts + , dgsObjectsInflightMultiplicities + , dgsObjectsInflightMultiplicitiesSize + , dgsObjectsOwtPool } = st - { peerStates = peerStates' - , globalObtainedButNotAckedObjects = globalObtainedButNotAckedObjects' - , referenceCounts = referenceCounts' - , globalInFlightObjects = globalInFlightObjects' - , globalInFlightObjectsSize = globalInFlightObjectsSize' - , globalToPoolObjects = globalToPoolObjects' + { dgsPeerStates = dgsPeerStates' + , dgsObjectsPending = dgsObjectsPending' + , dgsObjectReferenceCounts = dgsObjectReferenceCounts' + , dgsObjectsInflightMultiplicities = dgsObjectsInflightMultiplicities' + , dgsObjectsInflightMultiplicitiesSize = dgsObjectsInflightMultiplicitiesSize' + , dgsObjectsOwtPool = dgsObjectsOwtPool' } where ( DecisionPeerState - { outstandingFifo - , inFlight - , inFlightSize - , toPoolObjects + { dpsOutstandingFifo + , dpsObjectsInflightIds + , dpsObjectsInflightIdsSize + , dpsObjectsOwtPool } - , peerStates' + , dgsPeerStates' ) = Map.alterF ( \case @@ -230,38 +230,38 @@ withPeer Just a -> (a, Nothing) ) peerAddr - peerStates + dgsPeerStates - referenceCounts' = + dgsObjectReferenceCounts' = Foldable.foldl' ( flip $ Map.update \cnt -> if cnt > 1 then Just $! 
pred cnt else Nothing ) - referenceCounts - outstandingFifo + dgsObjectReferenceCounts + dpsOutstandingFifo - liveSet = Map.keysSet referenceCounts' + liveSet = Map.keysSet dgsObjectReferenceCounts' - globalObtainedButNotAckedObjects' = - globalObtainedButNotAckedObjects + dgsObjectsPending' = + dgsObjectsPending `Map.restrictKeys` liveSet - globalInFlightObjects' = Foldable.foldl' purgeInflightObjects globalInFlightObjects inFlight - globalInFlightObjectsSize' = globalInFlightObjectsSize - inFlightSize + dgsObjectsInflightMultiplicities' = Foldable.foldl' purgeInflightObjects dgsObjectsInflightMultiplicities dpsObjectsInflightIds + dgsObjectsInflightMultiplicitiesSize' = dgsObjectsInflightMultiplicitiesSize - dpsObjectsInflightIdsSize -- When we unregister a peer, we need to subtract all objects in the - -- `toPoolObjects`, as they will not be submitted to the objectpool. - globalToPoolObjects' = + -- `dpsObjectsOwtPool`, as they will not be submitted to the objectpool. + dgsObjectsOwtPool' = Foldable.foldl' ( flip $ Map.update \cnt -> if cnt > 1 then Just $! pred cnt else Nothing ) - globalToPoolObjects - (Map.keysSet toPoolObjects) + dgsObjectsOwtPool + (Map.keysSet dpsObjectsOwtPool) purgeInflightObjects m objectId = Map.alter fn objectId m where @@ -293,7 +293,7 @@ withPeer addObject = do mpSnapshot <- atomically objectpoolGetSnapshot - -- Note that checking if the objectpool contains a OBJECT before + -- Note that checking if the objectpool contains a object before -- spending several ms attempting to add it to the pool has -- been judged immoral. 
if objectpoolHasObject mpSnapshot objectId @@ -342,64 +342,64 @@ withPeer _ ObjectRejected st@DecisionGlobalState - { peerStates - , globalToPoolObjects + { dgsPeerStates + , dgsObjectsOwtPool } = st - { peerStates = peerStates' - , globalToPoolObjects = globalToPoolObjects' + { dgsPeerStates = dgsPeerStates' + , dgsObjectsOwtPool = dgsObjectsOwtPool' } where - globalToPoolObjects' = + dgsObjectsOwtPool' = Map.update (\case 1 -> Nothing; n -> Just $! pred n) objectId - globalToPoolObjects + dgsObjectsOwtPool - peerStates' = Map.update fn peerAddr peerStates + dgsPeerStates' = Map.update fn peerAddr dgsPeerStates where - fn ps = Just $! ps{toPoolObjects = Map.delete objectId (toPoolObjects ps)} + fn ps = Just $! ps{dpsObjectsOwtPool = Map.delete objectId (dpsObjectsOwtPool ps)} updateBufferedObject now ObjectAccepted st@DecisionGlobalState - { peerStates - , globalObtainedButNotAckedObjects - , referenceCounts - , globalRententionTimeouts - , globalToPoolObjects + { dgsPeerStates + , dgsObjectsPending + , dgsObjectReferenceCounts + , dgsRententionTimeouts + , dgsObjectsOwtPool } = st - { peerStates = peerStates' - , globalObtainedButNotAckedObjects = globalObtainedButNotAckedObjects' - , globalRententionTimeouts = globalRententionTimeouts' - , referenceCounts = referenceCounts' - , globalToPoolObjects = globalToPoolObjects' + { dgsPeerStates = dgsPeerStates' + , dgsObjectsPending = dgsObjectsPending' + , dgsRententionTimeouts = dgsRententionTimeouts' + , dgsObjectReferenceCounts = dgsObjectReferenceCounts' + , dgsObjectsOwtPool = dgsObjectsOwtPool' } where - globalToPoolObjects' = + dgsObjectsOwtPool' = Map.update (\case 1 -> Nothing; n -> Just $! 
pred n) objectId - globalToPoolObjects + dgsObjectsOwtPool - globalRententionTimeouts' = Map.alter fn (addTime globalObtainedButNotAckedObjectsMinLifetime now) globalRententionTimeouts + dgsRententionTimeouts' = Map.alter fn (addTime dpMinObtainedButNotAckedObjectsLifetime now) dgsRententionTimeouts where fn :: Maybe [objectId] -> Maybe [objectId] fn Nothing = Just [objectId] fn (Just objectIds) = Just $! (objectId : objectIds) - referenceCounts' = Map.alter fn objectId referenceCounts + dgsObjectReferenceCounts' = Map.alter fn objectId dgsObjectReferenceCounts where fn :: Maybe Int -> Maybe Int fn Nothing = Just 1 fn (Just n) = Just $! succ n - globalObtainedButNotAckedObjects' = Map.insert objectId (Just object) globalObtainedButNotAckedObjects + dgsObjectsPending' = Map.insert objectId (Just object) dgsObjectsPending - peerStates' = Map.update fn peerAddr peerStates + dgsPeerStates' = Map.update fn peerAddr dgsPeerStates where - fn ps = Just $! ps{toPoolObjects = Map.delete objectId (toPoolObjects ps)} + fn ps = Just $! ps{dpsObjectsOwtPool = Map.delete objectId (dpsObjectsOwtPool ps)} handleReceivedObjectIds :: NumObjectIdsReq -> @@ -421,12 +421,12 @@ withPeer -- \^ requested objectIds with their announced size Map objectId object -> -- \^ received objects - m (Maybe ObjectDiffusionProtocolError) + m (Maybe ObjectDiffusionInboundError) handleReceivedObjects objectIds objects = collectObjects tracer objectSize sharedStateVar peerAddr objectIds objects - -- Update `score` & `scoreTs` fields of `DecisionPeerState`, return the new - -- updated `score`. + -- Update `dpsScore` & `dpsScoreLastUpdatedAt` fields of `DecisionPeerState`, return the new + -- updated `dpsScore`. -- -- PRECONDITION: the `Double` argument is non-negative. 
countRejectedObjects :: @@ -437,33 +437,33 @@ withPeer | n < 0 = error ("ObjectDiffusion.countRejectedObjects: invariant violation for peer " ++ show peerAddr) countRejectedObjects now n = atomically $ stateTVar sharedStateVar $ \st -> - let (result, peerStates') = Map.alterF fn peerAddr (peerStates st) - in (result, st{peerStates = peerStates'}) + let (result, dgsPeerStates') = Map.alterF fn peerAddr (dgsPeerStates st) + in (result, st{dgsPeerStates = dgsPeerStates'}) where fn :: Maybe (DecisionPeerState objectId object) -> (Double, Maybe (DecisionPeerState objectId object)) fn Nothing = error ("ObjectDiffusion.withPeer: invariant violation for peer " ++ show peerAddr) - fn (Just ps) = (score ps', Just $! ps') + fn (Just ps) = (dpsScore ps', Just $! ps') where ps' = updateRejects policy now n ps updateRejects :: - PeerDecisionPolicy -> + DecisionPolicy -> Time -> Double -> DecisionPeerState objectId object -> DecisionPeerState objectId object -updateRejects _ now 0 pts | score pts == 0 = pts{scoreTs = now} +updateRejects _ now 0 pts | dpsScore pts == 0 = pts{dpsScoreLastUpdatedAt = now} updateRejects - PeerDecisionPolicy{scoreRate, scoreMax} + DecisionPolicy{dpScoreDrainRate, dpScoreMaxRejections} now n - pts@DecisionPeerState{score, scoreTs} = - let duration = diffTime now scoreTs - !drain = realToFrac duration * scoreRate - !drained = max 0 $ score - drain + pts@DecisionPeerState{dpsScore, dpsScoreLastUpdatedAt} = + let duration = diffTime now dpsScoreLastUpdatedAt + !drain = realToFrac duration * dpScoreDrainRate + !drained = max 0 $ dpsScore - drain in pts - { score = min scoreMax $ drained + n - , scoreTs = now + { dpsScore = min dpScoreMaxRejections $ drained + n + , dpsScoreLastUpdatedAt = now } drainRejectionThread :: @@ -473,8 +473,8 @@ drainRejectionThread :: , MonadThread m , Ord objectId ) => - Tracer m (TraceObjectLogic peerAddr objectId object) -> - PeerDecisionPolicy -> + Tracer m (TraceDecisionLogic peerAddr objectId object) -> + DecisionPolicy -> 
DecisionGlobalStateVar m peerAddr objectId object -> m Void drainRejectionThread tracer policy sharedStateVar = do @@ -494,13 +494,13 @@ drainRejectionThread tracer policy sharedStateVar = do st <- readTVar sharedStateVar let ptss = if now > nextDrain - then Map.map (updateRejects policy now 0) (peerStates st) - else peerStates st + then Map.map (updateRejects policy now 0) (dgsPeerStates st) + else dgsPeerStates st st' = tickTimedObjects now st - { peerStates = ptss + { dgsPeerStates = ptss } writeTVar sharedStateVar st' return st' @@ -521,9 +521,9 @@ decisionLogicThread :: , Ord objectId , Hashable peerAddr ) => - Tracer m (TraceObjectLogic peerAddr objectId object) -> + Tracer m (TraceDecisionLogic peerAddr objectId object) -> Tracer m ObjectDiffusionCounters -> - PeerDecisionPolicy -> + DecisionPolicy -> ObjectChannelsVar m peerAddr objectId object -> DecisionGlobalStateVar m peerAddr objectId object -> m Void @@ -557,7 +557,7 @@ decisionLogicThread tracer counterTracer policy objectChannelsVar sharedStateVar objectChannelMap decisions ) - traceWith counterTracer (mkObjectDiffusionCounters st) + traceWith counterTracer (makeObjectDiffusionCounters st) go -- Variant of modifyMVar_ that puts a default value if the MVar is empty. 
@@ -583,9 +583,9 @@ decisionLogicThreads :: , Ord objectId , Hashable peerAddr ) => - Tracer m (TraceObjectLogic peerAddr objectId object) -> + Tracer m (TraceDecisionLogic peerAddr objectId object) -> Tracer m ObjectDiffusionCounters -> - PeerDecisionPolicy -> + DecisionPolicy -> ObjectChannelsVar m peerAddr objectId object -> DecisionGlobalStateVar m peerAddr objectId object -> m Void diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/State.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/State.hs index 9b6c2e104e..2cc19a6d2d 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/State.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/State.hs @@ -57,14 +57,14 @@ acknowledgeObjectIds :: forall peerAddr object objectId. Ord objectId => HasCallStack => - PeerDecisionPolicy -> + DecisionPolicy -> DecisionGlobalState peerAddr objectId object -> DecisionPeerState objectId object -> -- | number of objectId to acknowledge, requests, objects which we can submit to the -- objectpool, objectIds to acknowledge with multiplicities, updated DecisionPeerState. ( NumObjectIdsAck , NumObjectIdsReq - , ObjectsToObjectPool objectId object + , [(objectId, object)] , RefCountDiff objectId , DecisionPeerState objectId object ) @@ -73,78 +73,78 @@ acknowledgeObjectIds policy sharedObjectState ps@DecisionPeerState - { availableObjectIds - , requestedButNotReceived - , numIdsInFlight - , pendingObjects - , score - , toPoolObjects + { dpsIdsAvailable + , dpsObjectsRequestedButNotReceivedIds + , dpsNumIdsInflight + , dpsObjectsPending + , dpsScore + , dpsObjectsOwtPool } = -- We can only acknowledge objectIds when we can request new ones, since -- a `MsgRequestObjectIds` for 0 objectIds is a protocol error. 
- if objectIdsToRequest > 0 + if pdIdsToReq > 0 then - ( objectIdsToAcknowledge - , objectIdsToRequest - , ObjectsToObjectPool objectsToObjectPool + ( pdIdsToAck + , pdIdsToReq + , [(objectId, object)] pdObjectsOwtPool , refCountDiff , ps - { outstandingFifo = outstandingFifo' - , availableObjectIds = availableObjectIds' - , requestedButNotReceived = requestedButNotReceived' - , numIdsInFlight = - numIdsInFlight - + objectIdsToRequest - , pendingObjects = pendingObjects' - , score = score' - , toPoolObjects = toPoolObjects' + { dpsOutstandingFifo = dpsOutstandingFifo' + , dpsIdsAvailable = dpsIdsAvailable' + , dpsObjectsRequestedButNotReceivedIds = dpsObjectsRequestedButNotReceivedIds' + , dpsNumIdsInflight = + dpsNumIdsInflight + + pdIdsToReq + , dpsObjectsPending = dpsObjectsPending' + , dpsScore = dpsScore' + , dpsObjectsOwtPool = dpsObjectsOwtPool' } ) else ( 0 , 0 - , ObjectsToObjectPool objectsToObjectPool + , [(objectId, object)] pdObjectsOwtPool , RefCountDiff Map.empty - , ps{toPoolObjects = toPoolObjects'} + , ps{dpsObjectsOwtPool = dpsObjectsOwtPool'} ) where - -- Split `outstandingFifo'` into the longest prefix of `objectId`s which + -- Split `dpsOutstandingFifo'` into the longest prefix of `objectId`s which -- can be acknowledged and the unacknowledged `objectId`s. 
- (objectIdsToRequest, acknowledgedObjectIds, outstandingFifo') = + (pdIdsToReq, acknowledgedObjectIds, dpsOutstandingFifo') = splitAcknowledgedObjectIds policy sharedObjectState ps - objectsToObjectPool = + pdObjectsOwtPool = [ (objectId, object) | objectId <- toList toObjectPoolObjectIds - , objectId `Map.notMember` globalObtainedButNotAckedObjects sharedObjectState - , object <- maybeToList $ objectId `Map.lookup` pendingObjects + , objectId `Map.notMember` dgsObjectsPending sharedObjectState + , object <- maybeToList $ objectId `Map.lookup` dpsObjectsPending ] (toObjectPoolObjectIds, _) = - StrictSeq.spanl (`Map.member` pendingObjects) acknowledgedObjectIds + StrictSeq.spanl (`Map.member` dpsObjectsPending) acknowledgedObjectIds - objectsToObjectPoolMap = Map.fromList objectsToObjectPool + pdObjectsOwtPoolMap = Map.fromList pdObjectsOwtPool - toPoolObjects' = toPoolObjects <> objectsToObjectPoolMap + dpsObjectsOwtPool' = dpsObjectsOwtPool <> pdObjectsOwtPoolMap - (pendingObjects', ackedDownloadedObjects) = Map.partitionWithKey (\objectId _ -> objectId `Set.member` liveSet) pendingObjects - -- latexObjects: transactions which were downloaded by another peer before we + (dpsObjectsPending', ackedDownloadedObjects) = Map.partitionWithKey (\objectId _ -> objectId `Set.member` liveSet) dpsObjectsPending + -- latexObjects: objects which were downloaded by another peer before we -- downloaded them; it relies on that `objectToObjectPool` filters out - -- `globalObtainedButNotAckedObjects`. + -- `dgsObjectsPending`. 
lateObjects = Map.filterWithKey - (\objectId _ -> objectId `Map.notMember` objectsToObjectPoolMap) + (\objectId _ -> objectId `Map.notMember` pdObjectsOwtPoolMap) ackedDownloadedObjects - score' = score + fromIntegral (Map.size lateObjects) + dpsScore' = dpsScore + fromIntegral (Map.size lateObjects) -- the set of live `objectIds` - liveSet = Set.fromList (toList outstandingFifo') - availableObjectIds' = availableObjectIds `Set.intersection` liveSet + liveSet = Set.fromList (toList dpsOutstandingFifo') + dpsIdsAvailable' = dpsIdsAvailable `Set.intersection` liveSet -- We remove all acknowledged `objectId`s which are not in - -- `outstandingFifo''`, but also return the unknown set before any - -- modifications (which is used to compute `outstandingFifo''` + -- `dpsOutstandingFifo''`, but also return the unknown set before any + -- modifications (which is used to compute `dpsOutstandingFifo''` -- above). - requestedButNotReceived' = requestedButNotReceived `Set.intersection` liveSet + dpsObjectsRequestedButNotReceivedIds' = dpsObjectsRequestedButNotReceivedIds `Set.intersection` liveSet refCountDiff = RefCountDiff $ @@ -157,55 +157,55 @@ acknowledgeObjectIds fn Nothing = Just 1 fn (Just n) = Just $! n + 1 - objectIdsToAcknowledge :: NumObjectIdsAck - objectIdsToAcknowledge = fromIntegral $ StrictSeq.length acknowledgedObjectIds + pdIdsToAck :: NumObjectIdsAck + pdIdsToAck = fromIntegral $ StrictSeq.length acknowledgedObjectIds -- | Split unacknowledged objectIds into acknowledged and unacknowledged parts, also -- return number of objectIds which can be requested. 
splitAcknowledgedObjectIds :: Ord objectId => HasCallStack => - PeerDecisionPolicy -> + DecisionPolicy -> DecisionGlobalState peer objectId object -> DecisionPeerState objectId object -> -- | number of objectIds to request, acknowledged objectIds, unacknowledged objectIds (NumObjectIdsReq, StrictSeq.StrictSeq objectId, StrictSeq.StrictSeq objectId) splitAcknowledgedObjectIds - PeerDecisionPolicy - { maxUnacknowledgedObjectIds - , maxNumObjectIdsRequest + DecisionPolicy + { dpMaxNumObjectsOutstanding + , dpMaxNumObjectIdsReq } DecisionGlobalState - { globalObtainedButNotAckedObjects + { dgsObjectsPending } DecisionPeerState - { outstandingFifo - , requestedButNotReceived - , pendingObjects - , inFlight - , numIdsInFlight + { dpsOutstandingFifo + , dpsObjectsRequestedButNotReceivedIds + , dpsObjectsPending + , dpsObjectsInflightIds + , dpsNumIdsInflight } = - (objectIdsToRequest, acknowledgedObjectIds', outstandingFifo') + (pdIdsToReq, acknowledgedObjectIds', dpsOutstandingFifo') where - (acknowledgedObjectIds', outstandingFifo') = + (acknowledgedObjectIds', dpsOutstandingFifo') = StrictSeq.spanl ( \objectId -> - ( objectId `Map.member` globalObtainedButNotAckedObjects - || objectId `Set.member` requestedButNotReceived - || objectId `Map.member` pendingObjects + ( objectId `Map.member` dgsObjectsPending + || objectId `Set.member` dpsObjectsRequestedButNotReceivedIds + || objectId `Map.member` dpsObjectsPending ) - && objectId `Set.notMember` inFlight + && objectId `Set.notMember` dpsObjectsInflightIds ) - outstandingFifo - numOfUnacked = StrictSeq.length outstandingFifo + dpsOutstandingFifo + numOfUnacked = StrictSeq.length dpsOutstandingFifo numOfAcked = StrictSeq.length acknowledgedObjectIds' - unackedAndRequested = fromIntegral numOfUnacked + numIdsInFlight + unackedAndRequested = fromIntegral numOfUnacked + dpsNumIdsInflight - objectIdsToRequest = - assert (unackedAndRequested <= maxUnacknowledgedObjectIds) $ - assert (numIdsInFlight <= maxNumObjectIdsRequest) $ 
- (maxUnacknowledgedObjectIds - unackedAndRequested + fromIntegral numOfAcked) - `min` (maxNumObjectIdsRequest - numIdsInFlight) + pdIdsToReq = + assert (unackedAndRequested <= dpMaxNumObjectsOutstanding) $ + assert (dpsNumIdsInflight <= dpMaxNumObjectIdsReq) $ + (dpMaxNumObjectsOutstanding - unackedAndRequested + fromIntegral numOfAcked) + `min` (dpMaxNumObjectIdsReq - dpsNumIdsInflight) -- | `RefCountDiff` represents a map of `objectId` which can be acknowledged -- together with their multiplicities. @@ -218,7 +218,7 @@ updateRefCounts :: Map objectId Int -> RefCountDiff objectId -> Map objectId Int -updateRefCounts referenceCounts (RefCountDiff diff) = +updateRefCounts dgsObjectReferenceCounts (RefCountDiff diff) = Map.merge (Map.mapMaybeMissing \_ x -> Just x) (Map.mapMaybeMissing \_ _ -> Nothing) @@ -229,7 +229,7 @@ updateRefCounts referenceCounts (RefCountDiff diff) = then Just $! x - y else Nothing ) - referenceCounts + dgsObjectReferenceCounts diff tickTimedObjects :: @@ -241,12 +241,12 @@ tickTimedObjects :: tickTimedObjects now st@DecisionGlobalState - { globalRententionTimeouts - , referenceCounts - , globalObtainedButNotAckedObjects + { dgsRententionTimeouts + , dgsObjectReferenceCounts + , dgsObjectsPending } = - let (expiredObjects', globalRententionTimeouts') = - case Map.splitLookup now globalRententionTimeouts of + let (expiredObjects', dgsRententionTimeouts') = + case Map.splitLookup now dgsRententionTimeouts of (expired, Just objectIds, timed) -> ( expired -- Map.split doesn't include the `now` entry in the map , Map.insert now objectIds timed @@ -254,13 +254,13 @@ tickTimedObjects (expired, Nothing, timed) -> (expired, timed) refDiff = Map.foldl' fn Map.empty expiredObjects' - referenceCounts' = updateRefCounts referenceCounts (RefCountDiff refDiff) - liveSet = Map.keysSet referenceCounts' - globalObtainedButNotAckedObjects' = globalObtainedButNotAckedObjects `Map.restrictKeys` liveSet + dgsObjectReferenceCounts' = updateRefCounts 
dgsObjectReferenceCounts (RefCountDiff refDiff) + liveSet = Map.keysSet dgsObjectReferenceCounts' + dgsObjectsPending' = dgsObjectsPending `Map.restrictKeys` liveSet in st - { globalRententionTimeouts = globalRententionTimeouts' - , referenceCounts = referenceCounts' - , globalObtainedButNotAckedObjects = globalObtainedButNotAckedObjects' + { dgsRententionTimeouts = dgsRententionTimeouts' + , dgsObjectReferenceCounts = dgsObjectReferenceCounts' + , dgsObjectsPending = dgsObjectsPending' } where fn :: @@ -295,7 +295,7 @@ receivedObjectIdsImpl :: (objectId -> Bool) -> peerAddr -> -- | number of requests to subtract from - -- `numIdsInFlight` + -- `dpsNumIdsInflight` NumObjectIdsReq -> -- | sequence of received `objectIds` StrictSeq objectId -> @@ -310,17 +310,17 @@ receivedObjectIdsImpl objectIdsSeq objectIdsSet st@DecisionGlobalState - { peerStates - , globalObtainedButNotAckedObjects - , referenceCounts + { dgsPeerStates + , dgsObjectsPending + , dgsObjectReferenceCounts } = -- using `alterF` so the update of `DecisionPeerState` is done in one lookup case Map.alterF (fmap Just . fn . fromJust) peerAddr - peerStates of - (st', peerStates') -> - st'{peerStates = peerStates'} + dgsPeerStates of + (st', dgsPeerStates') -> + st'{dgsPeerStates = dgsPeerStates'} where -- update `DecisionPeerState` and return number of `objectId`s to acknowledged and -- updated `DecisionGlobalState`. @@ -331,9 +331,9 @@ receivedObjectIdsImpl ) fn ps@DecisionPeerState - { availableObjectIds - , numIdsInFlight - , outstandingFifo + { dpsIdsAvailable + , dpsNumIdsInflight + , dpsOutstandingFifo } = (st', ps') where @@ -343,34 +343,34 @@ receivedObjectIdsImpl -- Divide the new objectIds in two: those that are already in the objectpool -- and those that are not. We'll request some objects from the latter. 
- (ignoredObjectIds, availableObjectIdsSet) = + (ignoredObjectIds, dpsIdsAvailableSet) = Set.partition objectpoolHasObject objectIdsSet - -- Add all `objectIds` from `availableObjectIdsMap` which are not + -- Add all `objectIds` from `dpsIdsAvailableMap` which are not -- unacknowledged or already buffered. Unacknowledged objectIds must have - -- already been added to `availableObjectIds` map before. - availableObjectIds' = + -- already been added to `dpsIdsAvailable` map before. + dpsIdsAvailable' = Set.foldl (\m objectId -> Set.insert objectId m) - availableObjectIds + dpsIdsAvailable ( Set.filter ( \objectId -> - objectId `notElem` outstandingFifo - && objectId `Map.notMember` globalObtainedButNotAckedObjects + objectId `notElem` dpsOutstandingFifo + && objectId `Map.notMember` dgsObjectsPending ) - availableObjectIdsSet + dpsIdsAvailableSet ) - -- Add received objectIds to `outstandingFifo`. - outstandingFifo' = outstandingFifo <> objectIdsSeq + -- Add received objectIds to `dpsOutstandingFifo`. + dpsOutstandingFifo' = dpsOutstandingFifo <> objectIdsSeq -- Add ignored `objects` to buffered ones. - -- Note: we prefer to keep the `object` if it's already in `globalObtainedButNotAckedObjects`. - globalObtainedButNotAckedObjects' = - globalObtainedButNotAckedObjects + -- Note: we prefer to keep the `object` if it's already in `dgsObjectsPending`. + dgsObjectsPending' = + dgsObjectsPending <> Map.fromList ((, Nothing) <$> Set.toList ignoredObjectIds) - referenceCounts' = + dgsObjectReferenceCounts' = Foldable.foldl' ( flip $ Map.alter @@ -379,21 +379,21 @@ receivedObjectIdsImpl Just cnt -> Just $! 
succ cnt ) ) - referenceCounts + dgsObjectReferenceCounts objectIdsSeq st' = st - { globalObtainedButNotAckedObjects = globalObtainedButNotAckedObjects' - , referenceCounts = referenceCounts' + { dgsObjectsPending = dgsObjectsPending' + , dgsObjectReferenceCounts = dgsObjectReferenceCounts' } ps' = assert - (numIdsInFlight >= reqNo) + (dpsNumIdsInflight >= reqNo) ps - { availableObjectIds = availableObjectIds' - , outstandingFifo = outstandingFifo' - , numIdsInFlight = numIdsInFlight - reqNo + { dpsIdsAvailable = dpsIdsAvailable' + , dpsOutstandingFifo = dpsOutstandingFifo' + , dpsNumIdsInflight = dpsNumIdsInflight - reqNo } -- | We check advertised sizes up in a fuzzy way. The advertised and received @@ -420,26 +420,26 @@ collectObjectsImpl :: -- If one of the `object` has wrong size, we return an error. The -- mini-protocol will throw, which will clean the state map from this peer. Either - ObjectDiffusionProtocolError + ObjectDiffusionInboundError (DecisionGlobalState peerAddr objectId object) collectObjectsImpl objectSize peerAddr requestedObjectIdsMap receivedObjects - st@DecisionGlobalState{peerStates} = + st@DecisionGlobalState{dgsPeerStates} = -- using `alterF` so the update of `DecisionPeerState` is done in one lookup case Map.alterF (fmap Just . fn . fromJust) peerAddr - peerStates of - (Right st', peerStates') -> - Right st'{peerStates = peerStates'} + dgsPeerStates of + (Right st', dgsPeerStates') -> + Right st'{dgsPeerStates = dgsPeerStates'} (Left e, _) -> Left $ ProtocolErrorObjectSizeError e where -- Update `DecisionPeerState` and partially update `DecisionGlobalState` (except of - -- `peerStates`). + -- `dgsPeerStates`). 
fn :: DecisionPeerState objectId object -> ( Either @@ -485,16 +485,16 @@ collectObjectsImpl requestedObjectIds = Map.keysSet requestedObjectIdsMap notReceived = requestedObjectIds Set.\\ Map.keysSet receivedObjects - pendingObjects' = pendingObjects ps <> receivedObjects - -- Add not received objects to `requestedButNotReceived` before acknowledging objectIds. - requestedButNotReceived' = requestedButNotReceived ps <> notReceived + dpsObjectsPending' = dpsObjectsPending ps <> receivedObjects + -- Add not received objects to `dpsObjectsRequestedButNotReceivedIds` before acknowledging objectIds. + dpsObjectsRequestedButNotReceivedIds' = dpsObjectsRequestedButNotReceivedIds ps <> notReceived - inFlight' = - assert (requestedObjectIds `Set.isSubsetOf` inFlight ps) $ - inFlight ps Set.\\ requestedObjectIds + dpsObjectsInflightIds' = + assert (requestedObjectIds `Set.isSubsetOf` dpsObjectsInflightIds ps) $ + dpsObjectsInflightIds ps Set.\\ requestedObjectIds -- subtract requested from in-flight - globalInFlightObjects'' = + dgsObjectsInflightMultiplicities'' = Map.merge (Map.mapMaybeMissing \_ x -> Just x) (Map.mapMaybeMissing \_ _ -> assert False Nothing) @@ -506,43 +506,43 @@ collectObjectsImpl then Just z else Nothing ) - (globalInFlightObjects st) + (dgsObjectsInflightMultiplicities st) (Map.fromSet (const 1) requestedObjectIds) st' = st - { globalInFlightObjects = globalInFlightObjects'' + { dgsObjectsInflightMultiplicities = dgsObjectsInflightMultiplicities'' } -- -- Update DecisionPeerState -- - -- Remove the downloaded `objectId`s from the availableObjectIds map, this + -- Remove the downloaded `objectId`s from the dpsIdsAvailable map, this -- guarantees that we won't attempt to download the `objectIds` from this peer -- once we collect the `objectId`s. Also restrict keys to `liveSet`. -- - -- NOTE: we could remove `notReceived` from `availableObjectIds`; and - -- possibly avoid using `requestedButNotReceived` field at all. 
+ -- NOTE: we could remove `notReceived` from `dpsIdsAvailable`; and + -- possibly avoid using `dpsObjectsRequestedButNotReceivedIds` field at all. -- - availableObjectIds'' = availableObjectIds ps `Set.difference` requestedObjectIds + dpsIdsAvailable'' = dpsIdsAvailable ps `Set.difference` requestedObjectIds -- Remove all acknowledged `objectId`s from unknown set, but only those - -- which are not present in `outstandingFifo'` - requestedButNotReceived'' = - requestedButNotReceived' + -- which are not present in `dpsOutstandingFifo'` + dpsObjectsRequestedButNotReceivedIds'' = + dpsObjectsRequestedButNotReceivedIds' `Set.intersection` live where -- We cannot use `liveSet` as `unknown <> notReceived` might -- contain `objectIds` which are in `liveSet` but are not `live`. - live = Set.fromList (toList (outstandingFifo ps)) + live = Set.fromList (toList (dpsOutstandingFifo ps)) ps'' = ps - { availableObjectIds = availableObjectIds'' - , requestedButNotReceived = requestedButNotReceived'' - , inFlight = inFlight' - , pendingObjects = pendingObjects' + { dpsIdsAvailable = dpsIdsAvailable'' + , dpsObjectsRequestedButNotReceivedIds = dpsObjectsRequestedButNotReceivedIds'' + , dpsObjectsInflightIds = dpsObjectsInflightIds' + , dpsObjectsPending = dpsObjectsPending' } -- @@ -559,13 +559,13 @@ newDecisionGlobalStateVar :: newDecisionGlobalStateVar rng = newTVarIO DecisionGlobalState - { peerStates = Map.empty - , globalInFlightObjects = Map.empty - , globalObtainedButNotAckedObjects = Map.empty - , referenceCounts = Map.empty - , globalRententionTimeouts = Map.empty - , globalToPoolObjects = Map.empty - , orderRng = rng + { dgsPeerStates = Map.empty + , dgsObjectsInflightMultiplicities = Map.empty + , dgsObjectsPending = Map.empty + , dgsObjectReferenceCounts = Map.empty + , dgsRententionTimeouts = Map.empty + , dgsObjectsOwtPool = Map.empty + , dgsRng = rng } -- | Acknowledge `objectId`s, return the number of `objectIds` to be acknowledged to the @@ -573,12 +573,12 @@ 
newDecisionGlobalStateVar rng = receivedObjectIds :: forall m peerAddr ticketNo object objectId. (MonadSTM m, Ord objectId, Ord peerAddr) => - Tracer m (TraceObjectLogic peerAddr objectId object) -> + Tracer m (TraceDecisionLogic peerAddr objectId object) -> DecisionGlobalStateVar m peerAddr objectId object -> ObjectPoolWriter objectId object m -> peerAddr -> -- | number of requests to subtract from - -- `numIdsInFlight` + -- `dpsNumIdsInflight` NumObjectIdsReq -> -- | sequence of received `objectIds` StrictSeq objectId -> @@ -603,7 +603,7 @@ collectObjects :: , Show objectId , Typeable objectId ) => - Tracer m (TraceObjectLogic peerAddr objectId object) -> + Tracer m (TraceDecisionLogic peerAddr objectId object) -> (object -> SizeInBytes) -> DecisionGlobalStateVar m peerAddr objectId object -> peerAddr -> @@ -613,7 +613,7 @@ collectObjects :: Map objectId object -> -- | number of objectIds to be acknowledged and objects to be added to the -- objectpool - m (Maybe ObjectDiffusionProtocolError) + m (Maybe ObjectDiffusionInboundError) collectObjects tracer objectSize sharedVar peerAddr objectIdsRequested objectsMap = do r <- atomically $ do st <- readTVar sharedVar diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Types.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Types.hs index cc5f805ccf..535dd7b3ba 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Types.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Types.hs @@ -3,12 +3,9 @@ {-# LANGUAGE ExistentialQuantification #-} {-# LANGUAGE FlexibleContexts #-} {-# LANGUAGE GADTs #-} -{-# LANGUAGE GeneralisedNewtypeDeriving #-} {-# LANGUAGE ImportQualifiedPost #-} {-# LANGUAGE NamedFieldPuns #-} {-# LANGUAGE ScopedTypeVariables #-} -{-# LANGUAGE StandaloneDeriving 
#-} -{-# LANGUAGE TypeApplications #-} module Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.Types ( -- * DecisionPeerState @@ -18,29 +15,22 @@ module Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.Types , DecisionGlobalState (..) -- * Decisions - , ObjectsToObjectPool (..) , PeerDecision (..) , emptyPeerDecision - , TraceObjectLogic (..) - , ObjectDiffusionInitDelay (..) - , defaultObjectDiffusionInitDelay - - -- * Types shared with V1 + , TraceDecisionLogic (..) - -- ** Various - , ProcessedObjectCount (..) - , ObjectDiffusionLogicVersion (..) + -- * Reporting + , ObjectDiffusionCounters (..) + , makeObjectDiffusionCounters - -- ** ObjectPool API - , ObjectDiffusionObjectPoolWriter (..) + -- * Init delay + , ObjectDiffusionInitDelay (..) + , defaultObjectDiffusionInitDelay - -- ** Traces + -- * Copied from V1 + , NumObjectsProcessed (..) , TraceObjectDiffusionInbound (..) - , ObjectDiffusionCounters (..) - , mkObjectDiffusionCounters - - -- ** Protocol Error - , ObjectDiffusionProtocolError (..) + , ObjectDiffusionInboundError (..) ) where import Control.Exception (Exception (..)) @@ -51,63 +41,59 @@ import Data.Monoid (Sum (..)) import Data.Sequence.Strict (StrictSeq) import Data.Set (Set) import Data.Set qualified as Set -import Data.Typeable (Typeable, eqT, (:~:) (Refl)) import GHC.Generics (Generic) import NoThunks.Class (NoThunks (..)) import Ouroboros.Network.Protocol.ObjectDiffusion.Type import System.Random (StdGen) - --- | Flag to enable/disable the usage of the new object-submission logic. 
-data ObjectDiffusionLogicVersion - = -- | the legacy `Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V1` - ObjectDiffusionLogicV1 - | -- | the new `Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2` - ObjectDiffusionLogicV2 - deriving (Eq, Show, Enum, Bounded) +import Data.Word (Word64) +import Ouroboros.Network.ControlMessage (ControlMessage) -- -- DecisionPeerState, DecisionGlobalState -- data DecisionPeerState objectId object = DecisionPeerState - { outstandingFifo :: !(StrictSeq objectId) - -- ^ Those transactions (by their identifier) that the client has told + { dpsOutstandingFifo :: !(StrictSeq objectId) + -- ^ Those objects (by their identifier) that the client has told -- us about, and which we have not yet acknowledged. This is kept in -- the order in which the client gave them to us. This is the same order - -- in which we submit them to the objectpool (or for this example, the final - -- result order). It is also the order we acknowledge in. - , availableObjectIds :: !(Set objectId) - -- ^ Set of known transaction ids which can be requested from this peer. - , numIdsInFlight :: !NumObjectIdsReq - -- ^ The number of transaction identifiers that we have requested but + -- in which we submit them to the objectpool. It is also the order + -- in which we acknowledge them. + , dpsIdsAvailable :: !(Set objectId) + -- ^ Set of known object ids which can be requested from this peer. + , dpsNumIdsInflight :: !NumObjectIdsReq + -- ^ The number of object identifiers that we have requested but -- which have not yet been replied to. We need to track this it keep -- our requests within the limit on the number of unacknowledged objectIds. - , inFlight :: !(Set objectId) - -- ^ The set of requested `objectId`s. - , requestedButNotReceived :: !(Set objectId) - -- ^ A subset of `outstandingFifo` which were unknown to the peer + , dpsObjectsInflightIds :: !(Set objectId) + -- ^ The set of requested objects (by their ids). 
+ -- , dpsObjectsRequestedButNotReceivedIds :: !(Set objectId) + -- ^ A subset of `dpsOutstandingFifo` which were unknown to the peer -- (i.e. requested but not received). We need to track these `objectId`s -- since they need to be acknowledged. -- - -- We track these `objectId` per peer, rather than in `globalObtainedButNotAckedObjects` map, + -- We track these `objectId` per peer, rather than in `dgsObjectsPending` map, -- since that could potentially lead to corrupting the node, not being -- able to download a `object` which is needed & available from other nodes. - , score :: !Double + -- TODO: for object diffusion, every requested object must be received, so + -- we don't need to track this. But we should disconnect if the peer hasn't + -- sent us exactly the requested object. + , dpsScore :: !Double -- ^ Score is a metric that tracks how usefull a peer has been. -- The larger the value the less usefull peer. It slowly decays towards -- zero. - , scoreTs :: !Time - -- ^ Timestamp for the last time `score` was drained. - , pendingObjects :: !(Map objectId object) - -- ^ A set of OBJECTs downloaded from the peer. They are not yet + , dpsScoreLastUpdatedAt :: !Time + -- ^ Timestamp for the last time `dpsScore` was updated. + , dpsObjectsPending :: !(Map objectId object) + -- ^ A set of objects downloaded from the peer. They are not yet -- acknowledged and haven't been sent to the objectpool yet. -- -- Life cycle of entries: - -- * added when a object is downloaded (see `collectObjectsImpl`) - -- * follows `outstandingFifo` (see `acknowledgeObjectIds`) - , toPoolObjects :: !(Map objectId object) - -- ^ A set of OBJECTs on their way to the objectpool. 
- -- Tracked here so that we can cleanup `globalToPoolObjects` if the + -- * added when a object is downloaded in `collectObjectsImpl` + -- * removed by `acknowledgeObjectIds` (to properly follow `dpsOutstandingFifo`) + , dpsObjectsOwtPool :: !(Map objectId object) + -- ^ A set of objects on their way to the objectpool. + -- Tracked here so that we can cleanup `dgsObjectsOwtPool` if the -- peer dies. -- -- Life cycle of entries: @@ -125,87 +111,78 @@ instance -- | Shared state of all `ObjectDiffusion` clients. -- --- New `objectId` enters `outstandingFifo` it is also added to `availableObjectIds` --- and `referenceCounts` (see `acknowledgeObjectIdsImpl`). +-- New `objectId` enters `dpsOutstandingFifo` it is also added to `dpsIdsAvailable` +-- and `dgsObjectReferenceCounts` (see `acknowledgeObjectIdsImpl`). -- --- When a `objectId` id is selected to be downloaded, it's added to --- `inFlightSize` (see --- `Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.Decision.pickObjectsToDownload`). +-- When the requested object arrives, the corresponding entry is removed from `dgsObjectsInflightMultiplicities` and it is added to `dgsObjectsPending` (see `collectObjectsImpl`). -- --- When the request arrives, the `objectId` is removed from `globalInFlightObjects`. It --- might be added to `requestedButNotReceived` if the server didn't have that `objectId`, or --- it's added to `globalObtainedButNotAckedObjects` (see `collectObjectsImpl`). --- --- Whenever we choose `objectId` to acknowledge (either in `acknowledobjectsIdsImpl`, +-- Whenever we choose an `objectId` to acknowledge (either in `acknowledObjectsIds`, -- `collectObjectsImpl` or --- `Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.Decision.pickObjectsToDownload`, we also --- recalculate `referenceCounts` and only keep live `objectId`s in other maps (e.g. --- `availableObjectIds`, `globalObtainedButNotAckedObjects`, `requestedButNotReceived`). 
+-- `pickObjectsToDownload`, we also +-- recalculate `dgsObjectReferenceCounts` and only keep live `objectId`s in other maps (e.g. +-- `dpsIdsAvailable`, `dgsObjectsPending`). data DecisionGlobalState peerAddr objectId object = DecisionGlobalState - { peerStates :: !(Map peerAddr (DecisionPeerState objectId object)) + { dgsPeerStates :: !(Map peerAddr (DecisionPeerState objectId object)) -- ^ Map of peer states. -- -- /Invariant:/ for peerAddr's which are registered using `withPeer`, -- there's always an entry in this map even if the set of `objectId`s is -- empty. - , globalInFlightObjects :: !(Map objectId Int) - -- ^ Set of transactions which are in-flight (have already been - -- requested) together with multiplicities (from how many peers it is + , dgsObjectsInflightMultiplicities :: !(Map objectId Int) + -- ^ Map from object ids of objects which are in-flight (have already been + -- requested) to their multiplicities (from how many peers it is -- currently in-flight) -- - -- This set can intersect with `availableObjectIds`. - , globalObtainedButNotAckedObjects :: !(Map objectId (Maybe object)) + -- This can intersect with `dpsIdsAvailable`. + , dgsObjectsPending :: !(Map objectId (Maybe object)) -- ^ Map of `object` which: -- -- * were downloaded and added to the objectpool, -- * are already in the objectpool (`Nothing` is inserted in that case), -- -- We only keep live `objectId`, e.g. ones which `objectId` is unacknowledged by - -- at least one peer or has a `globalRententionTimeouts` entry. - -- - -- /Note:/ `objectId`s which `object` were unknown by a peer are tracked - -- separately in `requestedButNotReceived`. + -- at least one peer or has a `dgsRententionTimeouts` entry. -- -- /Note:/ previous implementation also needed to explicitly track -- `objectId`s which were already acknowledged, but are still unacknowledged. -- In this implementation, this is done using reference counting. 
-- - -- This map is useful to acknowledge `objectId`s, it's basically taking the - -- longest prefix which contains entries in `globalObtainedButNotAckedObjects` or `requestedButNotReceived`. - , referenceCounts :: !(Map objectId Int) - -- ^ We track reference counts of all unacknowledged and globalRententionTimeouts objectIds. - -- Once the count reaches 0, a object is removed from `globalObtainedButNotAckedObjects`. + -- This map is useful to acknowledge `objectId`s: it's basically taking the + -- longest prefix which contains entries in `dgsObjectsPending` + , dgsObjectReferenceCounts :: !(Map objectId Int) + -- ^ We track reference counts of all unacknowledged and dgsRententionTimeouts objectIds. + -- Once the count reaches 0, a object is removed from `dgsObjectsPending`. -- - -- The `bufferedObject` map contains a subset of `objectId` which - -- `referenceCounts` contains. + -- The `dgsObjectsOwtPool` map contains a subset of `objectId` which + -- `dgsObjectReferenceCounts` contains. -- -- /Invariants:/ -- -- * the objectId count is equal to multiplicity of objectId in all - -- `outstandingFifo` sequences; - -- * @Map.keysSet globalObtainedButNotAckedObjects `Set.isSubsetOf` Map.keysSet referenceCounts@; + -- `dpsOutstandingFifo` sequences; + -- * @Map.keysSet dgsObjectsPending `Set.isSubsetOf` Map.keysSet dgsObjectReferenceCounts@; -- * all counts are positive integers. - , globalRententionTimeouts :: !(Map Time [objectId]) - -- ^ A set of timeouts for objectIds that have been added to globalObtainedButNotAckedObjects after being + , dgsRententionTimeouts :: !(Map Time [objectId]) + -- ^ A set of timeouts for objectIds that have been added to dgsObjectsPending after being -- inserted into the objectpool. -- -- We need these short timeouts to avoid re-downloading a `object`. We could -- acknowledge this `objectId` to all peers, when a peer from another -- continent presents us it again. -- - -- Every objectId entry has a reference count in `referenceCounts`. 
- , globalToPoolObjects :: !(Map objectId Int) + -- Every objectId entry has a reference count in `dgsObjectReferenceCounts`. + , dgsObjectsOwtPool :: !(Map objectId Int) -- ^ A set of objectIds that have been downloaded by a peer and are on their - -- way to the objectpool. We won't issue further fetch-requests for OBJECTs in + -- way to the objectpool. We won't issue further fetch-requests for objects in -- this state. We track these objects to not re-download them from another -- peer. -- -- * We subtract from the counter when a given object is added or rejected by - -- the objectpool or do that for all objects in `toPoolObjects` when a peer is + -- the objectpool or do that for all objects in `dpsObjectsOwtPool` when a peer is -- unregistered. -- * We add to the counter when a given object is selected to be added to the -- objectpool in `pickObjectsToDownload`. - , orderRng :: !StdGen + , dgsRng :: !StdGen -- ^ Rng used to randomly order peers } deriving (Eq, Show, Generic) @@ -222,9 +199,6 @@ instance -- Decisions -- -newtype ObjectsToObjectPool objectId object = ObjectsToObjectPool {listOfObjectsToObjectPool :: [(objectId, object)]} - deriving newtype (Eq, Show, Semigroup, Monoid) - -- | Decision made by the decision logic. Each peer will receive a 'Decision'. -- -- /note:/ it is rather non-standard to represent a choice between requesting @@ -237,16 +211,16 @@ newtype ObjectsToObjectPool objectId object = ObjectsToObjectPool {listOfObjects -- expensive `makeDecision` computation will not need to take that peer into -- account. 
data PeerDecision objectId object = PeerDecision - { objectIdsToAcknowledge :: !NumObjectIdsAck + { pdIdsToAck :: !NumObjectIdsAck -- ^ objectId's to acknowledge - , objectIdsToRequest :: !NumObjectIdsReq + , pdIdsToReq :: !NumObjectIdsReq -- ^ number of objectId's to request - , objectPipelineObjectIds :: !Bool + , pdCanPipelineIdsReq :: !Bool -- ^ the object-submission protocol only allows to pipeline `objectId`'s requests -- if we have non-acknowledged `objectId`s. - , objectsToRequest :: !(Map objectId SizeInBytes) + , pdObjectsToReqIds :: !(Set objectId) -- ^ objectId's to download. - , objectsToObjectPool :: !(ObjectsToObjectPool objectId object) + , pdObjectsOwtPool :: !(Map objectId object) -- ^ list of `object`s to submit to the objectpool. } deriving (Show, Eq) @@ -257,161 +231,81 @@ data PeerDecision objectId object = PeerDecision -- `DecisionPeerState` is updated. It is designed to work with `TMergeVar`s. instance Ord objectId => Semigroup (PeerDecision objectId object) where PeerDecision - { objectIdsToAcknowledge - , objectIdsToRequest - , objectPipelineObjectIds = _ignored - , objectsToRequest - , objectsToObjectPool + { pdIdsToAck + , pdIdsToReq + , pdCanPipelineIdsReq = _ignored + , pdObjectsToReqIds + , pdObjectsOwtPool } <> PeerDecision - { objectIdsToAcknowledge = objectIdsToAcknowledge' - , objectIdsToRequest = objectIdsToRequest' - , objectPipelineObjectIds = objectPipelineObjectIds' - , objectsToRequest = objectsToRequest' - , objectsToObjectPool = objectsToObjectPool' + { pdIdsToAck = pdIdsToAck' + , pdIdsToReq = pdIdsToReq' + , pdCanPipelineIdsReq = pdCanPipelineIdsReq' + , pdObjectsToReqIds = pdObjectsToReqIds' + , pdObjectsOwtPool = pdObjectsOwtPool' } = PeerDecision - { objectIdsToAcknowledge = objectIdsToAcknowledge + objectIdsToAcknowledge' - , objectIdsToRequest = objectIdsToRequest + objectIdsToRequest' - , objectPipelineObjectIds = objectPipelineObjectIds' - , objectsToRequest = objectsToRequest <> objectsToRequest' - , 
objectsToObjectPool = objectsToObjectPool <> objectsToObjectPool' + { pdIdsToAck = pdIdsToAck + pdIdsToAck' + , pdIdsToReq = pdIdsToReq + pdIdsToReq' + , pdCanPipelineIdsReq = pdCanPipelineIdsReq' + , pdObjectsToReqIds = pdObjectsToReqIds <> pdObjectsToReqIds' + , pdObjectsOwtPool = pdObjectsOwtPool <> pdObjectsOwtPool' } -- | A no-op decision. emptyPeerDecision :: PeerDecision objectId object emptyPeerDecision = PeerDecision - { objectIdsToAcknowledge = 0 - , objectIdsToRequest = 0 - , objectPipelineObjectIds = False - , objectsToRequest = Map.empty - , objectsToObjectPool = mempty + { pdIdsToAck = 0 + , pdIdsToReq = 0 + , pdCanPipelineIdsReq = False + , pdObjectsToReqIds = Set.empty + , pdObjectsOwtPool = mempty } -- | ObjectLogic tracer. -data TraceObjectLogic peerAddr objectId object +data TraceDecisionLogic peerAddr objectId object = TraceDecisionGlobalState String (DecisionGlobalState peerAddr objectId object) | TracePeerDecisions (Map peerAddr (PeerDecision objectId object)) deriving Show -data ProcessedObjectCount = ProcessedObjectCount - { pobjectcAccepted :: Int - -- ^ Just accepted this many transactions. - , pobjectcRejected :: Int - -- ^ Just rejected this many transactions. - , pobjectcScore :: Double - } - deriving (Eq, Show) --- | The consensus layer functionality that the inbound side of the object --- submission logic requires. --- --- This is provided to the object submission logic by the consensus layer. -data ObjectDiffusionObjectPoolWriter objectId object ticketNo m - = ObjectDiffusionObjectPoolWriter - { objectId :: object -> objectId - -- ^ Compute the transaction id from a transaction. - -- - -- This is used in the protocol handler to verify a full transaction - -- matches a previously given transaction id. - , objectpoolAddObjects :: [object] -> m [objectId] - -- ^ Supply a batch of transactions to the objectpool. They are either - -- accepted or rejected individually, but in the order supplied.
- -- - -- The 'objectId's of all transactions that were added successfully are - -- returned. - } - -data TraceObjectDiffusionInbound objectId object - = -- | Number of transactions just about to be inserted. - TraceObjectDiffusionCollected [objectId] - | -- | Just processed transaction pass/fail breakdown. - TraceObjectDiffusionProcessed ProcessedObjectCount - | TraceObjectInboundCanRequestMoreObjects Int - | TraceObjectInboundCannotRequestMoreObjects Int - | TraceObjectInboundAddedToObjectPool [objectId] DiffTime - | TraceObjectInboundRejectedFromObjectPool [objectId] DiffTime - | TraceObjectInboundError ObjectDiffusionProtocolError - | -- - -- messages emitted by the new implementation of the server in - -- "Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.Server"; some of them are also - -- used in this module. - -- - - -- | Server received 'MsgDone' - TraceObjectInboundTerminated - | TraceObjectInboundDecision (PeerDecision objectId object) - deriving (Eq, Show) - data ObjectDiffusionCounters = ObjectDiffusionCounters - { numOfOutstandingObjectIds :: Int + { odcNumObjectsAvailable :: Int -- ^ objectIds which are not yet downloaded. This is a diff of keys sets of - -- `referenceCounts` and a sum of `globalObtainedButNotAckedObjects` and + -- `dgsObjectReferenceCounts` and a sum of `dgsObjectsPending` and -- `inbubmissionToObjectPoolObjects` maps. - , numOfBufferedObjects :: Int + , odcNumObjectsInFlight :: Int + -- ^ number of all in-flight objects. + , odcNumObjectsPending :: Int -- ^ number of all buffered objects (downloaded or not available) - , numOfInSubmissionToObjectPoolObjects :: Int + , odcNumObjectsOwtPool :: Int -- ^ number of all object's which were submitted to the objectpool - , numOfObjectIdsInflight :: Int - -- ^ number of all in-flight objectId's. 
} deriving (Eq, Show) -mkObjectDiffusionCounters :: +makeObjectDiffusionCounters :: Ord objectId => DecisionGlobalState peerAddr objectId object -> ObjectDiffusionCounters -mkObjectDiffusionCounters +makeObjectDiffusionCounters DecisionGlobalState - { globalInFlightObjects - , globalObtainedButNotAckedObjects - , referenceCounts - , globalToPoolObjects + { dgsObjectsInflightMultiplicities + , dgsObjectsPending + , dgsObjectReferenceCounts + , dgsObjectsOwtPool } = ObjectDiffusionCounters - { numOfOutstandingObjectIds = + { odcNumObjectsAvailable = Set.size $ - Map.keysSet referenceCounts - Set.\\ Map.keysSet globalObtainedButNotAckedObjects - Set.\\ Map.keysSet globalToPoolObjects - , numOfBufferedObjects = Map.size globalObtainedButNotAckedObjects - , numOfInSubmissionToObjectPoolObjects = Map.size globalToPoolObjects - , numOfObjectIdsInflight = getSum $ foldMap Sum globalInFlightObjects + Map.keysSet dgsObjectReferenceCounts + Set.\\ Map.keysSet dgsObjectsPending + Set.\\ Map.keysSet dgsObjectsOwtPool + , odcNumObjectsPending = Map.size dgsObjectsPending + , odcNumObjectsOwtPool = Map.size dgsObjectsOwtPool + , odcNumObjectsInFlight = getSum $ foldMap Sum dgsObjectsInflightMultiplicities } -data ObjectDiffusionProtocolError - = ProtocolErrorObjectNotRequested - | ProtocolErrorObjectIdsNotRequested - | -- | a list of objectId for which the received size and advertised size didn't - -- match. - forall objectId. 
- (Typeable objectId, Show objectId, Eq objectId) => - ProtocolErrorObjectSizeError [(objectId, SizeInBytes, SizeInBytes)] - -instance Eq ObjectDiffusionProtocolError where - ProtocolErrorObjectNotRequested == ProtocolErrorObjectNotRequested = True - ProtocolErrorObjectNotRequested == _ = False - ProtocolErrorObjectIdsNotRequested == ProtocolErrorObjectIdsNotRequested = True - ProtocolErrorObjectIdsNotRequested == _ = True - ProtocolErrorObjectSizeError (as :: [(a, SizeInBytes, SizeInBytes)]) - == ProtocolErrorObjectSizeError (as' :: [(a', SizeInBytes, SizeInBytes)]) = - case eqT @a @a' of - Nothing -> False - Just Refl -> as == as' - ProtocolErrorObjectSizeError{} == _ = False - -deriving instance Show ObjectDiffusionProtocolError - -instance Exception ObjectDiffusionProtocolError where - displayException ProtocolErrorObjectNotRequested = - "The peer replied with a transaction we did not ask for." - displayException ProtocolErrorObjectIdsNotRequested = - "The peer replied with more objectIds than we asked for." - displayException (ProtocolErrorObjectSizeError objectIds) = - "The peer received objects with wrong sizes " ++ show objectIds - data ObjectDiffusionInitDelay = ObjectDiffusionInitDelay DiffTime | NoObjectDiffusionInitDelay @@ -419,3 +313,40 @@ data ObjectDiffusionInitDelay defaultObjectDiffusionInitDelay :: ObjectDiffusionInitDelay defaultObjectDiffusionInitDelay = ObjectDiffusionInitDelay 60 + +-- Copied from V1: + +newtype NumObjectsProcessed + = NumObjectsProcessed + { getNumObjectsProcessed :: Word64 + } + deriving (Eq, Show) + +data TraceObjectDiffusionInbound objectId object + = -- | Number of objects just about to be inserted. + TraceObjectDiffusionInboundCollectedObjects Int + | -- | Just processed object pass/fail breakdown. + TraceObjectDiffusionInboundAddedObjects NumObjectsProcessed + | -- | Received a 'ControlMessage' from the outbound peer governor, and about + -- to act on it. 
+ TraceObjectDiffusionInboundRecvControlMessage ControlMessage + | TraceObjectDiffusionInboundCanRequestMoreObjects Int + | TraceObjectDiffusionInboundCannotRequestMoreObjects Int + deriving (Eq, Show) + +data ObjectDiffusionInboundError + = ProtocolErrorObjectNotRequested + | ProtocolErrorObjectIdsNotRequested + | ProtocolErrorObjectIdAlreadyKnown + | ProtocolErrorObjectIdsDuplicate + deriving Show + +instance Exception ObjectDiffusionInboundError where + displayException ProtocolErrorObjectNotRequested = + "The peer replied with a object we did not ask for." + displayException ProtocolErrorObjectIdsNotRequested = + "The peer replied with more objectIds than we asked for." + displayException ProtocolErrorObjectIdAlreadyKnown = + "The peer replied with an objectId that it has already sent us previously." + displayException ProtocolErrorObjectIdsDuplicate = + "The peer replied with a batch of objectIds containing a duplicate." \ No newline at end of file From 94dc009d10061fd61cb4c566b1b7cd85558037fe Mon Sep 17 00:00:00 2001 From: Thomas BAGREL Date: Fri, 3 Oct 2025 16:10:27 +0200 Subject: [PATCH 06/43] Fix some more errors --- .../ObjectDiffusion/Inbound/V2.hs | 2 +- .../ObjectDiffusion/Inbound/V2/Decision.hs | 74 +++++++++---------- .../ObjectDiffusion/Inbound/V2/State.hs | 22 +++--- 3 files changed, 46 insertions(+), 52 deletions(-) diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2.hs index b27d3a2740..02d67623c6 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2.hs @@ -81,7 +81,7 @@ objectDiffusionInbound -- Block on next decision. 
object@PeerDecision { pdObjectsToReqIds = pdObjectsToReqIds - , pdObjectsOwtPool = [(objectId, object)]{listOf[(objectId, object)]} + , pdObjectsOwtPool = pdObjectsOwtPool } <- readPeerDecision traceWith tracer (TraceObjectInboundDecision object) diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Decision.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Decision.hs index bf638c86b9..917495b019 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Decision.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Decision.hs @@ -93,10 +93,10 @@ data DecisionInternalState peerAddr objectId object -- ^ number of all `object`s in-flight. , disObjectsInflightMultiplicities :: !(Map objectId Int) -- ^ `objectId`s in-flight. - , disObjectsAckedMultiplicities :: !(Map objectId Int) + , disIdsToAckMultiplicities :: !(Map objectId Int) -- ^ acknowledged `objectId` with multiplicities. It is used to update -- `dgsObjectReferenceCounts`. - , disObjectsOwtPoolds :: Set objectId + , disObjectsOwtPoolIds :: Set objectId -- ^ objects on their way to the objectpool. Used to prevent issueing new -- fetch requests for them. 
} @@ -144,9 +144,10 @@ pickObjectsToDownload -- initial state DecisionInternalState { disObjectsInflightMultiplicities = dgsObjectsInflightMultiplicities - , disNumObjectsInflight = dgsObjectsInflightMultiplicitiesNum - , disObjectsAckedMultiplicities = Map.empty - , disObjectsOwtPoolds = Map.keysSet dgsObjectsOwtPool + , disNumObjectsInflight = fromIntegral $ Map.foldl' (+) 0 dgsObjectsInflightMultiplicities + -- Thomas: not sure here if we must count distinct objects in flight, or total number of objects in flight (considering multiplicities) + , disIdsToAckMultiplicities = Map.empty + , disObjectsOwtPoolIds = Map.keysSet dgsObjectsOwtPool } >>> gn where @@ -162,40 +163,39 @@ pickObjectsToDownload st@DecisionInternalState { disObjectsInflightMultiplicities , disNumObjectsInflight - , disObjectsAckedMultiplicities - , disObjectsOwtPoolds + , disIdsToAckMultiplicities + , disObjectsOwtPoolIds } ( peerAddr , peerObjectState@DecisionPeerState { dpsIdsAvailable - , dpsObjectsRequestedButNotReceivedIds , dpsObjectsInflightIds } ) = - let sizeInflightAll :: NumObjects - sizeInflightOther :: NumObjects + let sizeInflightAll :: NumObjectsReq + sizeInflightOther :: NumObjectsReq sizeInflightAll = disNumObjectsInflight - sizeInflightOther = sizeInflightAll - Set.size dpsObjectsInflightIds + sizeInflightOther = sizeInflightAll - fromIntegral (Set.size dpsObjectsInflightIds) in if sizeInflightAll >= dpMaxNumObjectsInflightTotal then let ( numObjectIdsToAck , numObjectIdsToReq - , pdObjectsOwtPool@[(objectId, object)]{listOf[(objectId, object)]} - , RefCountDiff{objectIdsToAck} + , pdObjectsOwtPool + , RefCountDiff{rcdIdsToAckMultiplicities} , peerObjectState' ) = acknowledgeObjectIds policy sharedState peerObjectState - disObjectsAckedMultiplicities' = Map.unionWith (+) disObjectsAckedMultiplicities objectIdsToAck - disObjectsOwtPoolds' = - disObjectsOwtPoolds - <> Set.fromList (map fst listOf[(objectId, object)]) + disIdsToAckMultiplicities' = Map.unionWith (+)
disIdsToAckMultiplicities rcdIdsToAckMultiplicities + disObjectsOwtPoolIds' = + disObjectsOwtPoolIds + <> Map.keysSet pdObjectsOwtPool in if dpsNumIdsInflight peerObjectState' > 0 then -- we have objectIds to request ( st - { disObjectsAckedMultiplicities = disObjectsAckedMultiplicities' - , disObjectsOwtPoolds = disObjectsOwtPoolds' + { disIdsToAckMultiplicities = disIdsToAckMultiplicities' + , disObjectsOwtPoolIds = disObjectsOwtPoolIds' } , ( (peerAddr, peerObjectState') @@ -207,7 +207,7 @@ pickObjectsToDownload . StrictSeq.null . dpsOutstandingFifo $ peerObjectState' - , pdObjectsToReqIds = Map.empty + , pdObjectsToReqIds = Set.empty , pdObjectsOwtPool = pdObjectsOwtPool } ) @@ -222,7 +222,7 @@ pickObjectsToDownload ) ) else - let dpsObjectsInflightIdsNum' :: NumObjects + let dpsObjectsInflightIdsNum' :: NumObjectsReq pdObjectsToReqIdsMap :: Set objectId (dpsObjectsInflightIdsNum', pdObjectsToReqIdsMap) = @@ -262,7 +262,7 @@ pickObjectsToDownload `Set.unions` ( Map.keysSet dgsObjectsPending <> dpsObjectsInflightIds <> dpsObjectsRequestedButNotReceivedIds - <> disObjectsOwtPoolds + <> disObjectsOwtPoolIds ) ) dpsObjectsInflightIdsNum @@ -276,12 +276,12 @@ pickObjectsToDownload ( numObjectIdsToAck , numObjectIdsToReq - , pdObjectsOwtPool@[(objectId, object)]{listOf[(objectId, object)]} - , RefCountDiff{objectIdsToAck} + , pdObjectsOwtPool + , RefCountDiff{rcdIdsToAckMultiplicities} , peerObjectState'' ) = acknowledgeObjectIds policy sharedState peerObjectState' - disObjectsAckedMultiplicities' = Map.unionWith (+) disObjectsAckedMultiplicities objectIdsToAck + disIdsToAckMultiplicities' = Map.unionWith (+) disIdsToAckMultiplicities rcdIdsToAckMultiplicities stInflightDelta :: Map objectId Int stInflightDelta = Map.fromSet (\_ -> 1) pdObjectsToReqIds @@ -291,17 +291,17 @@ pickObjectsToDownload disObjectsInflightMultiplicities' :: Map objectId Int disObjectsInflightMultiplicities' = Map.unionWith (+) stInflightDelta disObjectsInflightMultiplicities - 
disObjectsOwtPoolds' = - disObjectsOwtPoolds - <> Set.fromList (map fst listOf[(objectId, object)]) + disObjectsOwtPoolIds' = + disObjectsOwtPoolIds + <> Map.keysSet pdObjectsOwtPool in if dpsNumIdsInflight peerObjectState'' > 0 then -- we can request `objectId`s & `object`s ( DecisionInternalState { disObjectsInflightMultiplicities = disObjectsInflightMultiplicities' , disNumObjectsInflight = undefined - , disObjectsAckedMultiplicities = disObjectsAckedMultiplicities' - , disObjectsOwtPoolds = disObjectsOwtPoolds' + , disIdsToAckMultiplicities = disIdsToAckMultiplicities' + , disObjectsOwtPoolIds = disObjectsOwtPoolIds' } , ( (peerAddr, peerObjectState'') @@ -322,7 +322,7 @@ pickObjectsToDownload -- there are no `objectId`s to request, only `object`s. ( st { disObjectsInflightMultiplicities = disObjectsInflightMultiplicities' - , disObjectsOwtPoolds = disObjectsOwtPoolds' + , disObjectsOwtPoolIds = disObjectsOwtPoolIds' } , ( (peerAddr, peerObjectState'') @@ -340,7 +340,7 @@ pickObjectsToDownload gn ( DecisionInternalState { disObjectsInflightMultiplicities - , disObjectsAckedMultiplicities + , disIdsToAckMultiplicities } , as ) =
-460,7 +459,6 @@ filterActivePeers , dpsNumIdsInflight , dpsObjectsInflightIds , dpsIdsAvailable - , dpsObjectsRequestedButNotReceivedIds } = ( dpsNumIdsInflight == 0 && dpsNumIdsInflight + numOfUnacked <= dpMaxNumObjectsOutstanding diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/State.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/State.hs index 2cc19a6d2d..5e387ff481 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/State.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/State.hs @@ -64,7 +64,8 @@ acknowledgeObjectIds :: -- objectpool, objectIds to acknowledge with multiplicities, updated DecisionPeerState. ( NumObjectIdsAck , NumObjectIdsReq - , [(objectId, object)] + , Map objectId object + -- ^ objectsOwtPool , RefCountDiff objectId , DecisionPeerState objectId object ) @@ -74,7 +75,6 @@ acknowledgeObjectIds sharedObjectState ps@DecisionPeerState { dpsIdsAvailable - , dpsObjectsRequestedButNotReceivedIds , dpsNumIdsInflight , dpsObjectsPending , dpsScore @@ -86,12 +86,11 @@ acknowledgeObjectIds then ( pdIdsToAck , pdIdsToReq - , [(objectId, object)] pdObjectsOwtPool + , objectsOwtPool , refCountDiff , ps { dpsOutstandingFifo = dpsOutstandingFifo' , dpsIdsAvailable = dpsIdsAvailable' - , dpsObjectsRequestedButNotReceivedIds = dpsObjectsRequestedButNotReceivedIds' , dpsNumIdsInflight = dpsNumIdsInflight + pdIdsToReq @@ -103,7 +102,7 @@ acknowledgeObjectIds else ( 0 , 0 - , [(objectId, object)] pdObjectsOwtPool + , objectsOwtPool , RefCountDiff Map.empty , ps{dpsObjectsOwtPool = dpsObjectsOwtPool'} ) @@ -113,7 +112,7 @@ acknowledgeObjectIds (pdIdsToReq, acknowledgedObjectIds, dpsOutstandingFifo') = splitAcknowledgedObjectIds policy sharedObjectState ps - pdObjectsOwtPool = + objectsOwtPoolList = [ 
(objectId, object) | objectId <- toList toObjectPoolObjectIds , objectId `Map.notMember` dgsObjectsPending sharedObjectState @@ -122,9 +121,9 @@ acknowledgeObjectIds (toObjectPoolObjectIds, _) = StrictSeq.spanl (`Map.member` dpsObjectsPending) acknowledgedObjectIds - pdObjectsOwtPoolMap = Map.fromList pdObjectsOwtPool + objectsOwtPool = Map.fromList objectsOwtPoolList - dpsObjectsOwtPool' = dpsObjectsOwtPool <> pdObjectsOwtPoolMap + dpsObjectsOwtPool' = dpsObjectsOwtPool <> objectsOwtPool (dpsObjectsPending', ackedDownloadedObjects) = Map.partitionWithKey (\objectId _ -> objectId `Set.member` liveSet) dpsObjectsPending -- latexObjects: objects which were downloaded by another peer before we @@ -132,7 +131,7 @@ acknowledgeObjectIds -- `dgsObjectsPending`. lateObjects = Map.filterWithKey - (\objectId _ -> objectId `Map.notMember` pdObjectsOwtPoolMap) + (\objectId _ -> objectId `Map.notMember` objectsOwtPool) ackedDownloadedObjects dpsScore' = dpsScore + fromIntegral (Map.size lateObjects) @@ -144,7 +143,6 @@ acknowledgeObjectIds -- `dpsOutstandingFifo''`, but also return the unknown set before any -- modifications (which is used to compute `dpsOutstandingFifo''` -- above). - dpsObjectsRequestedButNotReceivedIds' = dpsObjectsRequestedButNotReceivedIds `Set.intersection` liveSet refCountDiff = RefCountDiff $ @@ -180,7 +178,6 @@ splitAcknowledgedObjectIds } DecisionPeerState { dpsOutstandingFifo - , dpsObjectsRequestedButNotReceivedIds , dpsObjectsPending , dpsObjectsInflightIds , dpsNumIdsInflight @@ -210,7 +207,7 @@ splitAcknowledgedObjectIds -- | `RefCountDiff` represents a map of `objectId` which can be acknowledged -- together with their multiplicities. 
newtype RefCountDiff objectId = RefCountDiff - { objectIdsToAck :: Map objectId Int + { rcdIdsToAckMultiplicities :: Map objectId Int } updateRefCounts :: @@ -540,7 +537,6 @@ collectObjectsImpl ps'' = ps { dpsIdsAvailable = dpsIdsAvailable'' - , dpsObjectsRequestedButNotReceivedIds = dpsObjectsRequestedButNotReceivedIds'' , dpsObjectsInflightIds = dpsObjectsInflightIds' , dpsObjectsPending = dpsObjectsPending' } From 2a899e95eab852623954375aa104ce5e99132664 Mon Sep 17 00:00:00 2001 From: Thomas BAGREL Date: Mon, 6 Oct 2025 17:08:40 +0200 Subject: [PATCH 07/43] Continue re-organizing decision impl --- .../ObjectDiffusion/Inbound/V2.hs | 32 +- .../ObjectDiffusion/Inbound/V2/Registry.hs | 426 ++++++------------ .../ObjectDiffusion/Inbound/V2/State.hs | 146 +++++- 3 files changed, 288 insertions(+), 316 deletions(-) diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2.hs index 02d67623c6..245788dd77 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2.hs @@ -10,16 +10,16 @@ module Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2 ( -- * ObjectDiffusion Inbound client objectDiffusionInbound - -- * PeerObjectAPI + -- * InboundPeerAPI , withPeer - , PeerObjectAPI + , InboundPeerAPI -- * Supporting types , module V2 - , ObjectChannelsVar - , newObjectChannelsVar - , ObjectObjectPoolSem - , newObjectObjectPoolSem + , PeerDecisionChannelsVar + , newPeerDecisionChannelsVar + , ObjectPoolSem + , newObjectPoolSem , DecisionGlobalStateVar , newDecisionGlobalStateVar , DecisionPolicy (..) 
@@ -56,18 +56,18 @@ objectDiffusionInbound :: ) => Tracer m (TraceObjectDiffusionInbound objectId object) -> ObjectDiffusionInitDelay -> - ObjectDiffusionObjectPoolWriter objectId object ticketNo m -> - PeerObjectAPI m objectId object -> + ObjectPoolWriter objectId object ticketNo m -> + InboundPeerAPI m objectId object -> ObjectDiffusionServerPipelined objectId object m () objectDiffusionInbound tracer initDelay - ObjectDiffusionObjectPoolWriter{objectId} - PeerObjectAPI + ObjectPoolWriter{objectId} + InboundPeerAPI { readPeerDecision - , handleReceivedObjectIds + , handleReceivedIds , handleReceivedObjects - , submitObjectToObjectPool + , submitObjectToPool } = ObjectDiffusionServerPipelined $ do case initDelay of @@ -90,12 +90,12 @@ objectDiffusionInbound -- Only attempt to add objects if we have some work to do when (collected > 0) $ do - -- submitObjectToObjectPool traces: + -- submitObjectToPool traces: -- \* `TraceObjectDiffusionProcessed`, -- \* `TraceObjectInboundAddedToObjectPool`, and -- \* `TraceObjectInboundRejectedFromObjectPool` -- events. 
- mapM_ (uncurry $ submitObjectToObjectPool tracer) listOf[(objectId, object)] + mapM_ (uncurry $ submitObjectToPool tracer) listOf[(objectId, object)] -- TODO: -- We can update the state so that other `object-submission` servers will @@ -148,7 +148,7 @@ objectDiffusionInbound objectIdsMap = Map.fromList objectIds' unless (StrictSeq.length objectIdsSeq <= fromIntegral objectIdsToReq) $ throwIO ProtocolErrorObjectIdsNotRequested - handleReceivedObjectIds objectIdsToReq objectIdsSeq objectIdsMap + handleReceivedIds objectIdsToReq objectIdsSeq objectIdsMap serverIdle ) serverReqObjectIds @@ -206,7 +206,7 @@ objectDiffusionInbound objectIdsMap = Map.fromList objectIds unless (StrictSeq.length objectIdsSeq <= fromIntegral objectIdsToReq) $ throwIO ProtocolErrorObjectIdsNotRequested - handleReceivedObjectIds objectIdsToReq objectIdsSeq objectIdsMap + handleReceivedIds objectIdsToReq objectIdsSeq objectIdsMap k CollectObjects objectIds objects -> do let requested = Map.keysSet objectIds diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Registry.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Registry.hs index 49033f2c43..dbb592a709 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Registry.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Registry.hs @@ -6,14 +6,14 @@ {-# LANGUAGE ScopedTypeVariables #-} module Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.Registry - ( ObjectChannels (..) - , ObjectChannelsVar - , ObjectObjectPoolSem + ( PeerDecisionChannels (..) + , PeerDecisionChannelsVar + , ObjectPoolSem , DecisionGlobalStateVar , newDecisionGlobalStateVar - , newObjectChannelsVar - , newObjectObjectPoolSem - , PeerObjectAPI (..) 
+ , newPeerDecisionChannelsVar + , newObjectPoolSem + , InboundPeerAPI (..) , decisionLogicThreads , withPeer ) where @@ -44,55 +44,35 @@ import Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.Types import Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.ObjectPool.API import Ouroboros.Network.Protocol.ObjectDiffusion.Type --- | Communication channels between `ObjectDiffusion` client mini-protocol and --- decision logic. -newtype ObjectChannels m peerAddr objectId object = ObjectChannels - { objectChannelMap :: Map peerAddr (StrictMVar m (PeerDecision objectId object)) - } +-- | Communication channels between `ObjectDiffusion` mini-protocol inbound side +-- and decision logic. +type PeerDecisionChannels m peerAddr objectId object = + Map peerAddr (StrictMVar m (PeerDecision objectId object)) -type ObjectChannelsVar m peerAddr objectId object = - StrictMVar m (ObjectChannels m peerAddr objectId object) +type PeerDecisionChannelsVar m peerAddr objectId object = + StrictMVar m (PeerDecisionChannels m peerAddr objectId object) -newObjectChannelsVar :: MonadMVar m => m (ObjectChannelsVar m peerAddr objectId object) -newObjectChannelsVar = newMVar (ObjectChannels Map.empty) +newPeerDecisionChannelsVar :: + MonadMVar m => m (PeerDecisionChannelsVar m peerAddr objectId object) +newPeerDecisionChannelsVar = newMVar (PeerDecisionChannels Map.empty) -newtype ObjectObjectPoolSem m = ObjectObjectPoolSem (TSem m) +-- | Semaphore to guard access to the ObjectPool +newtype ObjectPoolSem m = ObjectPoolSem (TSem m) -newObjectObjectPoolSem :: MonadSTM m => m (ObjectObjectPoolSem m) -newObjectObjectPoolSem = ObjectObjectPoolSem <$> atomically (newTSem 1) +newObjectPoolSem :: MonadSTM m => m (ObjectPoolSem m) +newObjectPoolSem = ObjectPoolSem <$> atomically (newTSem 1) --- | API to access `DecisionPeerState` inside `DecisionPeerStateVar`. 
-data PeerObjectAPI m objectId object = PeerObjectAPI +data InboundPeerAPI m objectId object = InboundPeerAPI { readPeerDecision :: m (PeerDecision objectId object) -- ^ a blocking action which reads `PeerDecision` - , handleReceivedObjectIds :: - NumObjectIdsReq -> - StrictSeq objectId -> - -- \^ received objectIds - Map objectId SizeInBytes -> - -- \^ received sizes of advertised object's - m () - -- ^ handle received objectIds - , handleReceivedObjects :: - Map objectId SizeInBytes -> - -- \^ requested objectIds - Map objectId object -> - -- \^ received objects - m (Maybe ObjectDiffusionInboundError) - -- ^ handle received objects - , submitObjectToObjectPool :: - Tracer m (TraceObjectDiffusionInbound objectId object) -> - objectId -> - object -> - m () - -- ^ submit the given (objectId, object) to the objectpool. + , handleReceivedIds :: [objectId] -> m () + , handleReceivedObjects :: [object] -> m () + , submitObjectsToPool :: [object] -> m () } -data ObjectObjectPoolResult = ObjectAccepted | ObjectRejected - -- | A bracket function which registers / de-registers a new peer in --- `DecisionGlobalStateVar` and `DecisionPeerStateVar`s, which exposes `DecisionPeerStateAPI`. --- `DecisionPeerStateAPI` is only safe inside the `withPeer` scope. +-- `DecisionGlobalStateVar` and `PeerDecisionChannelsVar`s, which exposes `InboundPeerAPI`. +-- `InboundPeerAPI` is only safe inside the `withPeer` scope. withPeer :: forall object peerAddr objectId ticketNo m a. 
( MonadMask m @@ -106,74 +86,93 @@ withPeer :: , Show peerAddr ) => Tracer m (TraceDecisionLogic peerAddr objectId object) -> - ObjectChannelsVar m peerAddr objectId object -> - ObjectObjectPoolSem m -> + PeerDecisionChannelsVar m peerAddr objectId object -> + ObjectPoolSem m -> DecisionPolicy -> DecisionGlobalStateVar m peerAddr objectId object -> - ObjectDiffusionObjectPoolReader objectId object ticketNo m -> - ObjectDiffusionObjectPoolWriter objectId object ticketNo m -> - (object -> SizeInBytes) -> + ObjectPoolReader objectId object ticketNo m -> + ObjectPoolWriter objectId object ticketNo m -> + -- | new peer peerAddr -> - -- ^ new peer - - -- | callback which gives access to `DecisionPeerStateAPI` - (PeerObjectAPI m objectId object -> m a) -> + -- | callback which gives access to `InboundPeerAPI` + (InboundPeerAPI m objectId object -> m a) -> m a withPeer - tracer - channelsVar - (ObjectObjectPoolSem objectpoolSem) + decisionTracer + decisionChannelsVar + (ObjectPoolSem poolSem) policy@DecisionPolicy{dpMinObtainedButNotAckedObjectsLifetime} - sharedStateVar - ObjectDiffusionObjectPoolReader{objectpoolGetSnapshot} - ObjectDiffusionObjectPoolWriter{objectpoolAddObjects} - objectSize + globalStateVar + ObjectPoolReader{} + ObjectPoolWriter{opwAddObjects} peerAddr - io = - bracket - ( do - -- create a communication channel - !peerObjectAPI <- + withApi = + bracket registerPeerAndCreateAPI unregisterPeer withAPI + where + registerPeerAndCreateAPI :: m (InboundPeerAPI m objectId object) + registerPeerAndCreateAPI = do + -- create the API for this peer, obtaining a channel for it in the process + !inboundPeerAPI <- modifyMVar - channelsVar - \ObjectChannels{objectChannelMap} -> do - chann <- newEmptyMVar - let (chann', objectChannelMap') = - Map.alterF - ( \mbChann -> - let !chann'' = fromMaybe chann mbChann - in (chann'', Just chann'') - ) - peerAddr - objectChannelMap + decisionChannelsVar + \peerToChannel -> do + -- We get a channel for this peer, and register it 
in peerToChannel. + (chan', peerToChannel') <- + case peerToChannel Map.!? peerAddr of + -- Checks if a channel already exists for this peer, in case we reuse it + Just chan -> return (chan, peerToChannel) + -- Otherwise create a new channel and register it + Nothing -> do + chan <- newEmptyMVar + return (chan, Map.insert peerAddr chan peerToChannel) return - ( ObjectChannels{objectChannelMap = objectChannelMap'} - , PeerObjectAPI - { readPeerDecision = takeMVar chann' - , handleReceivedObjectIds - , handleReceivedObjects - , submitObjectToObjectPool + ( peerToChannel' + , InboundPeerAPI + { readPeerDecision = takeMVar chan' + , handleReceivedIds = + collectIds + decisionTracer + globalStateVar + objectpoolGetSnapshot + peerAddr + numObjectIdsToReq + objectIdsSeq + objectIdsMap + , handleReceivedObjects = + collectObjects + decisionTracer + objectSize + globalStateVar + peerAddr + objectIds + objects + , submitObjectsToPool } ) - - atomically $ modifyTVar sharedStateVar registerPeer - return peerObjectAPI - ) + -- register the peer in the global state now + atomically $ modifyTVar globalStateVar registerPeerGlobalState + -- initialization is complete for this peer, it can proceed and + -- interact through its given API + return inboundPeerAPI + where + + unregisterPeer :: InboundPeerAPI m objectId object -> m () + unregisterPeer _ = -- the handler is a short blocking operation, thus we need to use -- `uninterruptibleMask_` - ( \_ -> uninterruptibleMask_ do - atomically $ modifyTVar sharedStateVar unregisterPeer + uninterruptibleMask_ do + -- unregister the peer from the global state + atomically $ modifyTVar globalStateVar unregisterPeerGlobalState + -- remove the channel of this peer from the global channel map modifyMVar_ - channelsVar - \ObjectChannels{objectChannelMap} -> - return ObjectChannels{objectChannelMap = Map.delete peerAddr objectChannelMap} - ) - io - where - registerPeer :: + decisionChannelsVar + \peerToChannel -> + return $ Map.delete peerAddr 
peerToChannel + + registerPeerGlobalState :: DecisionGlobalState peerAddr objectId object -> DecisionGlobalState peerAddr objectId object - registerPeer st@DecisionGlobalState{dgsPeerStates} = + registerPeerGlobalState st@DecisionGlobalState{dgsPeerStates} = st { dgsPeerStates = Map.insert @@ -195,10 +194,10 @@ withPeer -- TODO: this function needs to be tested! -- Issue: https://github.com/IntersectMBO/ouroboros-network/issues/5151 - unregisterPeer :: + unregisterPeerGlobalState :: DecisionGlobalState peerAddr objectId object -> DecisionGlobalState peerAddr objectId object - unregisterPeer + unregisterPeerGlobalState st@DecisionGlobalState { dgsPeerStates , dgsObjectsPending @@ -268,183 +267,26 @@ withPeer fn (Just n) | n > 1 = Just $! pred n fn _ = Nothing - -- - -- PeerObjectAPI - -- - - submitObjectToObjectPool :: - Tracer m (TraceObjectDiffusionInbound objectId object) -> objectId -> object -> m () - submitObjectToObjectPool objectTracer objectId object = - bracket_ - (atomically $ waitTSem objectpoolSem) - (atomically $ signalTSem objectpoolSem) - $ do - start <- getMonotonicTime - res <- addObject - end <- getMonotonicTime - atomically $ modifyTVar sharedStateVar (updateBufferedObject end res) - let duration = end `diffTime` start - case res of - ObjectAccepted -> traceWith objectTracer (TraceObjectInboundAddedToObjectPool [objectId] duration) - ObjectRejected -> traceWith objectTracer (TraceObjectInboundRejectedFromObjectPool [objectId] duration) - where - -- add the object to the objectpool - addObject :: m ObjectObjectPoolResult - addObject = do - mpSnapshot <- atomically objectpoolGetSnapshot - - -- Note that checking if the objectpool contains a object before - -- spending several ms attempting to add it to the pool has - -- been judged immoral. 
- if objectpoolHasObject mpSnapshot objectId - then do - !now <- getMonotonicTime - !s <- countRejectedObjects now 1 - traceWith objectTracer $ - TraceObjectDiffusionProcessed - ProcessedObjectCount - { pobjectcAccepted = 0 - , pobjectcRejected = 1 - , pobjectcScore = s - } - return ObjectRejected - else do - acceptedObjects <- objectpoolAddObjects [object] - end <- getMonotonicTime - if null acceptedObjects - then do - !s <- countRejectedObjects end 1 - traceWith objectTracer $ - TraceObjectDiffusionProcessed - ProcessedObjectCount - { pobjectcAccepted = 0 - , pobjectcRejected = 1 - , pobjectcScore = s - } - return ObjectRejected - else do - !s <- countRejectedObjects end 0 - traceWith objectTracer $ - TraceObjectDiffusionProcessed - ProcessedObjectCount - { pobjectcAccepted = 1 - , pobjectcRejected = 0 - , pobjectcScore = s - } - return ObjectAccepted - - updateBufferedObject :: - Time -> - ObjectObjectPoolResult -> - DecisionGlobalState peerAddr objectId object -> - DecisionGlobalState peerAddr objectId object - updateBufferedObject - _ - ObjectRejected - st@DecisionGlobalState - { dgsPeerStates - , dgsObjectsOwtPool - } = - st - { dgsPeerStates = dgsPeerStates' - , dgsObjectsOwtPool = dgsObjectsOwtPool' - } - where - dgsObjectsOwtPool' = - Map.update - (\case 1 -> Nothing; n -> Just $! pred n) - objectId - dgsObjectsOwtPool - - dgsPeerStates' = Map.update fn peerAddr dgsPeerStates - where - fn ps = Just $! 
ps{dpsObjectsOwtPool = Map.delete objectId (dpsObjectsOwtPool ps)} - updateBufferedObject - now - ObjectAccepted - st@DecisionGlobalState - { dgsPeerStates - , dgsObjectsPending - , dgsObjectReferenceCounts - , dgsRententionTimeouts - , dgsObjectsOwtPool - } = - st - { dgsPeerStates = dgsPeerStates' - , dgsObjectsPending = dgsObjectsPending' - , dgsRententionTimeouts = dgsRententionTimeouts' - , dgsObjectReferenceCounts = dgsObjectReferenceCounts' - , dgsObjectsOwtPool = dgsObjectsOwtPool' - } - where - dgsObjectsOwtPool' = - Map.update - (\case 1 -> Nothing; n -> Just $! pred n) - objectId - dgsObjectsOwtPool - - dgsRententionTimeouts' = Map.alter fn (addTime dpMinObtainedButNotAckedObjectsLifetime now) dgsRententionTimeouts - where - fn :: Maybe [objectId] -> Maybe [objectId] - fn Nothing = Just [objectId] - fn (Just objectIds) = Just $! (objectId : objectIds) - - dgsObjectReferenceCounts' = Map.alter fn objectId dgsObjectReferenceCounts - where - fn :: Maybe Int -> Maybe Int - fn Nothing = Just 1 - fn (Just n) = Just $! succ n - - dgsObjectsPending' = Map.insert objectId (Just object) dgsObjectsPending - - dgsPeerStates' = Map.update fn peerAddr dgsPeerStates - where - fn ps = Just $! 
ps{dpsObjectsOwtPool = Map.delete objectId (dpsObjectsOwtPool ps)} - - handleReceivedObjectIds :: - NumObjectIdsReq -> - StrictSeq objectId -> - Map objectId SizeInBytes -> - m () - handleReceivedObjectIds numObjectIdsToReq objectIdsSeq objectIdsMap = - receivedObjectIds - tracer - sharedStateVar - objectpoolGetSnapshot - peerAddr - numObjectIdsToReq - objectIdsSeq - objectIdsMap - - handleReceivedObjects :: - Map objectId SizeInBytes -> - -- \^ requested objectIds with their announced size - Map objectId object -> - -- \^ received objects - m (Maybe ObjectDiffusionInboundError) - handleReceivedObjects objectIds objects = - collectObjects tracer objectSize sharedStateVar peerAddr objectIds objects - - -- Update `dpsScore` & `dpsScoreLastUpdatedAt` fields of `DecisionPeerState`, return the new - -- updated `dpsScore`. - -- - -- PRECONDITION: the `Double` argument is non-negative. - countRejectedObjects :: - Time -> - Double -> - m Double - countRejectedObjects _ n - | n < 0 = - error ("ObjectDiffusion.countRejectedObjects: invariant violation for peer " ++ show peerAddr) - countRejectedObjects now n = atomically $ stateTVar sharedStateVar $ \st -> - let (result, dgsPeerStates') = Map.alterF fn peerAddr (dgsPeerStates st) - in (result, st{dgsPeerStates = dgsPeerStates'}) - where - fn :: Maybe (DecisionPeerState objectId object) -> (Double, Maybe (DecisionPeerState objectId object)) - fn Nothing = error ("ObjectDiffusion.withPeer: invariant violation for peer " ++ show peerAddr) - fn (Just ps) = (dpsScore ps', Just $! ps') - where - ps' = updateRejects policy now n ps +-- Update `dpsScore` & `dpsScoreLastUpdatedAt` fields of `DecisionPeerState`, return the new +-- updated `dpsScore`. +-- +-- PRECONDITION: the `Double` argument is non-negative. 
+countRejectedObjects :: + Time -> + Double -> + m Double +countRejectedObjects _ n + | n < 0 = + error ("ObjectDiffusion.countRejectedObjects: invariant violation for peer " ++ show peerAddr) +countRejectedObjects now n = atomically $ stateTVar globalStateVar $ \st -> + let (result, dgsPeerStates') = Map.alterF fn peerAddr (dgsPeerStates st) + in (result, st{dgsPeerStates = dgsPeerStates'}) + where + fn :: Maybe (DecisionPeerState objectId object) -> (Double, Maybe (DecisionPeerState objectId object)) + fn Nothing = error ("ObjectDiffusion.withPeer: invariant violation for peer " ++ show peerAddr) + fn (Just ps) = (dpsScore ps', Just $! ps') + where + ps' = updateRejects policy now n ps updateRejects :: DecisionPolicy -> @@ -477,7 +319,7 @@ drainRejectionThread :: DecisionPolicy -> DecisionGlobalStateVar m peerAddr objectId object -> m Void -drainRejectionThread tracer policy sharedStateVar = do +drainRejectionThread decisionTracer policy globalStateVar = do labelThisThread "object-rejection-drain" now <- getMonotonicTime go $ addTime drainInterval now @@ -491,7 +333,7 @@ drainRejectionThread tracer policy sharedStateVar = do !now <- getMonotonicTime st'' <- atomically $ do - st <- readTVar sharedStateVar + st <- readTVar globalStateVar let ptss = if now > nextDrain then Map.map (updateRejects policy now 0) (dgsPeerStates st) @@ -502,9 +344,9 @@ drainRejectionThread tracer policy sharedStateVar = do st { dgsPeerStates = ptss } - writeTVar sharedStateVar st' + writeTVar globalStateVar st' return st' - traceWith tracer (TraceDecisionGlobalState "drainRejectionThread" st'') + traceWith decisionTracer (TraceDecisionGlobalState "drainRejectionThread" st'') if now > nextDrain then go $ addTime drainInterval now @@ -524,10 +366,10 @@ decisionLogicThread :: Tracer m (TraceDecisionLogic peerAddr objectId object) -> Tracer m ObjectDiffusionCounters -> DecisionPolicy -> - ObjectChannelsVar m peerAddr objectId object -> + PeerDecisionChannelsVar m peerAddr objectId object -> 
DecisionGlobalStateVar m peerAddr objectId object -> m Void -decisionLogicThread tracer counterTracer policy objectChannelsVar sharedStateVar = do +decisionLogicThread decisionTracer counterTracer policy objectChannelsVar globalStateVar = do labelThisThread "object-decision" go where @@ -538,23 +380,23 @@ decisionLogicThread tracer counterTracer policy objectChannelsVar sharedStateVar threadDelay _DECISION_LOOP_DELAY (decisions, st) <- atomically do - sharedObjectState <- readTVar sharedStateVar + sharedObjectState <- readTVar globalStateVar let activePeers = filterActivePeers policy sharedObjectState -- block until at least one peer is active check (not (Map.null activePeers)) let (sharedState, decisions) = makeDecisions policy sharedObjectState activePeers - writeTVar sharedStateVar sharedState + writeTVar globalStateVar sharedState return (decisions, sharedState) - traceWith tracer (TraceDecisionGlobalState "decisionLogicThread" st) - traceWith tracer (TracePeerDecisions decisions) - ObjectChannels{objectChannelMap} <- readMVar objectChannelsVar + traceWith decisionTracer (TraceDecisionGlobalState "decisionLogicThread" st) + traceWith decisionTracer (TracePeerDecisions decisions) + PeerDecisionChannels{peerToChannel} <- readMVar objectChannelsVar traverse_ (\(mvar, d) -> modifyMVarWithDefault_ mvar d (\d' -> pure (d' <> d))) ( Map.intersectionWith (,) - objectChannelMap + peerToChannel decisions ) traceWith counterTracer (makeObjectDiffusionCounters st) @@ -586,13 +428,13 @@ decisionLogicThreads :: Tracer m (TraceDecisionLogic peerAddr objectId object) -> Tracer m ObjectDiffusionCounters -> DecisionPolicy -> - ObjectChannelsVar m peerAddr objectId object -> + PeerDecisionChannelsVar m peerAddr objectId object -> DecisionGlobalStateVar m peerAddr objectId object -> m Void -decisionLogicThreads tracer counterTracer policy objectChannelsVar sharedStateVar = +decisionLogicThreads decisionTracer counterTracer policy objectChannelsVar globalStateVar = uncurry (<>) - 
<$> drainRejectionThread tracer policy sharedStateVar - `concurrently` decisionLogicThread tracer counterTracer policy objectChannelsVar sharedStateVar + <$> drainRejectionThread decisionTracer policy globalStateVar + `concurrently` decisionLogicThread decisionTracer counterTracer policy objectChannelsVar globalStateVar -- `5ms` delay _DECISION_LOOP_DELAY :: DiffTime diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/State.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/State.hs index 5e387ff481..28a0c2f9a7 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/State.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/State.hs @@ -12,8 +12,9 @@ module Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.State , DecisionPeerState (..) , DecisionGlobalStateVar , newDecisionGlobalStateVar - , receivedObjectIds + , collectIds , collectObjects + , submitObjectToPool , acknowledgeObjectIds , splitAcknowledgedObjectIds , tickTimedObjects @@ -22,7 +23,7 @@ module Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.State -- * Internals, only exported for testing purposes: , RefCountDiff (..) , updateRefCounts - , receivedObjectIdsImpl + , collectIdsImpl , collectObjectsImpl ) where @@ -284,7 +285,7 @@ tickTimedObjects -- | Insert received `objectId`s and return the number of objectIds to be acknowledged -- and the updated `DecisionGlobalState`. -receivedObjectIdsImpl :: +collectIdsImpl :: forall peerAddr object objectId. 
(Ord objectId, Ord peerAddr, HasCallStack) => -- | check if objectId is in the objectpool, ref @@ -300,7 +301,7 @@ receivedObjectIdsImpl :: Set objectId -> DecisionGlobalState peerAddr objectId object -> DecisionGlobalState peerAddr objectId object -receivedObjectIdsImpl +collectIdsImpl objectpoolHasObject peerAddr reqNo @@ -566,7 +567,7 @@ newDecisionGlobalStateVar rng = -- | Acknowledge `objectId`s, return the number of `objectIds` to be acknowledged to the -- remote side. -receivedObjectIds :: +collectIds :: forall m peerAddr ticketNo object objectId. (MonadSTM m, Ord objectId, Ord peerAddr) => Tracer m (TraceDecisionLogic peerAddr objectId object) -> @@ -581,13 +582,13 @@ receivedObjectIds :: -- | received `objectId`s Set objectId -> m () -receivedObjectIds tracer sharedVar objectPoolWriter peerAddr reqNo objectIdsSeq objectIds = do +collectIds tracer sharedVar objectPoolWriter peerAddr reqNo objectIdsSeq objectIds = do st <- atomically $ do hasObject <- opwHasObject objectPoolWriter stateTVar sharedVar - ((\a -> (a, a)) . receivedObjectIdsImpl hasObject peerAddr reqNo objectIdsSeq objectIds) - traceWith tracer (TraceDecisionGlobalState "receivedObjectIds" st) + ((\a -> (a, a)) . collectIdsImpl hasObject peerAddr reqNo objectIdsSeq objectIds) + traceWith tracer (TraceDecisionGlobalState "collectIds" st) -- | Include received `object`s in `DecisionGlobalState`. Return number of `objectIds` -- to be acknowledged and list of `object` to be added to the objectpool. 
@@ -623,3 +624,132 @@ collectObjects tracer objectSize sharedVar peerAddr objectIdsRequested objectsMa traceWith tracer (TraceDecisionGlobalState "collectObjects" st) $> Nothing Left e -> return (Just e) + +submitObjectToPool :: + Tracer m (TraceObjectDiffusionInbound objectId object) -> objectId -> object -> m () +submitObjectToPool objectTracer objectId object = + bracket_ + (atomically $ waitTSem poolSem) + (atomically $ signalTSem poolSem) + $ do + start <- getMonotonicTime + res <- addObject + end <- getMonotonicTime + atomically $ modifyTVar globalStateVar (updateBufferedObject end res) + let duration = end `diffTime` start + case res of + ObjectAccepted -> traceWith objectTracer (TraceObjectInboundAddedToObjectPool [objectId] duration) + ObjectRejected -> traceWith objectTracer (TraceObjectInboundRejectedFromObjectPool [objectId] duration) + where + -- add the object to the objectpool + addObject :: m ObjectObjectPoolResult + addObject = do + mpSnapshot <- atomically objectpoolGetSnapshot + + -- Note that checking if the objectpool contains a object before + -- spending several ms attempting to add it to the pool has + -- been judged immoral. 
+ if objectpoolHasObject mpSnapshot objectId + then do + !now <- getMonotonicTime + !s <- countRejectedObjects now 1 + traceWith objectTracer $ + TraceObjectDiffusionProcessed + ProcessedObjectCount + { pobjectcAccepted = 0 + , pobjectcRejected = 1 + , pobjectcScore = s + } + return ObjectRejected + else do + acceptedObjects <- objectpoolAddObjects [object] + end <- getMonotonicTime + if null acceptedObjects + then do + !s <- countRejectedObjects end 1 + traceWith objectTracer $ + TraceObjectDiffusionProcessed + ProcessedObjectCount + { pobjectcAccepted = 0 + , pobjectcRejected = 1 + , pobjectcScore = s + } + return ObjectRejected + else do + !s <- countRejectedObjects end 0 + traceWith objectTracer $ + TraceObjectDiffusionProcessed + ProcessedObjectCount + { pobjectcAccepted = 1 + , pobjectcRejected = 0 + , pobjectcScore = s + } + return ObjectAccepted + + updateBufferedObject :: + Time -> + ObjectObjectPoolResult -> + DecisionGlobalState peerAddr objectId object -> + DecisionGlobalState peerAddr objectId object + updateBufferedObject + _ + ObjectRejected + st@DecisionGlobalState + { dgsPeerStates + , dgsObjectsOwtPool + } = + st + { dgsPeerStates = dgsPeerStates' + , dgsObjectsOwtPool = dgsObjectsOwtPool' + } + where + dgsObjectsOwtPool' = + Map.update + (\case 1 -> Nothing; n -> Just $! pred n) + objectId + dgsObjectsOwtPool + + dgsPeerStates' = Map.update fn peerAddr dgsPeerStates + where + fn ps = Just $! 
ps{dpsObjectsOwtPool = Map.delete objectId (dpsObjectsOwtPool ps)} + updateBufferedObject + now + ObjectAccepted + st@DecisionGlobalState + { dgsPeerStates + , dgsObjectsPending + , dgsObjectReferenceCounts + , dgsRententionTimeouts + , dgsObjectsOwtPool + } = + st + { dgsPeerStates = dgsPeerStates' + , dgsObjectsPending = dgsObjectsPending' + , dgsRententionTimeouts = dgsRententionTimeouts' + , dgsObjectReferenceCounts = dgsObjectReferenceCounts' + , dgsObjectsOwtPool = dgsObjectsOwtPool' + } + where + dgsObjectsOwtPool' = + Map.update + (\case 1 -> Nothing; n -> Just $! pred n) + objectId + dgsObjectsOwtPool + + dgsRententionTimeouts' = Map.alter fn (addTime dpMinObtainedButNotAckedObjectsLifetime now) dgsRententionTimeouts + where + fn :: Maybe [objectId] -> Maybe [objectId] + fn Nothing = Just [objectId] + fn (Just objectIds) = Just $! (objectId : objectIds) + + dgsObjectReferenceCounts' = Map.alter fn objectId dgsObjectReferenceCounts + where + fn :: Maybe Int -> Maybe Int + fn Nothing = Just 1 + fn (Just n) = Just $! succ n + + dgsObjectsPending' = Map.insert objectId (Just object) dgsObjectsPending + + dgsPeerStates' = Map.update fn peerAddr dgsPeerStates + where + fn ps = Just $! 
ps{dpsObjectsOwtPool = Map.delete objectId (dpsObjectsOwtPool ps)} \ No newline at end of file From fed9f0d09c8bbba827924504a735341b3092f325 Mon Sep 17 00:00:00 2001 From: Thomas BAGREL Date: Tue, 7 Oct 2025 15:54:42 +0200 Subject: [PATCH 08/43] WIP before removing dgsObjectsPending --- .../ObjectDiffusion/Inbound/V2.hs | 2 +- .../ObjectDiffusion/Inbound/V2/Decision.hs | 62 ++-- .../ObjectDiffusion/Inbound/V2/Policy.hs | 11 +- .../ObjectDiffusion/Inbound/V2/Registry.hs | 300 +++++------------- .../ObjectDiffusion/Inbound/V2/State.hs | 258 ++++++--------- .../ObjectDiffusion/Inbound/V2/Types.hs | 98 +++--- 6 files changed, 276 insertions(+), 455 deletions(-) diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2.hs index 245788dd77..58e100ea79 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2.hs @@ -91,7 +91,7 @@ objectDiffusionInbound -- Only attempt to add objects if we have some work to do when (collected > 0) $ do -- submitObjectToPool traces: - -- \* `TraceObjectDiffusionProcessed`, + -- \* `TraceObjectDiffusionInboundAddedObjects`, -- \* `TraceObjectInboundAddedToObjectPool`, and -- \* `TraceObjectInboundRejectedFromObjectPool` -- events. 
diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Decision.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Decision.hs index 917495b019..2c69f783fb 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Decision.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Decision.hs @@ -10,7 +10,7 @@ module Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.Decision ( PeerDecision (..) - , emptyPeerDecision + , mempty -- * Internal API exposed for testing , makeDecisions @@ -44,7 +44,7 @@ makeDecisions :: , Ord objectId , Hashable peerAddr ) => - -- | decision policy + -- | decision decisionPolicy DecisionPolicy -> -- | decision context DecisionGlobalState peerAddr objectId object -> @@ -57,11 +57,11 @@ makeDecisions :: ( DecisionGlobalState peerAddr objectId object , Map peerAddr (PeerDecision objectId object) ) -makeDecisions policy st = +makeDecisions decisionPolicy st = let (salt, rng') = random (dgsRng st) st' = st{dgsRng = rng'} in fn - . pickObjectsToDownload policy st' + . pickObjectsToDownload decisionPolicy st' . orderByRejections salt where fn :: @@ -83,7 +83,7 @@ orderByRejections :: Map peerAddr (DecisionPeerState objectId object) -> [(peerAddr, DecisionPeerState objectId object)] orderByRejections salt = - List.sortOn (\(peerAddr, ps) -> (dpsScore ps, hashWithSalt salt peerAddr)) + List.sortOn (\(peerAddr, ps) -> hashWithSalt salt peerAddr) . Map.toList -- | Internal state of `pickObjectsToDownload` computation. @@ -91,12 +91,12 @@ data DecisionInternalState peerAddr objectId object = DecisionInternalState { disNumObjectsInflight :: !NumObjectsReq -- ^ number of all `object`s in-flight. 
- , disObjectsInflightMultiplicities :: !(Map objectId Int) + , disObjectsInflightMultiplicities :: !(Map objectId ObjectMultiplicity) -- ^ `objectId`s in-flight. - , disIdsToAckMultiplicities :: !(Map objectId Int) + , disIdsToAckMultiplicities :: !(Map objectId ObjectMultiplicity) -- ^ acknowledged `objectId` with multiplicities. It is used to update - -- `dgsObjectReferenceCounts`. - , disObjectsOwtPoolIds :: Set objectId + -- `dgsObjectsLiveMultiplicities`. + , disObjectsOwtPoolIds :: Map objectId object -- ^ objects on their way to the objectpool. Used to prevent issueing new -- fetch requests for them. } @@ -117,7 +117,7 @@ pickObjectsToDownload :: ( Ord peerAddr , Ord objectId ) => - -- | decision policy + -- | decision decisionPolicy DecisionPolicy -> -- | shared state DecisionGlobalState peerAddr objectId object -> @@ -126,7 +126,7 @@ pickObjectsToDownload :: , [(peerAddr, PeerDecision objectId object)] ) pickObjectsToDownload - policy@DecisionPolicy + decisionPolicy@DecisionPolicy { dpMaxNumObjectsInflightPerPeer , dpMaxNumObjectsInflightTotal , dpMaxObjectInflightMultiplicity @@ -135,8 +135,8 @@ pickObjectsToDownload { dgsPeerStates , dgsObjectsInflightMultiplicities , dgsObjectsPending - , dgsObjectsOwtPool - , dgsObjectReferenceCounts + , dgsObjectsOwtPoolMultiplicities + , dgsObjectsLiveMultiplicities } = -- outer fold: fold `[(peerAddr, DecisionPeerState objectId object)]` List.mapAccumR @@ -147,7 +147,7 @@ pickObjectsToDownload , disNumObjectsInflight = fromIntegral $ Map.foldl' (+) 0 dgsObjectsInflightMultiplicities -- Thomas: not sure here if we must count disctinct objects in flight, or total number of objects in flight (considering multiplicities) , disIdsToAckMultiplicities = Map.empty - , disObjectsOwtPoolIds = Map.keysSet dgsObjectsOwtPool + , disObjectsOwtPoolIds = dgsObjectsOwtPoolMultiplicities } >>> gn where @@ -184,7 +184,7 @@ pickObjectsToDownload , pdObjectsOwtPool , RefCountDiff{rcdIdsToAckMultiplicities} , peerObjectState' - ) = 
acknowledgeObjectIds policy sharedState peerObjectState + ) = acknowledgeObjectIds decisionPolicy sharedState peerObjectState disIdsToAckMultiplicities' = Map.unionWith (+) disIdsToAckMultiplicities rcdIdsToAckMultiplicities disObjectsOwtPoolIds' = @@ -218,7 +218,7 @@ pickObjectsToDownload ( st , ( (peerAddr, peerObjectState') - , emptyPeerDecision + , mempty ) ) else @@ -279,7 +279,7 @@ pickObjectsToDownload , pdObjectsOwtPool , RefCountDiff{rcdIdsToAckMultiplicities} , peerObjectState'' - ) = acknowledgeObjectIds policy sharedState peerObjectState' + ) = acknowledgeObjectIds decisionPolicy sharedState peerObjectState' disIdsToAckMultiplicities' = Map.unionWith (+) disIdsToAckMultiplicities rcdIdsToAckMultiplicities @@ -326,7 +326,7 @@ pickObjectsToDownload } , ( (peerAddr, peerObjectState'') - , emptyPeerDecision{pdObjectsToReqIds = pdObjectsToReqIdsMap} + , mempty{pdObjectsToReqIds = pdObjectsToReqIdsMap} ) ) @@ -348,7 +348,7 @@ pickObjectsToDownload Map.fromList ((\(a, _) -> a) <$> as) <> dgsPeerStates - dgsObjectReferenceCounts' = + dgsObjectsLiveMultiplicities' = Map.merge (Map.mapMaybeMissing \_ x -> Just x) (Map.mapMaybeMissing \_ _ -> assert False Nothing) @@ -357,23 +357,23 @@ pickObjectsToDownload then Just $! 
x - y else Nothing ) - dgsObjectReferenceCounts + dgsObjectsLiveMultiplicities disIdsToAckMultiplicities - liveSet = Map.keysSet dgsObjectReferenceCounts' + liveSet = Map.keysSet dgsObjectsLiveMultiplicities' dgsObjectsPending' = dgsObjectsPending `Map.restrictKeys` liveSet - dgsObjectsOwtPool' = - List.foldl' updateInSubmissionToObjectPoolObjects dgsObjectsOwtPool as + dgsObjectsOwtPoolMultiplicities' = + List.foldl' updateInSubmissionToObjectPoolObjects dgsObjectsOwtPoolMultiplicities as in ( sharedState { dgsPeerStates = dgsPeerStates' , dgsObjectsInflightMultiplicities = disObjectsInflightMultiplicities , dgsObjectsPending = dgsObjectsPending' - , dgsObjectReferenceCounts = dgsObjectReferenceCounts' - , dgsObjectsOwtPool = dgsObjectsOwtPool' + , dgsObjectsLiveMultiplicities = dgsObjectsLiveMultiplicities' + , dgsObjectsOwtPoolMultiplicities = dgsObjectsOwtPoolMultiplicities' } , -- exclude empty results mapMaybe @@ -421,15 +421,15 @@ filterActivePeers :: DecisionGlobalState peerAddr objectId object -> Map peerAddr (DecisionPeerState objectId object) filterActivePeers - policy@DecisionPolicy + decisionPolicy@DecisionPolicy { dpMaxNumObjectsOutstanding , dpMaxObjectInflightMultiplicity } - sharedObjectState@DecisionGlobalState + globalState@DecisionGlobalState { dgsPeerStates , dgsObjectsPending , dgsObjectsInflightMultiplicities - , dgsObjectsOwtPool + , dgsObjectsOwtPoolMultiplicities } = Map.filter gn dgsPeerStates where @@ -449,7 +449,7 @@ filterActivePeers where -- Split `dpsOutstandingFifo'` into the longest prefix of `objectId`s which -- can be acknowledged and the unacknowledged `objectId`s. 
- (pdIdsToReq, _, unackedObjectIds) = splitAcknowledgedObjectIds policy sharedObjectState peerObjectState + (pdIdsToReq, _, unackedObjectIds) = splitAcknowledgedObjectIds decisionPolicy globalState peerObjectState numOfUnacked = fromIntegral (StrictSeq.length unackedObjectIds) gn :: DecisionPeerState objectId object -> Bool @@ -472,11 +472,11 @@ filterActivePeers `Set.difference` dpsObjectsInflightIds `Set.difference` dpsObjectsRequestedButNotReceivedIds `Set.difference` unrequestable - `Set.difference` Map.keysSet dgsObjectsOwtPool + `Set.difference` Map.keysSet dgsObjectsOwtPoolMultiplicities -- Split `dpsOutstandingFifo'` into the longest prefix of `objectId`s which -- can be acknowledged and the unacknowledged `objectId`s. - (pdIdsToReq, _, _) = splitAcknowledgedObjectIds policy sharedObjectState peerObjectState + (pdIdsToReq, _, _) = splitAcknowledgedObjectIds decisionPolicy globalState peerObjectState -- -- Auxiliary functions diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Policy.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Policy.hs index 3d56653a89..20c7a010cc 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Policy.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Policy.hs @@ -10,6 +10,7 @@ module Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.Policy import Control.Monad.Class.MonadTime.SI import Ouroboros.Network.Protocol.ObjectDiffusion.Type (NumObjectIdsReq (..), NumObjectsOutstanding, NumObjectsReq (..)) +import Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.Types (ObjectMultiplicity) -- | Policy for making decisions data DecisionPolicy = DecisionPolicy @@ -21,17 +22,11 @@ data DecisionPolicy = DecisionPolicy -- ^ a limit of objects in-flight from a single peer, plus 
or minus 1. , dpMaxNumObjectsInflightTotal :: !NumObjectsReq -- ^ a limit of object size in-flight from all peers, plus or minus 1 - , dpMaxObjectInflightMultiplicity :: !Int + , dpMaxObjectInflightMultiplicity :: !ObjectMultiplicity -- ^ from how many peers download the `objectId` simultaneously , dpMinObtainedButNotAckedObjectsLifetime :: !DiffTime -- ^ how long objects that have been added to the objectpool will be -- kept in the `dgsObjectsPending` cache. - , dpScoreDrainRate :: !Double - -- ^ rate at which "rejected" objects drain. Unit: object/seconds. - -- TODO: still relevant? - , dpScoreMaxRejections :: !Double - -- ^ Maximum number of "rejections". Unit: seconds - -- TODO: still relevant? } deriving Show @@ -44,6 +39,4 @@ defaultDecisionPolicy = , dpMaxNumObjectsInflightTotal = NumObjectsReq 20 , dpMaxObjectInflightMultiplicity = 2 , dpMinObtainedButNotAckedObjectsLifetime = 2 - , dpScoreDrainRate = 0.1 - , dpScoreMaxRejections = 15 * 60 } diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Registry.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Registry.hs index dbb592a709..59b518d154 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Registry.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Registry.hs @@ -6,22 +6,20 @@ {-# LANGUAGE ScopedTypeVariables #-} module Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.Registry - ( PeerDecisionChannels (..) + ( PeerDecisionChannels , PeerDecisionChannelsVar , ObjectPoolSem , DecisionGlobalStateVar - , newDecisionGlobalStateVar , newPeerDecisionChannelsVar , newObjectPoolSem , InboundPeerAPI (..) 
- , decisionLogicThreads , withPeer + , decisionLogicThread ) where import Control.Concurrent.Class.MonadMVar.Strict import Control.Concurrent.Class.MonadSTM.Strict import Control.Concurrent.Class.MonadSTM.TSem -import Control.Monad.Class.MonadAsync import Control.Monad.Class.MonadFork import Control.Monad.Class.MonadThrow import Control.Monad.Class.MonadTime.SI @@ -31,18 +29,17 @@ import Data.Foldable as Foldable (foldl', traverse_) import Data.Hashable import Data.Map.Strict (Map) import Data.Map.Strict qualified as Map -import Data.Maybe (fromMaybe) -import Data.Sequence.Strict (StrictSeq) import Data.Sequence.Strict qualified as StrictSeq import Data.Set qualified as Set import Data.Typeable (Typeable) import Data.Void (Void) import Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.Decision import Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.Policy -import Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.State +import Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.State (DecisionGlobalStateVar) +import Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.State qualified as State import Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.Types import Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.ObjectPool.API -import Ouroboros.Network.Protocol.ObjectDiffusion.Type +import Control.Monad (forever) -- | Communication channels between `ObjectDiffusion` mini-protocol inbound side -- and decision logic. 
@@ -54,7 +51,7 @@ type PeerDecisionChannelsVar m peerAddr objectId object = newPeerDecisionChannelsVar :: MonadMVar m => m (PeerDecisionChannelsVar m peerAddr objectId object) -newPeerDecisionChannelsVar = newMVar (PeerDecisionChannels Map.empty) +newPeerDecisionChannelsVar = newMVar (Map.empty) -- | Semaphore to guard access to the ObjectPool newtype ObjectPoolSem m = ObjectPoolSem (TSem m) @@ -91,7 +88,7 @@ withPeer :: DecisionPolicy -> DecisionGlobalStateVar m peerAddr objectId object -> ObjectPoolReader objectId object ticketNo m -> - ObjectPoolWriter objectId object ticketNo m -> + ObjectPoolWriter objectId object m -> -- | new peer peerAddr -> -- | callback which gives access to `InboundPeerAPI` @@ -101,12 +98,12 @@ withPeer decisionTracer decisionChannelsVar (ObjectPoolSem poolSem) - policy@DecisionPolicy{dpMinObtainedButNotAckedObjectsLifetime} + decisionPolicy globalStateVar - ObjectPoolReader{} - ObjectPoolWriter{opwAddObjects} + objectPoolReader + objectPoolWriter peerAddr - withApi = + withAPI = bracket registerPeerAndCreateAPI unregisterPeer withAPI where registerPeerAndCreateAPI :: m (InboundPeerAPI m objectId object) @@ -129,24 +126,9 @@ withPeer ( peerToChannel' , InboundPeerAPI { readPeerDecision = takeMVar chan' - , handleReceivedIds = - collectIds - decisionTracer - globalStateVar - objectpoolGetSnapshot - peerAddr - numObjectIdsToReq - objectIdsSeq - objectIdsMap - , handleReceivedObjects = - collectObjects - decisionTracer - objectSize - globalStateVar - peerAddr - objectIds - objects - , submitObjectsToPool + , handleReceivedIds = State.handleReceivedIds + , handleReceivedObjects = State.handleReceivedObjects + , submitObjectsToPool = State.submitObjectsToPool } ) -- register the peer in the global state now @@ -178,14 +160,10 @@ withPeer Map.insert peerAddr DecisionPeerState - { dpsIdsAvailable = Map.empty + { dpsIdsAvailable = Set.empty , dpsNumIdsInflight = 0 - , dpsObjectsInflightIdsSize = 0 , dpsObjectsInflightIds = Set.empty , 
dpsOutstandingFifo = StrictSeq.empty - , dpsObjectsRequestedButNotReceivedIds = Set.empty - , dpsScore = 0 - , dpsScoreLastUpdatedAt = Time 0 , dpsObjectsPending = Map.empty , dpsObjectsOwtPool = Map.empty } @@ -201,163 +179,75 @@ withPeer st@DecisionGlobalState { dgsPeerStates , dgsObjectsPending - , dgsObjectReferenceCounts + , dgsObjectsLiveMultiplicities , dgsObjectsInflightMultiplicities - , dgsObjectsInflightMultiplicitiesSize - , dgsObjectsOwtPool + , dgsObjectsOwtPoolMultiplicities } = st { dgsPeerStates = dgsPeerStates' , dgsObjectsPending = dgsObjectsPending' - , dgsObjectReferenceCounts = dgsObjectReferenceCounts' + , dgsObjectsLiveMultiplicities = dgsObjectsLiveMultiplicities' , dgsObjectsInflightMultiplicities = dgsObjectsInflightMultiplicities' - , dgsObjectsInflightMultiplicitiesSize = dgsObjectsInflightMultiplicitiesSize' - , dgsObjectsOwtPool = dgsObjectsOwtPool' + , dgsObjectsOwtPoolMultiplicities = dgsObjectsOwtPoolMultiplicities' } where + -- First extract the DPS of the specified peer from the DGS ( DecisionPeerState { dpsOutstandingFifo , dpsObjectsInflightIds - , dpsObjectsInflightIdsSize , dpsObjectsOwtPool } , dgsPeerStates' ) = Map.alterF ( \case - Nothing -> error ("ObjectDiffusion.withPeer: invariant violation for peer " ++ show peerAddr) + Nothing -> error ("ObjectDiffusion.withPeer: can't unregister peer " ++ show peerAddr ++ " because it isn't registered") Just a -> (a, Nothing) ) peerAddr dgsPeerStates + + decreaseCount :: Map objectId ObjectMultiplicity -> objectId -> Map objectId ObjectMultiplicity + decreaseCount mmap objectId = + Map.update + (\n -> if n > 1 then Just $! pred n else Nothing) + objectId + mmap - dgsObjectReferenceCounts' = + -- Update the dgsObjectsLiveMultiplicities map by decreasing the count of each + -- objectId which is part of the dpsOutstandingFifo of this peer. + dgsObjectsLiveMultiplicities' = Foldable.foldl' - ( flip $ Map.update \cnt -> - if cnt > 1 - then Just $! 
pred cnt - else Nothing - ) - dgsObjectReferenceCounts + decreaseCount + dgsObjectsLiveMultiplicities dpsOutstandingFifo - liveSet = Map.keysSet dgsObjectReferenceCounts' + -- Update dgsObjectsPending map to only contain live objects + -- (to reflect the new state of dgsObjectsLiveMultiplicities). + liveSet = Map.keysSet dgsObjectsLiveMultiplicities' + dgsObjectsPending' = Map.restrictKeys dgsObjectsPending liveSet - dgsObjectsPending' = - dgsObjectsPending - `Map.restrictKeys` liveSet - - dgsObjectsInflightMultiplicities' = Foldable.foldl' purgeInflightObjects dgsObjectsInflightMultiplicities dpsObjectsInflightIds - dgsObjectsInflightMultiplicitiesSize' = dgsObjectsInflightMultiplicitiesSize - dpsObjectsInflightIdsSize + -- Update dgsInflightMultiplicities map by decreasing the count + -- of objects that were in-flight for this peer. + dgsObjectsInflightMultiplicities' = + Foldable.foldl' + decreaseCount + dgsObjectsInflightMultiplicities + dpsObjectsInflightIds - -- When we unregister a peer, we need to subtract all objects in the - -- `dpsObjectsOwtPool`, as they will not be submitted to the objectpool. - dgsObjectsOwtPool' = + -- Finally, we need to update dgsObjectsOwtPoolMultiplicities by decreasing the count of + -- each objectId which is part of the dpsObjectsOwtPool of this peer. + dgsObjectsOwtPoolMultiplicities' = Foldable.foldl' - ( flip $ Map.update \cnt -> - if cnt > 1 - then Just $! pred cnt - else Nothing - ) - dgsObjectsOwtPool + decreaseCount + dgsObjectsOwtPoolMultiplicities (Map.keysSet dpsObjectsOwtPool) - purgeInflightObjects m objectId = Map.alter fn objectId m - where - fn (Just n) | n > 1 = Just $! pred n - fn _ = Nothing - --- Update `dpsScore` & `dpsScoreLastUpdatedAt` fields of `DecisionPeerState`, return the new --- updated `dpsScore`. --- --- PRECONDITION: the `Double` argument is non-negative. 
-countRejectedObjects :: - Time -> - Double -> - m Double -countRejectedObjects _ n - | n < 0 = - error ("ObjectDiffusion.countRejectedObjects: invariant violation for peer " ++ show peerAddr) -countRejectedObjects now n = atomically $ stateTVar globalStateVar $ \st -> - let (result, dgsPeerStates') = Map.alterF fn peerAddr (dgsPeerStates st) - in (result, st{dgsPeerStates = dgsPeerStates'}) - where - fn :: Maybe (DecisionPeerState objectId object) -> (Double, Maybe (DecisionPeerState objectId object)) - fn Nothing = error ("ObjectDiffusion.withPeer: invariant violation for peer " ++ show peerAddr) - fn (Just ps) = (dpsScore ps', Just $! ps') - where - ps' = updateRejects policy now n ps - -updateRejects :: - DecisionPolicy -> - Time -> - Double -> - DecisionPeerState objectId object -> - DecisionPeerState objectId object -updateRejects _ now 0 pts | dpsScore pts == 0 = pts{dpsScoreLastUpdatedAt = now} -updateRejects - DecisionPolicy{dpScoreDrainRate, dpScoreMaxRejections} - now - n - pts@DecisionPeerState{dpsScore, dpsScoreLastUpdatedAt} = - let duration = diffTime now dpsScoreLastUpdatedAt - !drain = realToFrac duration * dpScoreDrainRate - !drained = max 0 $ dpsScore - drain - in pts - { dpsScore = min dpScoreMaxRejections $ drained + n - , dpsScoreLastUpdatedAt = now - } - -drainRejectionThread :: - forall m peerAddr objectId object. 
- ( MonadDelay m - , MonadSTM m - , MonadThread m - , Ord objectId - ) => - Tracer m (TraceDecisionLogic peerAddr objectId object) -> - DecisionPolicy -> - DecisionGlobalStateVar m peerAddr objectId object -> - m Void -drainRejectionThread decisionTracer policy globalStateVar = do - labelThisThread "object-rejection-drain" - now <- getMonotonicTime - go $ addTime drainInterval now - where - drainInterval :: DiffTime - drainInterval = 7 - - go :: Time -> m Void - go !nextDrain = do - threadDelay 1 - - !now <- getMonotonicTime - st'' <- atomically $ do - st <- readTVar globalStateVar - let ptss = - if now > nextDrain - then Map.map (updateRejects policy now 0) (dgsPeerStates st) - else dgsPeerStates st - st' = - tickTimedObjects - now - st - { dgsPeerStates = ptss - } - writeTVar globalStateVar st' - return st' - traceWith decisionTracer (TraceDecisionGlobalState "drainRejectionThread" st'') - - if now > nextDrain - then go $ addTime drainInterval now - else go nextDrain - decisionLogicThread :: forall m peerAddr objectId object. ( MonadDelay m , MonadMVar m , MonadSTM m - , MonadMask m , MonadFork m , Ord peerAddr , Ord objectId @@ -369,72 +259,54 @@ decisionLogicThread :: PeerDecisionChannelsVar m peerAddr objectId object -> DecisionGlobalStateVar m peerAddr objectId object -> m Void -decisionLogicThread decisionTracer counterTracer policy objectChannelsVar globalStateVar = do - labelThisThread "object-decision" - go - where - go :: m Void - go = do +decisionLogicThread decisionTracer countersTracer decisionPolicy decisionChannelsVar globalStateVar = do + labelThisThread "ObjectDiffusionInbound.decisionLogicThread" + forever $ do -- We rate limit the decision making process, it could overwhelm the CPU -- if there are too many inbound connections. 
threadDelay _DECISION_LOOP_DELAY - (decisions, st) <- atomically do - sharedObjectState <- readTVar globalStateVar - let activePeers = filterActivePeers policy sharedObjectState + -- Make decisions and update the global state var accordingly + (decisions, globalState') <- atomically $ do + globalState <- readTVar globalStateVar + let activePeers = filterActivePeers decisionPolicy globalState -- block until at least one peer is active check (not (Map.null activePeers)) - let (sharedState, decisions) = makeDecisions policy sharedObjectState activePeers - writeTVar globalStateVar sharedState - return (decisions, sharedState) - traceWith decisionTracer (TraceDecisionGlobalState "decisionLogicThread" st) - traceWith decisionTracer (TracePeerDecisions decisions) - PeerDecisionChannels{peerToChannel} <- readMVar objectChannelsVar + let (globalState', decisions) = makeDecisions decisionPolicy globalState activePeers + writeTVar globalStateVar globalState' + return (decisions, globalState') + + traceWith decisionTracer (TraceDecisionLogicGlobalStateUpdated "decisionLogicThread" globalState') + traceWith decisionTracer (TraceDecisionLogicDecisionsMade decisions) + peerToChannel <- readMVar decisionChannelsVar + -- Pair decision channel with the corresponding decision + let peerToChannelAndDecision = + Map.intersectionWith + (,) + peerToChannel + decisions + -- Send the decisions to the corresponding peers + -- Note that decisions are incremental, so we merge the old one to the new one (using the semigroup instance) if there is an old one traverse_ - (\(mvar, d) -> modifyMVarWithDefault_ mvar d (\d' -> pure (d' <> d))) - ( Map.intersectionWith - (,) - peerToChannel - decisions - ) - traceWith counterTracer (makeObjectDiffusionCounters st) - go + (\(chan, newDecision) -> + modifyMVarWithDefault_ + chan newDecision (\oldDecision -> pure (oldDecision <> newDecision))) + peerToChannelAndDecision - -- Variant of modifyMVar_ that puts a default value if the MVar is empty. 
- modifyMVarWithDefault_ :: StrictMVar m a -> a -> (a -> m a) -> m () - modifyMVarWithDefault_ m d io = - mask $ \restore -> do - mbA <- tryTakeMVar m - case mbA of - Just a -> do - a' <- restore (io a) `onException` putMVar m a - putMVar m a' - Nothing -> putMVar m d + traceWith countersTracer (makeObjectDiffusionCounters globalState') --- | Run `decisionLogicThread` and `drainRejectionThread`. -decisionLogicThreads :: - forall m peerAddr objectId object. - ( MonadDelay m - , MonadMVar m - , MonadMask m - , MonadAsync m - , MonadFork m - , Ord peerAddr - , Ord objectId - , Hashable peerAddr - ) => - Tracer m (TraceDecisionLogic peerAddr objectId object) -> - Tracer m ObjectDiffusionCounters -> - DecisionPolicy -> - PeerDecisionChannelsVar m peerAddr objectId object -> - DecisionGlobalStateVar m peerAddr objectId object -> - m Void -decisionLogicThreads decisionTracer counterTracer policy objectChannelsVar globalStateVar = - uncurry (<>) - <$> drainRejectionThread decisionTracer policy globalStateVar - `concurrently` decisionLogicThread decisionTracer counterTracer policy objectChannelsVar globalStateVar +-- Variant of modifyMVar_ that puts a default value if the MVar is empty. 
+modifyMVarWithDefault_ :: StrictMVar m a -> a -> (a -> m a) -> m () +modifyMVarWithDefault_ m d io = + mask $ \restore -> do + mbA <- tryTakeMVar m + case mbA of + Just a -> do + a' <- restore (io a) `onException` putMVar m a + putMVar m a' + Nothing -> putMVar m d -- `5ms` delay _DECISION_LOOP_DELAY :: DiffTime diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/State.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/State.hs index 28a0c2f9a7..47b5922ad1 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/State.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/State.hs @@ -5,6 +5,7 @@ {-# LANGUAGE RankNTypes #-} {-# LANGUAGE ScopedTypeVariables #-} {-# LANGUAGE TupleSections #-} +{-# LANGUAGE BangPatterns #-} module Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.State ( -- * Core API @@ -12,9 +13,9 @@ module Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.State , DecisionPeerState (..) , DecisionGlobalStateVar , newDecisionGlobalStateVar - , collectIds - , collectObjects - , submitObjectToPool + , handleReceivedIds + , handleReceivedObjects + , submitObjectsToPool , acknowledgeObjectIds , splitAcknowledgedObjectIds , tickTimedObjects @@ -23,8 +24,8 @@ module Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.State -- * Internals, only exported for testing purposes: , RefCountDiff (..) 
, updateRefCounts - , collectIdsImpl - , collectObjectsImpl + , handleReceivedIdsImpl + , handleReceivedObjectsImpl ) where import Control.Concurrent.Class.MonadSTM.Strict @@ -46,7 +47,7 @@ import Data.Typeable (Typeable) import GHC.Stack (HasCallStack) import Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.Policy import Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.Types -import Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.ObjectPool.API (SizeInBytes (..), ObjectPoolWriter (opwHasObject)) +import Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.ObjectPool.API (SizeInBytes (..), ObjectPoolWriter (opwHasObject, opwObjectId)) import Ouroboros.Network.Protocol.ObjectDiffusion.Type (NumObjectIdsAck (..)) import System.Random (StdGen) @@ -72,13 +73,12 @@ acknowledgeObjectIds :: ) {-# INLINE acknowledgeObjectIds #-} acknowledgeObjectIds - policy - sharedObjectState + decisionPolicy + globalState ps@DecisionPeerState { dpsIdsAvailable , dpsNumIdsInflight , dpsObjectsPending - , dpsScore , dpsObjectsOwtPool } = -- We can only acknowledge objectIds when we can request new ones, since @@ -96,7 +96,6 @@ acknowledgeObjectIds dpsNumIdsInflight + pdIdsToReq , dpsObjectsPending = dpsObjectsPending' - , dpsScore = dpsScore' , dpsObjectsOwtPool = dpsObjectsOwtPool' } ) @@ -111,12 +110,12 @@ acknowledgeObjectIds -- Split `dpsOutstandingFifo'` into the longest prefix of `objectId`s which -- can be acknowledged and the unacknowledged `objectId`s. 
(pdIdsToReq, acknowledgedObjectIds, dpsOutstandingFifo') = - splitAcknowledgedObjectIds policy sharedObjectState ps + splitAcknowledgedObjectIds decisionPolicy globalState ps objectsOwtPoolList = [ (objectId, object) | objectId <- toList toObjectPoolObjectIds - , objectId `Map.notMember` dgsObjectsPending sharedObjectState + , objectId `Map.notMember` dgsObjectsPending globalState , object <- maybeToList $ objectId `Map.lookup` dpsObjectsPending ] (toObjectPoolObjectIds, _) = @@ -134,7 +133,6 @@ acknowledgeObjectIds Map.filterWithKey (\objectId _ -> objectId `Map.notMember` objectsOwtPool) ackedDownloadedObjects - dpsScore' = dpsScore + fromIntegral (Map.size lateObjects) -- the set of live `objectIds` liveSet = Set.fromList (toList dpsOutstandingFifo') @@ -216,7 +214,7 @@ updateRefCounts :: Map objectId Int -> RefCountDiff objectId -> Map objectId Int -updateRefCounts dgsObjectReferenceCounts (RefCountDiff diff) = +updateRefCounts dgsObjectsLiveMultiplicities (RefCountDiff diff) = Map.merge (Map.mapMaybeMissing \_ x -> Just x) (Map.mapMaybeMissing \_ _ -> Nothing) @@ -227,7 +225,7 @@ updateRefCounts dgsObjectReferenceCounts (RefCountDiff diff) = then Just $! 
x - y else Nothing ) - dgsObjectReferenceCounts + dgsObjectsLiveMultiplicities diff tickTimedObjects :: @@ -240,7 +238,7 @@ tickTimedObjects now st@DecisionGlobalState { dgsRententionTimeouts - , dgsObjectReferenceCounts + , dgsObjectsLiveMultiplicities , dgsObjectsPending } = let (expiredObjects', dgsRententionTimeouts') = @@ -252,12 +250,12 @@ tickTimedObjects (expired, Nothing, timed) -> (expired, timed) refDiff = Map.foldl' fn Map.empty expiredObjects' - dgsObjectReferenceCounts' = updateRefCounts dgsObjectReferenceCounts (RefCountDiff refDiff) - liveSet = Map.keysSet dgsObjectReferenceCounts' + dgsObjectsLiveMultiplicities' = updateRefCounts dgsObjectsLiveMultiplicities (RefCountDiff refDiff) + liveSet = Map.keysSet dgsObjectsLiveMultiplicities' dgsObjectsPending' = dgsObjectsPending `Map.restrictKeys` liveSet in st { dgsRententionTimeouts = dgsRententionTimeouts' - , dgsObjectReferenceCounts = dgsObjectReferenceCounts' + , dgsObjectsLiveMultiplicities = dgsObjectsLiveMultiplicities' , dgsObjectsPending = dgsObjectsPending' } where @@ -285,11 +283,11 @@ tickTimedObjects -- | Insert received `objectId`s and return the number of objectIds to be acknowledged -- and the updated `DecisionGlobalState`. -collectIdsImpl :: +handleReceivedIdsImpl :: forall peerAddr object objectId. 
(Ord objectId, Ord peerAddr, HasCallStack) => -- | check if objectId is in the objectpool, ref - -- 'objectpoolHasObject' + -- 'opwHasObject' (objectId -> Bool) -> peerAddr -> -- | number of requests to subtract from @@ -301,8 +299,8 @@ collectIdsImpl :: Set objectId -> DecisionGlobalState peerAddr objectId object -> DecisionGlobalState peerAddr objectId object -collectIdsImpl - objectpoolHasObject +handleReceivedIdsImpl + opwHasObject peerAddr reqNo objectIdsSeq @@ -310,7 +308,7 @@ collectIdsImpl st@DecisionGlobalState { dgsPeerStates , dgsObjectsPending - , dgsObjectReferenceCounts + , dgsObjectsLiveMultiplicities } = -- using `alterF` so the update of `DecisionPeerState` is done in one lookup case Map.alterF @@ -342,7 +340,7 @@ collectIdsImpl -- Divide the new objectIds in two: those that are already in the objectpool -- and those that are not. We'll request some objects from the latter. (ignoredObjectIds, dpsIdsAvailableSet) = - Set.partition objectpoolHasObject objectIdsSet + Set.partition opwHasObject objectIdsSet -- Add all `objectIds` from `dpsIdsAvailableMap` which are not -- unacknowledged or already buffered. Unacknowledged objectIds must have @@ -368,7 +366,7 @@ collectIdsImpl dgsObjectsPending <> Map.fromList ((, Nothing) <$> Set.toList ignoredObjectIds) - dgsObjectReferenceCounts' = + dgsObjectsLiveMultiplicities' = Foldable.foldl' ( flip $ Map.alter @@ -377,13 +375,13 @@ collectIdsImpl Just cnt -> Just $! succ cnt ) ) - dgsObjectReferenceCounts + dgsObjectsLiveMultiplicities objectIdsSeq st' = st { dgsObjectsPending = dgsObjectsPending' - , dgsObjectReferenceCounts = dgsObjectReferenceCounts' + , dgsObjectsLiveMultiplicities = dgsObjectsLiveMultiplicities' } ps' = assert @@ -399,7 +397,7 @@ collectIdsImpl const_MAX_OBJECT_SIZE_DISCREPENCY :: SizeInBytes const_MAX_OBJECT_SIZE_DISCREPENCY = 32 -collectObjectsImpl :: +handleReceivedObjectsImpl :: forall peerAddr object objectId. 
( Ord peerAddr , Ord objectId @@ -420,7 +418,7 @@ collectObjectsImpl :: Either ObjectDiffusionInboundError (DecisionGlobalState peerAddr objectId object) -collectObjectsImpl +handleReceivedObjectsImpl objectSize peerAddr requestedObjectIdsMap @@ -559,15 +557,15 @@ newDecisionGlobalStateVar rng = { dgsPeerStates = Map.empty , dgsObjectsInflightMultiplicities = Map.empty , dgsObjectsPending = Map.empty - , dgsObjectReferenceCounts = Map.empty + , dgsObjectsLiveMultiplicities = Map.empty , dgsRententionTimeouts = Map.empty - , dgsObjectsOwtPool = Map.empty + , dgsObjectsOwtPoolMultiplicities = Map.empty , dgsRng = rng } -- | Acknowledge `objectId`s, return the number of `objectIds` to be acknowledged to the -- remote side. -collectIds :: +handleReceivedIds :: forall m peerAddr ticketNo object objectId. (MonadSTM m, Ord objectId, Ord peerAddr) => Tracer m (TraceDecisionLogic peerAddr objectId object) -> @@ -582,17 +580,17 @@ collectIds :: -- | received `objectId`s Set objectId -> m () -collectIds tracer sharedVar objectPoolWriter peerAddr reqNo objectIdsSeq objectIds = do +handleReceivedIds tracer sharedVar objectPoolWriter peerAddr reqNo objectIdsSeq objectIds = do st <- atomically $ do hasObject <- opwHasObject objectPoolWriter stateTVar sharedVar - ((\a -> (a, a)) . collectIdsImpl hasObject peerAddr reqNo objectIdsSeq objectIds) - traceWith tracer (TraceDecisionGlobalState "collectIds" st) + ((\a -> (a, a)) . handleReceivedIdsImpl hasObject peerAddr reqNo objectIdsSeq objectIds) + traceWith tracer (TraceDecisionLogicGlobalStateUpdated "handleReceivedIds" st) -- | Include received `object`s in `DecisionGlobalState`. Return number of `objectIds` -- to be acknowledged and list of `object` to be added to the objectpool. -collectObjects :: +handleReceivedObjects :: forall m peerAddr object objectId. 
( MonadSTM m , Ord objectId @@ -611,145 +609,85 @@ collectObjects :: -- | number of objectIds to be acknowledged and objects to be added to the -- objectpool m (Maybe ObjectDiffusionInboundError) -collectObjects tracer objectSize sharedVar peerAddr objectIdsRequested objectsMap = do +handleReceivedObjects tracer objectSize sharedVar peerAddr objectIdsRequested objectsMap = do r <- atomically $ do st <- readTVar sharedVar - case collectObjectsImpl objectSize peerAddr objectIdsRequested objectsMap st of + case handleReceivedObjectsImpl objectSize peerAddr objectIdsRequested objectsMap st of r@(Right st') -> writeTVar sharedVar st' $> r r@Left{} -> pure r case r of Right st -> - traceWith tracer (TraceDecisionGlobalState "collectObjects" st) + traceWith tracer (TraceDecisionLogicGlobalStateUpdated "handleReceivedObjects" st) $> Nothing Left e -> return (Just e) -submitObjectToPool :: - Tracer m (TraceObjectDiffusionInbound objectId object) -> objectId -> object -> m () -submitObjectToPool objectTracer objectId object = +submitObjectsToPool :: + Tracer m (TraceObjectDiffusionInbound objectId object) -> ObjectPoolWriter objectId object m -> [object] -> m () +submitObjectsToPool tracer objectPoolWriter objects = bracket_ (atomically $ waitTSem poolSem) (atomically $ signalTSem poolSem) $ do - start <- getMonotonicTime - res <- addObject - end <- getMonotonicTime - atomically $ modifyTVar globalStateVar (updateBufferedObject end res) - let duration = end `diffTime` start - case res of - ObjectAccepted -> traceWith objectTracer (TraceObjectInboundAddedToObjectPool [objectId] duration) - ObjectRejected -> traceWith objectTracer (TraceObjectInboundRejectedFromObjectPool [objectId] duration) + opwAddObjects objectPoolWriter objects + now <- getMonotonicTime + traceWith tracer (TraceObjectDiffusionInboundSubmittedObjects (NumObjectsProcessed $ length objects)) + atomically $ + let getId = opwObjectId objectPoolWriter in + modifyTVar globalStateVar $ \globalState -> + foldl' 
(\st object -> updateObjectsOwtPool now (getId object) object st) globalState where - -- add the object to the objectpool - addObject :: m ObjectObjectPoolResult - addObject = do - mpSnapshot <- atomically objectpoolGetSnapshot - - -- Note that checking if the objectpool contains a object before - -- spending several ms attempting to add it to the pool has - -- been judged immoral. - if objectpoolHasObject mpSnapshot objectId - then do - !now <- getMonotonicTime - !s <- countRejectedObjects now 1 - traceWith objectTracer $ - TraceObjectDiffusionProcessed - ProcessedObjectCount - { pobjectcAccepted = 0 - , pobjectcRejected = 1 - , pobjectcScore = s - } - return ObjectRejected - else do - acceptedObjects <- objectpoolAddObjects [object] - end <- getMonotonicTime - if null acceptedObjects - then do - !s <- countRejectedObjects end 1 - traceWith objectTracer $ - TraceObjectDiffusionProcessed - ProcessedObjectCount - { pobjectcAccepted = 0 - , pobjectcRejected = 1 - , pobjectcScore = s - } - return ObjectRejected - else do - !s <- countRejectedObjects end 0 - traceWith objectTracer $ - TraceObjectDiffusionProcessed - ProcessedObjectCount - { pobjectcAccepted = 1 - , pobjectcRejected = 0 - , pobjectcScore = s - } - return ObjectAccepted - - updateBufferedObject :: - Time -> - ObjectObjectPoolResult -> - DecisionGlobalState peerAddr objectId object -> - DecisionGlobalState peerAddr objectId object - updateBufferedObject - _ - ObjectRejected - st@DecisionGlobalState - { dgsPeerStates - , dgsObjectsOwtPool - } = - st - { dgsPeerStates = dgsPeerStates' - , dgsObjectsOwtPool = dgsObjectsOwtPool' - } - where - dgsObjectsOwtPool' = - Map.update - (\case 1 -> Nothing; n -> Just $! pred n) - objectId - dgsObjectsOwtPool - - dgsPeerStates' = Map.update fn peerAddr dgsPeerStates - where - fn ps = Just $! 
ps{dpsObjectsOwtPool = Map.delete objectId (dpsObjectsOwtPool ps)} - updateBufferedObject - now - ObjectAccepted - st@DecisionGlobalState - { dgsPeerStates - , dgsObjectsPending - , dgsObjectReferenceCounts - , dgsRententionTimeouts - , dgsObjectsOwtPool - } = - st - { dgsPeerStates = dgsPeerStates' - , dgsObjectsPending = dgsObjectsPending' - , dgsRententionTimeouts = dgsRententionTimeouts' - , dgsObjectReferenceCounts = dgsObjectReferenceCounts' - , dgsObjectsOwtPool = dgsObjectsOwtPool' - } - where - dgsObjectsOwtPool' = - Map.update - (\case 1 -> Nothing; n -> Just $! pred n) - objectId - dgsObjectsOwtPool - - dgsRententionTimeouts' = Map.alter fn (addTime dpMinObtainedButNotAckedObjectsLifetime now) dgsRententionTimeouts - where - fn :: Maybe [objectId] -> Maybe [objectId] - fn Nothing = Just [objectId] - fn (Just objectIds) = Just $! (objectId : objectIds) - - dgsObjectReferenceCounts' = Map.alter fn objectId dgsObjectReferenceCounts + updateObjectsOwtPool :: + Time -> + objectId -> + object -> + DecisionGlobalState peerAddr objectId object -> + DecisionGlobalState peerAddr objectId object + updateObjectsOwtPool + now + objectId + st@DecisionGlobalState + { dgsPeerStates + , dgsObjectsPending + , dgsObjectsLiveMultiplicities + , dgsRententionTimeouts + , dgsObjectsOwtPoolMultiplicities + } = + st + { dgsPeerStates = dgsPeerStates' + , dgsObjectsPending = dgsObjectsPending' + , dgsRententionTimeouts = dgsRententionTimeouts' + , dgsObjectsLiveMultiplicities = dgsObjectsLiveMultiplicities' + , dgsObjectsOwtPoolMultiplicities = dgsObjectsOwtPoolMultiplicities' + } where - fn :: Maybe Int -> Maybe Int - fn Nothing = Just 1 - fn (Just n) = Just $! succ n + dgsObjectsOwtPoolMultiplicities' = + Map.update + (\case 1 -> Nothing; n -> Just $! 
pred n) + objectId + dgsObjectsOwtPoolMultiplicities + + dgsRententionTimeouts' = + Map.alter + (\case Nothing -> Just [objectId]; Just objectIds -> Just (objectId : objectIds)) + (addTime dpMinObtainedButNotAckedObjectsLifetime now) + dgsRententionTimeouts + + dgsObjectsLiveMultiplicities' = + Map.alter + (\case Nothing -> Just 1; Just n -> Just $! succ n) + objectId + dgsObjectsLiveMultiplicities - dgsObjectsPending' = Map.insert objectId (Just object) dgsObjectsPending + dgsObjectsPending' = + Map.insert + objectId + (Just object) + dgsObjectsPending - dgsPeerStates' = Map.update fn peerAddr dgsPeerStates - where - fn ps = Just $! ps{dpsObjectsOwtPool = Map.delete objectId (dpsObjectsOwtPool ps)} \ No newline at end of file + dgsPeerStates' = + Map.update + (\ps -> Just $! ps{dpsObjectsPending = Map.insert objectId object (dpsObjectsPending ps)}) + peerAddr + dgsPeerStates \ No newline at end of file diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Types.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Types.hs index 535dd7b3ba..7ab9bde0b1 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Types.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Types.hs @@ -1,11 +1,12 @@ {-# LANGUAGE DeriveGeneric #-} -{-# LANGUAGE DerivingStrategies #-} {-# LANGUAGE ExistentialQuantification #-} {-# LANGUAGE FlexibleContexts #-} {-# LANGUAGE GADTs #-} {-# LANGUAGE ImportQualifiedPost #-} {-# LANGUAGE NamedFieldPuns #-} {-# LANGUAGE ScopedTypeVariables #-} +{-# LANGUAGE GeneralizedNewtypeDeriving #-} +{-# LANGUAGE DerivingVia #-} module Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.Types ( -- * DecisionPeerState @@ -16,8 +17,9 @@ module Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.Types -- * 
Decisions , PeerDecision (..) - , emptyPeerDecision + , mempty , TraceDecisionLogic (..) + , ObjectMultiplicity (..) -- * Reporting , ObjectDiffusionCounters (..) @@ -47,11 +49,20 @@ import Ouroboros.Network.Protocol.ObjectDiffusion.Type import System.Random (StdGen) import Data.Word (Word64) import Ouroboros.Network.ControlMessage (ControlMessage) +import Control.DeepSeq (NFData) +import Quiet (Quiet (..)) -- -- DecisionPeerState, DecisionGlobalState -- + +-- | In all the fields' names, +-- If "Ids" appears at the beginning of a name field, it means we refer to IDs +-- specifically (i.e. before the corresponding object is in flight). +-- On the other hand, a field name of the form "Objects...Ids" means we are +-- speaking of objects (i.e. after they have been requested) but identify them +-- by their IDs for this field purpose. data DecisionPeerState objectId object = DecisionPeerState { dpsOutstandingFifo :: !(StrictSeq objectId) -- ^ Those objects (by their identifier) that the client has told @@ -78,22 +89,16 @@ data DecisionPeerState objectId object = DecisionPeerState -- TODO: for object diffusion, every requested object must be received, so -- we don't need to track this. But we should disconnect if the peer hasn't -- sent us exactly the requested object. - , dpsScore :: !Double - -- ^ Score is a metric that tracks how usefull a peer has been. - -- The larger the value the less usefull peer. It slowly decays towards - -- zero. - , dpsScoreLastUpdatedAt :: !Time - -- ^ Timestamp for the last time `dpsScore` was updated. , dpsObjectsPending :: !(Map objectId object) -- ^ A set of objects downloaded from the peer. They are not yet -- acknowledged and haven't been sent to the objectpool yet. 
-- -- Life cycle of entries: - -- * added when a object is downloaded in `collectObjectsImpl` + -- * added when a object is downloaded in `handleReceivedObjectsImpl` -- * removed by `acknowledgeObjectIds` (to properly follow `dpsOutstandingFifo`) , dpsObjectsOwtPool :: !(Map objectId object) -- ^ A set of objects on their way to the objectpool. - -- Tracked here so that we can cleanup `dgsObjectsOwtPool` if the + -- Tracked here so that we can cleanup `dgsObjectsOwtPoolMultiplicities` if the -- peer dies. -- -- Life cycle of entries: @@ -112,14 +117,14 @@ instance -- | Shared state of all `ObjectDiffusion` clients. -- -- New `objectId` enters `dpsOutstandingFifo` it is also added to `dpsIdsAvailable` --- and `dgsObjectReferenceCounts` (see `acknowledgeObjectIdsImpl`). +-- and `dgsObjectsLiveMultiplicities` (see `acknowledgeObjectIdsImpl`). -- --- When the requested object arrives, the corresponding entry is removed from `dgsObjectsInflightMultiplicities` and it is added to `dgsObjectsPending` (see `collectObjectsImpl`). +-- When the requested object arrives, the corresponding entry is removed from `dgsObjectsInflightMultiplicities` and it is added to `dgsObjectsPending` (see `handleReceivedObjectsImpl`). -- -- Whenever we choose an `objectId` to acknowledge (either in `acknowledObjectsIds`, --- `collectObjectsImpl` or +-- `handleReceivedObjectsImpl` or -- `pickObjectsToDownload`, we also --- recalculate `dgsObjectReferenceCounts` and only keep live `objectId`s in other maps (e.g. +-- recalculate `dgsObjectsLiveMultiplicities` and only keep live `objectId`s in other maps (e.g. -- `dpsIdsAvailable`, `dgsObjectsPending`). 
data DecisionGlobalState peerAddr objectId object = DecisionGlobalState { dgsPeerStates :: !(Map peerAddr (DecisionPeerState objectId object)) @@ -128,7 +133,7 @@ data DecisionGlobalState peerAddr objectId object = DecisionGlobalState -- /Invariant:/ for peerAddr's which are registered using `withPeer`, -- there's always an entry in this map even if the set of `objectId`s is -- empty. - , dgsObjectsInflightMultiplicities :: !(Map objectId Int) + , dgsObjectsInflightMultiplicities :: !(Map objectId ObjectMultiplicity) -- ^ Map from object ids of objects which are in-flight (have already been -- requested) to their multiplicities (from how many peers it is -- currently in-flight) @@ -149,18 +154,18 @@ data DecisionGlobalState peerAddr objectId object = DecisionGlobalState -- -- This map is useful to acknowledge `objectId`s: it's basically taking the -- longest prefix which contains entries in `dgsObjectsPending` - , dgsObjectReferenceCounts :: !(Map objectId Int) + , dgsObjectsLiveMultiplicities :: !(Map objectId ObjectMultiplicity) -- ^ We track reference counts of all unacknowledged and dgsRententionTimeouts objectIds. -- Once the count reaches 0, a object is removed from `dgsObjectsPending`. -- - -- The `dgsObjectsOwtPool` map contains a subset of `objectId` which - -- `dgsObjectReferenceCounts` contains. + -- The `dgsObjectsOwtPoolMultiplicities` map contains a subset of `objectId` which + -- `dgsObjectsLiveMultiplicities` contains. -- -- /Invariants:/ -- -- * the objectId count is equal to multiplicity of objectId in all -- `dpsOutstandingFifo` sequences; - -- * @Map.keysSet dgsObjectsPending `Set.isSubsetOf` Map.keysSet dgsObjectReferenceCounts@; + -- * @Map.keysSet dgsObjectsPending `Set.isSubsetOf` Map.keysSet dgsObjectsLiveMultiplicities@; -- * all counts are positive integers. 
, dgsRententionTimeouts :: !(Map Time [objectId]) -- ^ A set of timeouts for objectIds that have been added to dgsObjectsPending after being @@ -170,8 +175,8 @@ data DecisionGlobalState peerAddr objectId object = DecisionGlobalState -- acknowledge this `objectId` to all peers, when a peer from another -- continent presents us it again. -- - -- Every objectId entry has a reference count in `dgsObjectReferenceCounts`. - , dgsObjectsOwtPool :: !(Map objectId Int) + -- Every objectId entry has a reference count in `dgsObjectsLiveMultiplicities`. + , dgsObjectsOwtPoolMultiplicities :: !(Map objectId ObjectMultiplicity) -- ^ A set of objectIds that have been downloaded by a peer and are on their -- way to the objectpool. We won't issue further fetch-requests for objects in -- this state. We track these objects to not re-download them from another @@ -251,36 +256,35 @@ instance Ord objectId => Semigroup (PeerDecision objectId object) where , pdObjectsToReqIds = pdObjectsToReqIds <> pdObjectsToReqIds' , pdObjectsOwtPool = pdObjectsOwtPool <> pdObjectsOwtPool' } - --- | A no-op decision. -emptyPeerDecision :: PeerDecision objectId object -emptyPeerDecision = - PeerDecision +instance Ord objectId => Monoid (PeerDecision objectId object) where + mempty = PeerDecision { pdIdsToAck = 0 , pdIdsToReq = 0 , pdCanPipelineIdsReq = False - , pdObjectsToReqIds = Map.empty - , pdObjectsOwtPool = mempty + , pdObjectsToReqIds = Set.empty + , pdObjectsOwtPool = Map.empty } -- | ObjectLogic tracer. 
data TraceDecisionLogic peerAddr objectId object - = TraceDecisionGlobalState String (DecisionGlobalState peerAddr objectId object) - | TracePeerDecisions (Map peerAddr (PeerDecision objectId object)) + = TraceDecisionLogicGlobalStateUpdated String (DecisionGlobalState peerAddr objectId object) + | TraceDecisionLogicDecisionsMade (Map peerAddr (PeerDecision objectId object)) deriving Show data ObjectDiffusionCounters = ObjectDiffusionCounters { odcNumObjectsAvailable :: Int -- ^ objectIds which are not yet downloaded. This is a diff of keys sets of - -- `dgsObjectReferenceCounts` and a sum of `dgsObjectsPending` and + -- `dgsObjectsLiveMultiplicities` and a sum of `dgsObjectsPending` and -- `inbubmissionToObjectPoolObjects` maps. , odcNumObjectsInFlight :: Int -- ^ number of all in-flight objects. , odcNumObjectsPending :: Int -- ^ number of all buffered objects (downloaded or not available) , odcNumObjectsOwtPool :: Int - -- ^ number of all object's which were submitted to the objectpool + -- ^ number of distinct objects which are waiting to be added to the + -- objectpool (each peer need to acquire the semaphore to effectively add + -- them to the pool) } deriving (Eq, Show) @@ -292,18 +296,18 @@ makeObjectDiffusionCounters DecisionGlobalState { dgsObjectsInflightMultiplicities , dgsObjectsPending - , dgsObjectReferenceCounts - , dgsObjectsOwtPool + , dgsObjectsLiveMultiplicities + , dgsObjectsOwtPoolMultiplicities } = ObjectDiffusionCounters { odcNumObjectsAvailable = Set.size $ - Map.keysSet dgsObjectReferenceCounts + Map.keysSet dgsObjectsLiveMultiplicities Set.\\ Map.keysSet dgsObjectsPending - Set.\\ Map.keysSet dgsObjectsOwtPool + Set.\\ Map.keysSet dgsObjectsOwtPoolMultiplicities , odcNumObjectsPending = Map.size dgsObjectsPending - , odcNumObjectsOwtPool = Map.size dgsObjectsOwtPool - , odcNumObjectsInFlight = getSum $ foldMap Sum dgsObjectsInflightMultiplicities + , odcNumObjectsOwtPool = Map.size dgsObjectsOwtPoolMultiplicities + , odcNumObjectsInFlight 
= fromIntegral $ mconcat (Map.elems dgsObjectsInflightMultiplicities) } data ObjectDiffusionInitDelay @@ -320,13 +324,27 @@ newtype NumObjectsProcessed = NumObjectsProcessed { getNumObjectsProcessed :: Word64 } - deriving (Eq, Show) + deriving (Eq, Ord, NFData, Generic) + deriving newtype (Num, Enum, Real, Integral, Bounded, NoThunks) + deriving (Semigroup) via (Sum Word64) + deriving (Monoid) via (Sum Word64) + deriving (Show) via (Quiet NumObjectsProcessed) + +newtype ObjectMultiplicity + = ObjectMultiplicity + { getObjectMultiplicity :: Word64 + } + deriving (Eq, Ord, NFData, Generic) + deriving newtype (Num, Enum, Real, Integral, Bounded, NoThunks) + deriving (Semigroup) via (Sum Word64) + deriving (Monoid) via (Sum Word64) + deriving (Show) via (Quiet ObjectMultiplicity) data TraceObjectDiffusionInbound objectId object = -- | Number of objects just about to be inserted. TraceObjectDiffusionInboundCollectedObjects Int | -- | Just processed object pass/fail breakdown. - TraceObjectDiffusionInboundAddedObjects NumObjectsProcessed + TraceObjectDiffusionInboundAddedObjects Int | -- | Received a 'ControlMessage' from the outbound peer governor, and about -- to act on it. 
TraceObjectDiffusionInboundRecvControlMessage ControlMessage From dac65af86ca7e2bba115a481daa99a4e96222cf6 Mon Sep 17 00:00:00 2001 From: Thomas BAGREL Date: Tue, 7 Oct 2025 17:37:47 +0200 Subject: [PATCH 09/43] WIP: cleaning State.hs file --- .../ObjectDiffusion/Inbound/V2.hs | 55 +-- .../ObjectDiffusion/Inbound/V2/Decision.hs | 214 ++++++++-- .../ObjectDiffusion/Inbound/V2/Policy.hs | 7 +- .../ObjectDiffusion/Inbound/V2/Registry.hs | 7 - .../ObjectDiffusion/Inbound/V2/State.hs | 374 +++--------------- .../ObjectDiffusion/Inbound/V2/Types.hs | 102 ++--- 6 files changed, 314 insertions(+), 445 deletions(-) diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2.hs index 58e100ea79..ef8b0c6cec 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2.hs @@ -38,9 +38,10 @@ import qualified Data.Set as Set import Network.TypedProtocol import Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.Policy import Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.Registry -import Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.State +import Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.State (newDecisionGlobalStateVar) import Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.Types as V2 import Ouroboros.Network.Protocol.ObjectDiffusion.Inbound +import Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.ObjectPool.API -- | A object-submission inbound side (server, sic!). 
-- @@ -56,37 +57,37 @@ objectDiffusionInbound :: ) => Tracer m (TraceObjectDiffusionInbound objectId object) -> ObjectDiffusionInitDelay -> - ObjectPoolWriter objectId object ticketNo m -> + ObjectPoolWriter objectId object m -> InboundPeerAPI m objectId object -> - ObjectDiffusionServerPipelined objectId object m () + ObjectDiffusionInboundPipelined objectId object m () objectDiffusionInbound tracer initDelay - ObjectPoolWriter{objectId} + ObjectPoolWriter{} InboundPeerAPI { readPeerDecision , handleReceivedIds , handleReceivedObjects - , submitObjectToPool + , submitObjectsToPool } = - ObjectDiffusionServerPipelined $ do + ObjectDiffusionInboundPipelined $ do case initDelay of ObjectDiffusionInitDelay delay -> threadDelay delay NoObjectDiffusionInitDelay -> return () serverIdle where serverIdle :: - m (ServerStIdle Z objectId object m ()) + m (InboundStIdle Z objectId object m ()) serverIdle = do -- Block on next decision. object@PeerDecision { pdObjectsToReqIds = pdObjectsToReqIds - , pdObjectsOwtPool = pdObjectsOwtPool + , pdObjectsToPool = pdObjectsToPool } <- readPeerDecision - traceWith tracer (TraceObjectInboundDecision object) + traceWith tracer (TraceObjectDiffusionInboundDecisionReceived object) - let !collected = length listOf[(objectId, object)] + let !collected = length undefined -- Only attempt to add objects if we have some work to do when (collected > 0) $ do @@ -95,30 +96,30 @@ objectDiffusionInbound -- \* `TraceObjectInboundAddedToObjectPool`, and -- \* `TraceObjectInboundRejectedFromObjectPool` -- events. - mapM_ (uncurry $ submitObjectToPool tracer) listOf[(objectId, object)] + mapM_ undefined undefined -- (uncurry $ submitObjectsToPool undefined) undefined -- TODO: -- We can update the state so that other `object-submission` servers will -- not try to add these objects to the objectpool. 
- if Map.null pdObjectsToReqIds + if Set.null pdObjectsToReqIds then serverReqObjectIds Zero object else serverReqObjects object -- Pipelined request of objects serverReqObjects :: PeerDecision objectId object -> - m (ServerStIdle Z objectId object m ()) + m (InboundStIdle Z objectId object m ()) serverReqObjects object@PeerDecision{pdObjectsToReqIds = pdObjectsToReqIds} = pure $ SendMsgRequestObjectsPipelined - pdObjectsToReqIds + (Set.toList pdObjectsToReqIds) (serverReqObjectIds (Succ Zero) object) serverReqObjectIds :: forall (n :: N). Nat n -> PeerDecision objectId object -> - m (ServerStIdle n objectId object m ()) + m (InboundStIdle n objectId object m ()) serverReqObjectIds n PeerDecision{pdIdsToReq = 0} = @@ -141,14 +142,14 @@ objectDiffusionInbound objectIdsToAck objectIdsToReq -- Our result if the client terminates the protocol - (traceWith tracer TraceObjectInboundTerminated) + -- (traceWith tracer TraceObjectDiffusionInboundTerminated) ( \objectIds -> do let objectIds' = NonEmpty.toList objectIds - objectIdsSeq = StrictSeq.fromList $ fst <$> objectIds' + receivedIdsSeq = StrictSeq.fromList $ fst <$> objectIds' objectIdsMap = Map.fromList objectIds' - unless (StrictSeq.length objectIdsSeq <= fromIntegral objectIdsToReq) $ + when (StrictSeq.length receivedIdsSeq > fromIntegral objectIdsToReq) $ throwIO ProtocolErrorObjectIdsNotRequested - handleReceivedIds objectIdsToReq objectIdsSeq objectIdsMap + handleReceivedIds objectIdsToReq receivedIdsSeq objectIdsMap serverIdle ) serverReqObjectIds @@ -182,7 +183,7 @@ objectDiffusionInbound handleReplies :: forall (n :: N). Nat (S n) -> - m (ServerStIdle (S n) objectId object m ()) + m (InboundStIdle (S n) objectId object m ()) handleReplies (Succ n'@Succ{}) = pure $ CollectPipelined @@ -196,27 +197,27 @@ objectDiffusionInbound handleReply :: forall (n :: N). 
- m (ServerStIdle n objectId object m ()) -> + m (InboundStIdle n objectId object m ()) -> -- continuation Collect objectId object -> - m (ServerStIdle n objectId object m ()) + m (InboundStIdle n objectId object m ()) handleReply k = \case CollectObjectIds objectIdsToReq objectIds -> do - let objectIdsSeq = StrictSeq.fromList $ fst <$> objectIds + let receivedIdsSeq = StrictSeq.fromList $ fst <$> objectIds objectIdsMap = Map.fromList objectIds - unless (StrictSeq.length objectIdsSeq <= fromIntegral objectIdsToReq) $ + unless (StrictSeq.length receivedIdsSeq <= fromIntegral objectIdsToReq) $ throwIO ProtocolErrorObjectIdsNotRequested - handleReceivedIds objectIdsToReq objectIdsSeq objectIdsMap + handleReceivedIds objectIdsToReq receivedIdsSeq objectIdsMap k CollectObjects objectIds objects -> do let requested = Map.keysSet objectIds - received = Map.fromList [(objectId object, object) | object <- objects] + received = Map.fromList undefined unless (Map.keysSet received `Set.isSubsetOf` requested) $ throwIO ProtocolErrorObjectNotRequested mbe <- handleReceivedObjects objectIds received - traceWith tracer $ TraceObjectDiffusionCollected (objectId `map` objects) + traceWith tracer $ TraceObjectDiffusionCollected (getId `map` objects) case mbe of -- one of `object`s had a wrong size Just e -> diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Decision.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Decision.hs index 2c69f783fb..ed37189db7 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Decision.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Decision.hs @@ -96,7 +96,7 @@ data DecisionInternalState peerAddr objectId object , disIdsToAckMultiplicities :: !(Map objectId ObjectMultiplicity) -- ^ acknowledged 
`objectId` with multiplicities. It is used to update -- `dgsObjectsLiveMultiplicities`. - , disObjectsOwtPoolIds :: Map objectId object + , disObjectsOwtPoolIds :: Set objectId -- ^ objects on their way to the objectpool. Used to prevent issueing new -- fetch requests for them. } @@ -134,7 +134,6 @@ pickObjectsToDownload sharedState@DecisionGlobalState { dgsPeerStates , dgsObjectsInflightMultiplicities - , dgsObjectsPending , dgsObjectsOwtPoolMultiplicities , dgsObjectsLiveMultiplicities } = @@ -147,7 +146,7 @@ pickObjectsToDownload , disNumObjectsInflight = fromIntegral $ Map.foldl' (+) 0 dgsObjectsInflightMultiplicities -- Thomas: not sure here if we must count disctinct objects in flight, or total number of objects in flight (considering multiplicities) , disIdsToAckMultiplicities = Map.empty - , disObjectsOwtPoolIds = dgsObjectsOwtPoolMultiplicities + , disObjectsOwtPoolIds = Map.keysSet dgsObjectsOwtPoolMultiplicities } >>> gn where @@ -181,7 +180,7 @@ pickObjectsToDownload then let ( numObjectIdsToAck , numObjectIdsToReq - , pdObjectsOwtPool + , pdObjectsToPool , RefCountDiff{rcdIdsToAckMultiplicities} , peerObjectState' ) = acknowledgeObjectIds decisionPolicy sharedState peerObjectState @@ -189,7 +188,7 @@ pickObjectsToDownload disIdsToAckMultiplicities' = Map.unionWith (+) disIdsToAckMultiplicities rcdIdsToAckMultiplicities disObjectsOwtPoolIds' = disObjectsOwtPoolIds - <> Map.keysSet pdObjectsOwtPool + <> Map.keysSet pdObjectsToPool in if dpsNumIdsInflight peerObjectState' > 0 then -- we have objectIds to request @@ -208,7 +207,7 @@ pickObjectsToDownload . dpsOutstandingFifo $ peerObjectState' , pdObjectsToReqIds = Set.empty - , pdObjectsOwtPool = pdObjectsOwtPool + , pdObjectsToPool = pdObjectsToPool } ) ) @@ -259,7 +258,7 @@ pickObjectsToDownload disObjectsInflightMultiplicities -- remove `object`s which were already downloaded by some -- other peer or are in-flight or unknown by this peer. 
- `Set.unions` ( Map.keysSet dgsObjectsPending + `Set.unions` ( Map.keysSet dgsObjectsLiveMultiplicities <> dpsObjectsInflightIds <> dpsObjectsRequestedButNotReceivedIds <> disObjectsOwtPoolIds @@ -276,7 +275,7 @@ pickObjectsToDownload ( numObjectIdsToAck , numObjectIdsToReq - , pdObjectsOwtPool + , pdObjectsToPool , RefCountDiff{rcdIdsToAckMultiplicities} , peerObjectState'' ) = acknowledgeObjectIds decisionPolicy sharedState peerObjectState' @@ -293,7 +292,7 @@ pickObjectsToDownload disObjectsOwtPoolIds' = disObjectsOwtPoolIds - <> Set.fromList (map fst pdObjectsOwtPool) + <> Set.fromList (map fst pdObjectsToPool) in if dpsNumIdsInflight peerObjectState'' > 0 then -- we can request `objectId`s & `object`s @@ -314,7 +313,7 @@ pickObjectsToDownload $ peerObjectState'' , pdIdsToReq = numObjectIdsToReq , pdObjectsToReqIds = pdObjectsToReqIdsMap - , pdObjectsOwtPool = pdObjectsOwtPool + , pdObjectsToPool = pdObjectsToPool } ) ) @@ -360,18 +359,11 @@ pickObjectsToDownload dgsObjectsLiveMultiplicities disIdsToAckMultiplicities - liveSet = Map.keysSet dgsObjectsLiveMultiplicities' - - dgsObjectsPending' = - dgsObjectsPending - `Map.restrictKeys` liveSet - dgsObjectsOwtPoolMultiplicities' = List.foldl' updateInSubmissionToObjectPoolObjects dgsObjectsOwtPoolMultiplicities as in ( sharedState { dgsPeerStates = dgsPeerStates' , dgsObjectsInflightMultiplicities = disObjectsInflightMultiplicities - , dgsObjectsPending = dgsObjectsPending' , dgsObjectsLiveMultiplicities = dgsObjectsLiveMultiplicities' , dgsObjectsOwtPoolMultiplicities = dgsObjectsOwtPoolMultiplicities' } @@ -382,9 +374,9 @@ pickObjectsToDownload { pdIdsToAck = 0 , pdIdsToReq = 0 , pdObjectsToReqIds - , pdObjectsOwtPool } + , pdObjectsToPool } | null pdObjectsToReqIds - , Map.null pdObjectsOwtPool -> + , Map.null pdObjectsToPool -> Nothing _ -> Just (a, b) ) @@ -396,8 +388,8 @@ pickObjectsToDownload Map objectId Int -> (a, PeerDecision objectId object) -> Map objectId Int - updateInSubmissionToObjectPoolObjects 
m (_, PeerDecision{pdObjectsOwtPool}) = - List.foldl' fn m (Map.toList pdObjectsOwtPool) + updateInSubmissionToObjectPoolObjects m (_, PeerDecision{pdObjectsToPool}) = + List.foldl' fn m (Map.toList pdObjectsToPool) where fn :: Map objectId Int -> @@ -427,7 +419,6 @@ filterActivePeers } globalState@DecisionGlobalState { dgsPeerStates - , dgsObjectsPending , dgsObjectsInflightMultiplicities , dgsObjectsOwtPoolMultiplicities } @@ -435,7 +426,7 @@ filterActivePeers where unrequestable = Map.keysSet (Map.filter (>= dpMaxObjectInflightMultiplicity) dgsObjectsInflightMultiplicities) - <> Map.keysSet dgsObjectsPending + <> Map.keysSet dgsObjectsLiveMultiplicities fn :: DecisionPeerState objectId object -> Bool fn @@ -507,3 +498,180 @@ foldWithState f = foldr cons nil nil :: s -> (s, Map b c) nil = \ !s -> (s, Map.empty) + +-- +-- Pure public API +-- + +acknowledgeObjectIds :: + forall peerAddr object objectId. + Ord objectId => + HasCallStack => + DecisionPolicy -> + DecisionGlobalState peerAddr objectId object -> + DecisionPeerState objectId object -> + -- | number of objectId to acknowledge, requests, objects which we can submit to the + -- objectpool, objectIds to acknowledge with multiplicities, updated DecisionPeerState. + ( NumObjectIdsAck + , NumObjectIdsReq + , Map objectId object + -- ^ objectsOwtPool + , RefCountDiff objectId + , DecisionPeerState objectId object + ) +{-# INLINE acknowledgeObjectIds #-} +acknowledgeObjectIds + decisionPolicy + globalState + ps@DecisionPeerState + { dpsIdsAvailable + , dpsNumIdsInflight + , dpsObjectsPending + , dpsObjectsOwtPool + } = + -- We can only acknowledge objectIds when we can request new ones, since + -- a `MsgRequestObjectIds` for 0 objectIds is a protocol error. 
+ if pdIdsToReq > 0 + then + ( pdIdsToAck + , pdIdsToReq + , objectsOwtPool + , refCountDiff + , ps + { dpsOutstandingFifo = dpsOutstandingFifo' + , dpsIdsAvailable = dpsIdsAvailable' + , dpsNumIdsInflight = + dpsNumIdsInflight + + pdIdsToReq + , dpsObjectsPending = dpsObjectsPending' + , dpsObjectsOwtPool = dpsObjectsOwtPool' + } + ) + else + ( 0 + , 0 + , objectsOwtPool + , RefCountDiff Map.empty + , ps{dpsObjectsOwtPool = dpsObjectsOwtPool'} + ) + where + -- Split `dpsOutstandingFifo'` into the longest prefix of `objectId`s which + -- can be acknowledged and the unacknowledged `objectId`s. + (pdIdsToReq, acknowledgedObjectIds, dpsOutstandingFifo') = + splitAcknowledgedObjectIds decisionPolicy globalState ps + + objectsOwtPoolList = + [ (objectId, object) + | objectId <- toList toObjectPoolObjectIds + , objectId `Map.notMember` dgsObjectsLiveMultiplicities globalState + , object <- maybeToList $ objectId `Map.lookup` dpsObjectsPending + ] + (toObjectPoolObjectIds, _) = + StrictSeq.spanl (`Map.member` dpsObjectsPending) acknowledgedObjectIds + + objectsOwtPool = Map.fromList objectsOwtPoolList + + dpsObjectsOwtPool' = dpsObjectsOwtPool <> objectsOwtPool + + (dpsObjectsPending', ackedDownloadedObjects) = Map.partitionWithKey (\objectId _ -> objectId `Set.member` liveSet) dpsObjectsPending + -- lateObjects: objects which were downloaded by another peer before we + -- downloaded them; it relies on that `objectToObjectPool` filters out + -- `dgsObjectsPending`. + lateObjects = + Map.filterWithKey + (\objectId _ -> objectId `Map.notMember` objectsOwtPool) + ackedDownloadedObjects + + -- the set of live `objectIds` + liveSet = Set.fromList (toList dpsOutstandingFifo') + dpsIdsAvailable' = dpsIdsAvailable `Set.intersection` liveSet + + -- We remove all acknowledged `objectId`s which are not in + -- `dpsOutstandingFifo''`, but also return the unknown set before any + -- modifications (which is used to compute `dpsOutstandingFifo''` + -- above). 
+ + refCountDiff = + RefCountDiff $ + foldr + (Map.alter fn) + Map.empty + acknowledgedObjectIds + where + fn :: Maybe Int -> Maybe Int + fn Nothing = Just 1 + fn (Just n) = Just $! n + 1 + + pdIdsToAck :: NumObjectIdsAck + pdIdsToAck = fromIntegral $ StrictSeq.length acknowledgedObjectIds + +-- | Split unacknowledged objectIds into acknowledged and unacknowledged parts, also +-- return number of objectIds which can be requested. +splitAcknowledgedObjectIds :: + Ord objectId => + HasCallStack => + DecisionPolicy -> + DecisionGlobalState peer objectId object -> + DecisionPeerState objectId object -> + -- | number of objectIds to request, acknowledged objectIds, unacknowledged objectIds + (NumObjectIdsReq, StrictSeq.StrictSeq objectId, StrictSeq.StrictSeq objectId) +splitAcknowledgedObjectIds + DecisionPolicy + { dpMaxNumObjectsOutstanding + , dpMaxNumObjectIdsReq + } + DecisionGlobalState + { dgsObjectsLiveMultiplicities + } + DecisionPeerState + { dpsOutstandingFifo + , dpsObjectsPending + , dpsObjectsInflightIds + , dpsNumIdsInflight + } = + (pdIdsToReq, acknowledgedObjectIds', dpsOutstandingFifo') + where + (acknowledgedObjectIds', dpsOutstandingFifo') = + StrictSeq.spanl + ( \objectId -> + ( objectId `Map.member` dgsObjectsLiveMultiplicities + || objectId `Set.member` dpsObjectsRequestedButNotReceivedIds + || objectId `Map.member` dpsObjectsPending + ) + && objectId `Set.notMember` dpsObjectsInflightIds + ) + dpsOutstandingFifo + numOfUnacked = StrictSeq.length dpsOutstandingFifo + numOfAcked = StrictSeq.length acknowledgedObjectIds' + unackedAndRequested = fromIntegral numOfUnacked + dpsNumIdsInflight + + pdIdsToReq = + assert (unackedAndRequested <= dpMaxNumObjectsOutstanding) $ + assert (dpsNumIdsInflight <= dpMaxNumObjectIdsReq) $ + (dpMaxNumObjectsOutstanding - unackedAndRequested + fromIntegral numOfAcked) + `min` (dpMaxNumObjectIdsReq - dpsNumIdsInflight) + +-- | `RefCountDiff` represents a map of `objectId` which can be acknowledged +-- together with 
their multiplicities. +newtype RefCountDiff objectId = RefCountDiff + { rcdIdsToAckMultiplicities :: Map objectId Int + } + +updateRefCounts :: + Ord objectId => + Map objectId Int -> + RefCountDiff objectId -> + Map objectId Int +updateRefCounts dgsObjectsLiveMultiplicities (RefCountDiff diff) = + Map.merge + (Map.mapMaybeMissing \_ x -> Just x) + (Map.mapMaybeMissing \_ _ -> Nothing) + ( Map.zipWithMaybeMatched \_ x y -> + assert + (x >= y) + if x > y + then Just $! x - y + else Nothing + ) + dgsObjectsLiveMultiplicities + diff \ No newline at end of file diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Policy.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Policy.hs index 20c7a010cc..8d993086db 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Policy.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Policy.hs @@ -1,11 +1,6 @@ -{-# LANGUAGE ImportQualifiedPost #-} - module Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.Policy ( DecisionPolicy (..) , defaultDecisionPolicy - - -- * Re-exports - , NumObjectIdsReq (..) ) where import Control.Monad.Class.MonadTime.SI @@ -26,7 +21,7 @@ data DecisionPolicy = DecisionPolicy -- ^ from how many peers download the `objectId` simultaneously , dpMinObtainedButNotAckedObjectsLifetime :: !DiffTime -- ^ how long objects that have been added to the objectpool will be - -- kept in the `dgsObjectsPending` cache. + -- kept in the `dgsObjectsLiveMultiplicities` cache. 
} deriving Show diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Registry.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Registry.hs index 59b518d154..bbdda2e153 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Registry.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Registry.hs @@ -178,14 +178,12 @@ withPeer unregisterPeerGlobalState st@DecisionGlobalState { dgsPeerStates - , dgsObjectsPending , dgsObjectsLiveMultiplicities , dgsObjectsInflightMultiplicities , dgsObjectsOwtPoolMultiplicities } = st { dgsPeerStates = dgsPeerStates' - , dgsObjectsPending = dgsObjectsPending' , dgsObjectsLiveMultiplicities = dgsObjectsLiveMultiplicities' , dgsObjectsInflightMultiplicities = dgsObjectsInflightMultiplicities' , dgsObjectsOwtPoolMultiplicities = dgsObjectsOwtPoolMultiplicities' @@ -222,11 +220,6 @@ withPeer dgsObjectsLiveMultiplicities dpsOutstandingFifo - -- Update dgsObjectsPending map to only contain live objects - -- (to reflect the new state of dgsObjectsLiveMultiplicities). - liveSet = Map.keysSet dgsObjectsLiveMultiplicities' - dgsObjectsPending' = Map.restrictKeys dgsObjectsPending liveSet - -- Update dgsInflightMultiplicities map by decreasing the count -- of objects that were in-flight for this peer. 
dgsObjectsInflightMultiplicities' = diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/State.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/State.hs index 47b5922ad1..b39d614f07 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/State.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/State.hs @@ -16,29 +16,19 @@ module Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.State , handleReceivedIds , handleReceivedObjects , submitObjectsToPool - , acknowledgeObjectIds - , splitAcknowledgedObjectIds - , tickTimedObjects - , const_MAX_OBJECT_SIZE_DISCREPENCY - - -- * Internals, only exported for testing purposes: - , RefCountDiff (..) - , updateRefCounts - , handleReceivedIdsImpl - , handleReceivedObjectsImpl ) where import Control.Concurrent.Class.MonadSTM.Strict import Control.Exception (assert) import Control.Monad.Class.MonadTime.SI import Control.Tracer (Tracer, traceWith) -import Data.Foldable (fold, toList) +import Data.Foldable (toList) import Data.Foldable qualified as Foldable import Data.Functor (($>)) import Data.Map.Merge.Strict qualified as Map -import Data.Map.Strict (Map) +import Data.Map.Strict (Map, findWithDefault) import Data.Map.Strict qualified as Map -import Data.Maybe (fromJust, maybeToList) +import Data.Maybe (fromJust) import Data.Sequence.Strict (StrictSeq) import Data.Sequence.Strict qualified as StrictSeq import Data.Set (Set) @@ -47,241 +37,23 @@ import Data.Typeable (Typeable) import GHC.Stack (HasCallStack) import Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.Policy import Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.Types -import Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.ObjectPool.API (SizeInBytes (..), ObjectPoolWriter (opwHasObject, 
opwObjectId)) -import Ouroboros.Network.Protocol.ObjectDiffusion.Type (NumObjectIdsAck (..)) +import Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.ObjectPool.API (ObjectPoolWriter (opwHasObject, opwObjectId)) +import Ouroboros.Network.Protocol.ObjectDiffusion.Type (NumObjectIdsReq) import System.Random (StdGen) --- --- Pure public API --- - -acknowledgeObjectIds :: - forall peerAddr object objectId. - Ord objectId => - HasCallStack => - DecisionPolicy -> - DecisionGlobalState peerAddr objectId object -> - DecisionPeerState objectId object -> - -- | number of objectId to acknowledge, requests, objects which we can submit to the - -- objectpool, objectIds to acknowledge with multiplicities, updated DecisionPeerState. - ( NumObjectIdsAck - , NumObjectIdsReq - , Map objectId object - -- ^ objectsOwtPool - , RefCountDiff objectId - , DecisionPeerState objectId object - ) -{-# INLINE acknowledgeObjectIds #-} -acknowledgeObjectIds - decisionPolicy - globalState - ps@DecisionPeerState - { dpsIdsAvailable - , dpsNumIdsInflight - , dpsObjectsPending - , dpsObjectsOwtPool - } = - -- We can only acknowledge objectIds when we can request new ones, since - -- a `MsgRequestObjectIds` for 0 objectIds is a protocol error. - if pdIdsToReq > 0 - then - ( pdIdsToAck - , pdIdsToReq - , objectsOwtPool - , refCountDiff - , ps - { dpsOutstandingFifo = dpsOutstandingFifo' - , dpsIdsAvailable = dpsIdsAvailable' - , dpsNumIdsInflight = - dpsNumIdsInflight - + pdIdsToReq - , dpsObjectsPending = dpsObjectsPending' - , dpsObjectsOwtPool = dpsObjectsOwtPool' - } - ) - else - ( 0 - , 0 - , objectsOwtPool - , RefCountDiff Map.empty - , ps{dpsObjectsOwtPool = dpsObjectsOwtPool'} - ) - where - -- Split `dpsOutstandingFifo'` into the longest prefix of `objectId`s which - -- can be acknowledged and the unacknowledged `objectId`s. 
- (pdIdsToReq, acknowledgedObjectIds, dpsOutstandingFifo') = - splitAcknowledgedObjectIds decisionPolicy globalState ps - - objectsOwtPoolList = - [ (objectId, object) - | objectId <- toList toObjectPoolObjectIds - , objectId `Map.notMember` dgsObjectsPending globalState - , object <- maybeToList $ objectId `Map.lookup` dpsObjectsPending - ] - (toObjectPoolObjectIds, _) = - StrictSeq.spanl (`Map.member` dpsObjectsPending) acknowledgedObjectIds - - objectsOwtPool = Map.fromList objectsOwtPoolList - - dpsObjectsOwtPool' = dpsObjectsOwtPool <> objectsOwtPool +data SizeInBytes - (dpsObjectsPending', ackedDownloadedObjects) = Map.partitionWithKey (\objectId _ -> objectId `Set.member` liveSet) dpsObjectsPending - -- latexObjects: objects which were downloaded by another peer before we - -- downloaded them; it relies on that `objectToObjectPool` filters out - -- `dgsObjectsPending`. - lateObjects = - Map.filterWithKey - (\objectId _ -> objectId `Map.notMember` objectsOwtPool) - ackedDownloadedObjects - - -- the set of live `objectIds` - liveSet = Set.fromList (toList dpsOutstandingFifo') - dpsIdsAvailable' = dpsIdsAvailable `Set.intersection` liveSet - - -- We remove all acknowledged `objectId`s which are not in - -- `dpsOutstandingFifo''`, but also return the unknown set before any - -- modifications (which is used to compute `dpsOutstandingFifo''` - -- above). - - refCountDiff = - RefCountDiff $ - foldr - (Map.alter fn) - Map.empty - acknowledgedObjectIds - where - fn :: Maybe Int -> Maybe Int - fn Nothing = Just 1 - fn (Just n) = Just $! n + 1 - - pdIdsToAck :: NumObjectIdsAck - pdIdsToAck = fromIntegral $ StrictSeq.length acknowledgedObjectIds - --- | Split unacknowledged objectIds into acknowledged and unacknowledged parts, also --- return number of objectIds which can be requested. 
-splitAcknowledgedObjectIds :: - Ord objectId => - HasCallStack => - DecisionPolicy -> - DecisionGlobalState peer objectId object -> - DecisionPeerState objectId object -> - -- | number of objectIds to request, acknowledged objectIds, unacknowledged objectIds - (NumObjectIdsReq, StrictSeq.StrictSeq objectId, StrictSeq.StrictSeq objectId) -splitAcknowledgedObjectIds - DecisionPolicy - { dpMaxNumObjectsOutstanding - , dpMaxNumObjectIdsReq - } - DecisionGlobalState - { dgsObjectsPending - } - DecisionPeerState - { dpsOutstandingFifo - , dpsObjectsPending - , dpsObjectsInflightIds - , dpsNumIdsInflight - } = - (pdIdsToReq, acknowledgedObjectIds', dpsOutstandingFifo') - where - (acknowledgedObjectIds', dpsOutstandingFifo') = - StrictSeq.spanl - ( \objectId -> - ( objectId `Map.member` dgsObjectsPending - || objectId `Set.member` dpsObjectsRequestedButNotReceivedIds - || objectId `Map.member` dpsObjectsPending - ) - && objectId `Set.notMember` dpsObjectsInflightIds - ) - dpsOutstandingFifo - numOfUnacked = StrictSeq.length dpsOutstandingFifo - numOfAcked = StrictSeq.length acknowledgedObjectIds' - unackedAndRequested = fromIntegral numOfUnacked + dpsNumIdsInflight - - pdIdsToReq = - assert (unackedAndRequested <= dpMaxNumObjectsOutstanding) $ - assert (dpsNumIdsInflight <= dpMaxNumObjectIdsReq) $ - (dpMaxNumObjectsOutstanding - unackedAndRequested + fromIntegral numOfAcked) - `min` (dpMaxNumObjectIdsReq - dpsNumIdsInflight) - --- | `RefCountDiff` represents a map of `objectId` which can be acknowledged --- together with their multiplicities. 
-newtype RefCountDiff objectId = RefCountDiff - { rcdIdsToAckMultiplicities :: Map objectId Int - } - -updateRefCounts :: - Ord objectId => - Map objectId Int -> - RefCountDiff objectId -> - Map objectId Int -updateRefCounts dgsObjectsLiveMultiplicities (RefCountDiff diff) = - Map.merge - (Map.mapMaybeMissing \_ x -> Just x) - (Map.mapMaybeMissing \_ _ -> Nothing) - ( Map.zipWithMaybeMatched \_ x y -> - assert - (x >= y) - if x > y - then Just $! x - y - else Nothing - ) - dgsObjectsLiveMultiplicities - diff - -tickTimedObjects :: - forall peerAddr object objectId. - Ord objectId => - Time -> - DecisionGlobalState peerAddr objectId object -> - DecisionGlobalState peerAddr objectId object -tickTimedObjects - now - st@DecisionGlobalState - { dgsRententionTimeouts - , dgsObjectsLiveMultiplicities - , dgsObjectsPending - } = - let (expiredObjects', dgsRententionTimeouts') = - case Map.splitLookup now dgsRententionTimeouts of - (expired, Just objectIds, timed) -> - ( expired -- Map.split doesn't include the `now` entry in the map - , Map.insert now objectIds timed - ) - (expired, Nothing, timed) -> - (expired, timed) - refDiff = Map.foldl' fn Map.empty expiredObjects' - dgsObjectsLiveMultiplicities' = updateRefCounts dgsObjectsLiveMultiplicities (RefCountDiff refDiff) - liveSet = Map.keysSet dgsObjectsLiveMultiplicities' - dgsObjectsPending' = dgsObjectsPending `Map.restrictKeys` liveSet - in st - { dgsRententionTimeouts = dgsRententionTimeouts' - , dgsObjectsLiveMultiplicities = dgsObjectsLiveMultiplicities' - , dgsObjectsPending = dgsObjectsPending' - } - where - fn :: - Map objectId Int -> - [objectId] -> - Map objectId Int - fn m objectIds = Foldable.foldl' gn m objectIds - - gn :: - Map objectId Int -> - objectId -> - Map objectId Int - gn m objectId = Map.alter af objectId m - - af :: - Maybe Int -> - Maybe Int - af Nothing = Just 1 - af (Just n) = Just $! 
succ n - --- --- Pure internal API --- - --- | Insert received `objectId`s and return the number of objectIds to be acknowledged +strictSeqPartition :: (a -> Bool) -> StrictSeq a -> (StrictSeq a, StrictSeq a) +strictSeqPartition p xs = go xs StrictSeq.empty StrictSeq.empty + where + go StrictSeq.Empty trues falses = (trues, falses) + go (y StrictSeq.:<| ys) trues falses + | p y = + go ys (trues StrictSeq.:|> y) falses + | otherwise = + go ys trues (falses StrictSeq.:|> y) + +-- | Insert received `objectId`s and return the number of objectIds to be acknowledged with next request -- and the updated `DecisionGlobalState`. handleReceivedIdsImpl :: forall peerAddr object objectId. @@ -295,76 +67,47 @@ handleReceivedIdsImpl :: NumObjectIdsReq -> -- | sequence of received `objectIds` StrictSeq objectId -> - -- | received `objectId`s with sizes - Set objectId -> DecisionGlobalState peerAddr objectId object -> DecisionGlobalState peerAddr objectId object handleReceivedIdsImpl - opwHasObject + hasObject peerAddr - reqNo - objectIdsSeq - objectIdsSet + numIdsInitiallyRequested + receivedIdsSeq st@DecisionGlobalState { dgsPeerStates - , dgsObjectsPending , dgsObjectsLiveMultiplicities } = - -- using `alterF` so the update of `DecisionPeerState` is done in one lookup - case Map.alterF - (fmap Just . fn . fromJust) - peerAddr - dgsPeerStates of - (st', dgsPeerStates') -> - st'{dgsPeerStates = dgsPeerStates'} - where - -- update `DecisionPeerState` and return number of `objectId`s to acknowledged and - -- updated `DecisionGlobalState`. 
- fn :: - DecisionPeerState objectId object -> - ( DecisionGlobalState peerAddr objectId object - , DecisionPeerState objectId object - ) - fn - ps@DecisionPeerState - { dpsIdsAvailable - , dpsNumIdsInflight - , dpsOutstandingFifo - } = - (st', ps') - where - -- - -- Handle new `objectId`s - -- - + let peerState = + findWithDefault + (error "ObjectDiffusion.handleReceivedIdsImpl: the peer should appear in dgsPeerStates") + peerAddr + dgsPeerStates + -- Divide the new objectIds in two: those that are already in the objectpool -- and those that are not. We'll request some objects from the latter. - (ignoredObjectIds, dpsIdsAvailableSet) = - Set.partition opwHasObject objectIdsSet + (idsAlreadyInPoolSeq, newIdsAvailableSeq) = + strictSeqPartition hasObject receivedIdsSeq + + -- TODO: Stopped there -- Add all `objectIds` from `dpsIdsAvailableMap` which are not -- unacknowledged or already buffered. Unacknowledged objectIds must have -- already been added to `dpsIdsAvailable` map before. dpsIdsAvailable' = - Set.foldl + StrictSeq.foldl' (\m objectId -> Set.insert objectId m) dpsIdsAvailable ( Set.filter ( \objectId -> objectId `notElem` dpsOutstandingFifo - && objectId `Map.notMember` dgsObjectsPending + && objectId `Map.notMember` dgsObjectsLiveMultiplicities ) - dpsIdsAvailableSet + newIdsAvailableSeq ) -- Add received objectIds to `dpsOutstandingFifo`. - dpsOutstandingFifo' = dpsOutstandingFifo <> objectIdsSeq - - -- Add ignored `objects` to buffered ones. - -- Note: we prefer to keep the `object` if it's already in `dgsObjectsPending`. 
- dgsObjectsPending' = - dgsObjectsPending - <> Map.fromList ((, Nothing) <$> Set.toList ignoredObjectIds) + dpsOutstandingFifo' = dpsOutstandingFifo <> receivedIdsSeq dgsObjectsLiveMultiplicities' = Foldable.foldl' @@ -376,26 +119,21 @@ handleReceivedIdsImpl ) ) dgsObjectsLiveMultiplicities - objectIdsSeq + receivedIdsSeq st' = st - { dgsObjectsPending = dgsObjectsPending' - , dgsObjectsLiveMultiplicities = dgsObjectsLiveMultiplicities' + { dgsObjectsLiveMultiplicities = dgsObjectsLiveMultiplicities' } ps' = assert - (dpsNumIdsInflight >= reqNo) + (dpsNumIdsInflight >= numIdsInitiallyRequested) ps { dpsIdsAvailable = dpsIdsAvailable' , dpsOutstandingFifo = dpsOutstandingFifo' - , dpsNumIdsInflight = dpsNumIdsInflight - reqNo + , dpsNumIdsInflight = dpsNumIdsInflight - numIdsInitiallyRequested } - --- | We check advertised sizes up in a fuzzy way. The advertised and received --- sizes need to agree up to `const_MAX_OBJECT_SIZE_DISCREPENCY`. -const_MAX_OBJECT_SIZE_DISCREPENCY :: SizeInBytes -const_MAX_OBJECT_SIZE_DISCREPENCY = 32 + in undefined handleReceivedObjectsImpl :: forall peerAddr object objectId. @@ -556,17 +294,17 @@ newDecisionGlobalStateVar rng = DecisionGlobalState { dgsPeerStates = Map.empty , dgsObjectsInflightMultiplicities = Map.empty - , dgsObjectsPending = Map.empty , dgsObjectsLiveMultiplicities = Map.empty , dgsRententionTimeouts = Map.empty , dgsObjectsOwtPoolMultiplicities = Map.empty , dgsRng = rng } --- | Acknowledge `objectId`s, return the number of `objectIds` to be acknowledged to the --- remote side. +-- | Wrapper around `handleReceivedIdsImpl`. +-- Obtain the `hasObject` function atomically from the STM context and +-- updates and traces the global state TVar. handleReceivedIds :: - forall m peerAddr ticketNo object objectId. + forall m peerAddr object objectId. 
(MonadSTM m, Ord objectId, Ord peerAddr) => Tracer m (TraceDecisionLogic peerAddr objectId object) -> DecisionGlobalStateVar m peerAddr objectId object -> @@ -578,15 +316,15 @@ handleReceivedIds :: -- | sequence of received `objectIds` StrictSeq objectId -> -- | received `objectId`s - Set objectId -> m () -handleReceivedIds tracer sharedVar objectPoolWriter peerAddr reqNo objectIdsSeq objectIds = do - st <- atomically $ do +handleReceivedIds tracer globalStateVar objectPoolWriter peerAddr numIdsInitiallyRequested receivedIdsSeq = do + globalState' <- atomically $ do hasObject <- opwHasObject objectPoolWriter stateTVar - sharedVar - ((\a -> (a, a)) . handleReceivedIdsImpl hasObject peerAddr reqNo objectIdsSeq objectIds) - traceWith tracer (TraceDecisionLogicGlobalStateUpdated "handleReceivedIds" st) + globalStateVar + ( \globalState -> let globalState' = handleReceivedIdsImpl hasObject peerAddr numIdsInitiallyRequested receivedIdsSeq globalState + in (globalState', globalState') ) + traceWith tracer (TraceDecisionLogicGlobalStateUpdated "handleReceivedIds" globalState') -- | Include received `object`s in `DecisionGlobalState`. Return number of `objectIds` -- to be acknowledged and list of `object` to be added to the objectpool. 
@@ -609,12 +347,12 @@ handleReceivedObjects :: -- | number of objectIds to be acknowledged and objects to be added to the -- objectpool m (Maybe ObjectDiffusionInboundError) -handleReceivedObjects tracer objectSize sharedVar peerAddr objectIdsRequested objectsMap = do +handleReceivedObjects tracer objectSize globalStateVar peerAddr objectIdsRequested objectsMap = do r <- atomically $ do - st <- readTVar sharedVar + st <- readTVar globalStateVar case handleReceivedObjectsImpl objectSize peerAddr objectIdsRequested objectsMap st of r@(Right st') -> - writeTVar sharedVar st' + writeTVar globalStateVar st' $> r r@Left{} -> pure r case r of @@ -649,14 +387,12 @@ submitObjectsToPool tracer objectPoolWriter objects = objectId st@DecisionGlobalState { dgsPeerStates - , dgsObjectsPending , dgsObjectsLiveMultiplicities , dgsRententionTimeouts , dgsObjectsOwtPoolMultiplicities } = st { dgsPeerStates = dgsPeerStates' - , dgsObjectsPending = dgsObjectsPending' , dgsRententionTimeouts = dgsRententionTimeouts' , dgsObjectsLiveMultiplicities = dgsObjectsLiveMultiplicities' , dgsObjectsOwtPoolMultiplicities = dgsObjectsOwtPoolMultiplicities' @@ -680,12 +416,6 @@ submitObjectsToPool tracer objectPoolWriter objects = objectId dgsObjectsLiveMultiplicities - dgsObjectsPending' = - Map.insert - objectId - (Just object) - dgsObjectsPending - dgsPeerStates' = Map.update (\ps -> Just $! 
ps{dpsObjectsPending = Map.insert objectId object (dpsObjectsPending ps)}) diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Types.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Types.hs index 7ab9bde0b1..68c7215f5a 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Types.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Types.hs @@ -82,13 +82,6 @@ data DecisionPeerState objectId object = DecisionPeerState -- ^ A subset of `dpsOutstandingFifo` which were unknown to the peer -- (i.e. requested but not received). We need to track these `objectId`s -- since they need to be acknowledged. - -- - -- We track these `objectId` per peer, rather than in `dgsObjectsPending` map, - -- since that could potentially lead to corrupting the node, not being - -- able to download a `object` which is needed & available from other nodes. - -- TODO: for object diffusion, every requested object must be received, so - -- we don't need to track this. But we should disconnect if the peer hasn't - -- sent us exactly the requested object. , dpsObjectsPending :: !(Map objectId object) -- ^ A set of objects downloaded from the peer. They are not yet -- acknowledged and haven't been sent to the objectpool yet. @@ -119,13 +112,13 @@ instance -- New `objectId` enters `dpsOutstandingFifo` it is also added to `dpsIdsAvailable` -- and `dgsObjectsLiveMultiplicities` (see `acknowledgeObjectIdsImpl`). -- --- When the requested object arrives, the corresponding entry is removed from `dgsObjectsInflightMultiplicities` and it is added to `dgsObjectsPending` (see `handleReceivedObjectsImpl`). +-- When the requested object arrives, the corresponding entry is removed from `dgsObjectsInflightMultiplicities`. 
-- -- Whenever we choose an `objectId` to acknowledge (either in `acknowledObjectsIds`, -- `handleReceivedObjectsImpl` or -- `pickObjectsToDownload`, we also -- recalculate `dgsObjectsLiveMultiplicities` and only keep live `objectId`s in other maps (e.g. --- `dpsIdsAvailable`, `dgsObjectsPending`). +-- `dpsIdsAvailable`). data DecisionGlobalState peerAddr objectId object = DecisionGlobalState { dgsPeerStates :: !(Map peerAddr (DecisionPeerState objectId object)) -- ^ Map of peer states. @@ -139,39 +132,18 @@ data DecisionGlobalState peerAddr objectId object = DecisionGlobalState -- currently in-flight) -- -- This can intersect with `dpsIdsAvailable`. - , dgsObjectsPending :: !(Map objectId (Maybe object)) - -- ^ Map of `object` which: - -- - -- * were downloaded and added to the objectpool, - -- * are already in the objectpool (`Nothing` is inserted in that case), - -- - -- We only keep live `objectId`, e.g. ones which `objectId` is unacknowledged by - -- at least one peer or has a `dgsRententionTimeouts` entry. - -- - -- /Note:/ previous implementation also needed to explicitly track - -- `objectId`s which were already acknowledged, but are still unacknowledged. - -- In this implementation, this is done using reference counting. - -- - -- This map is useful to acknowledge `objectId`s: it's basically taking the - -- longest prefix which contains entries in `dgsObjectsPending` , dgsObjectsLiveMultiplicities :: !(Map objectId ObjectMultiplicity) - -- ^ We track reference counts of all unacknowledged and dgsRententionTimeouts objectIds. - -- Once the count reaches 0, a object is removed from `dgsObjectsPending`. - -- - -- The `dgsObjectsOwtPoolMultiplicities` map contains a subset of `objectId` which - -- `dgsObjectsLiveMultiplicities` contains. + -- ^ We track counts of live objects. + -- An object is added to the live map when it is inflight, and is only removed + -- after the retention timeout expires. 
-- - -- /Invariants:/ + -- The value for any key must be always non-zero (strictly positive). -- - -- * the objectId count is equal to multiplicity of objectId in all - -- `dpsOutstandingFifo` sequences; - -- * @Map.keysSet dgsObjectsPending `Set.isSubsetOf` Map.keysSet dgsObjectsLiveMultiplicities@; - -- * all counts are positive integers. + -- The `dgsObjectsOwtPoolMultiplicities` map contains a subset of `dgsObjectsLiveMultiplicities`. , dgsRententionTimeouts :: !(Map Time [objectId]) - -- ^ A set of timeouts for objectIds that have been added to dgsObjectsPending after being - -- inserted into the objectpool. + -- ^ Objects are kept live for a bit longer after having been added to the objectpool. -- - -- We need these short timeouts to avoid re-downloading a `object`. We could + -- We need these short timeouts to avoid re-downloading a `object`. We could -- acknowledge this `objectId` to all peers, when a peer from another -- continent presents us it again. -- @@ -200,6 +172,16 @@ instance ) => NoThunks (DecisionGlobalState peerAddr objectId object) +-- | Merge dpsIdsAvailable from all peers of the global state. +dgsIdsAvailable :: Ord objectId => DecisionGlobalState peerAddr objectId object -> Set objectId +dgsIdsAvailable DecisionGlobalState{dgsPeerStates} = + Set.unions (dpsIdsAvailable <$> (Map.elems dgsPeerStates)) + +-- | Merge dpsObjectsPending from all peers of the global state. +dgsObjectsPending :: Ord objectId => DecisionGlobalState peerAddr objectId object -> Map objectId object +dgsObjectsPending DecisionGlobalState{dgsPeerStates} = + Map.unions (dpsObjectsPending <$> (Map.elems dgsPeerStates)) + -- -- Decisions -- @@ -225,7 +207,7 @@ data PeerDecision objectId object = PeerDecision -- if we have non-acknowledged `objectId`s. , pdObjectsToReqIds :: !(Set objectId) -- ^ objectId's to download. - , pdObjectsOwtPool :: !(Map objectId object) + , pdObjectsToPool :: !(Set objectId) -- ^ list of `object`s to submit to the objectpool. 
} deriving (Show, Eq) @@ -240,21 +222,21 @@ instance Ord objectId => Semigroup (PeerDecision objectId object) where , pdIdsToReq , pdCanPipelineIdsReq = _ignored , pdObjectsToReqIds - , pdObjectsOwtPool + , pdObjectsToPool } <> PeerDecision { pdIdsToAck = pdIdsToAck' , pdIdsToReq = pdIdsToReq' , pdCanPipelineIdsReq = pdCanPipelineIdsReq' , pdObjectsToReqIds = pdObjectsToReqIds' - , pdObjectsOwtPool = pdObjectsOwtPool' + , pdObjectsToPool = pdObjectsToPool' } = PeerDecision { pdIdsToAck = pdIdsToAck + pdIdsToAck' , pdIdsToReq = pdIdsToReq + pdIdsToReq' , pdCanPipelineIdsReq = pdCanPipelineIdsReq' , pdObjectsToReqIds = pdObjectsToReqIds <> pdObjectsToReqIds' - , pdObjectsOwtPool = pdObjectsOwtPool <> pdObjectsOwtPool' + , pdObjectsToPool = pdObjectsToPool <> pdObjectsToPool' } instance Ord objectId => Monoid (PeerDecision objectId object) where mempty = PeerDecision @@ -262,7 +244,7 @@ instance Ord objectId => Monoid (PeerDecision objectId object) where , pdIdsToReq = 0 , pdCanPipelineIdsReq = False , pdObjectsToReqIds = Set.empty - , pdObjectsOwtPool = Map.empty + , pdObjectsToPool = Set.empty } -- | ObjectLogic tracer. @@ -273,15 +255,17 @@ data TraceDecisionLogic peerAddr objectId object data ObjectDiffusionCounters = ObjectDiffusionCounters - { odcNumObjectsAvailable :: Int - -- ^ objectIds which are not yet downloaded. This is a diff of keys sets of - -- `dgsObjectsLiveMultiplicities` and a sum of `dgsObjectsPending` and - -- `inbubmissionToObjectPoolObjects` maps. - , odcNumObjectsInFlight :: Int + { odcDistinctNumObjectsAvailable :: Int + -- ^ objectIds which are not yet downloaded. + , odcNumDistinctObjectsLive :: Int + -- ^ number of distinct live objects + , odcNumDistinctObjectsInflight :: Int + -- ^ number of distinct in-flight objects. + , odcNumTotalObjectsInflight :: Int -- ^ number of all in-flight objects. 
- , odcNumObjectsPending :: Int - -- ^ number of all buffered objects (downloaded or not available) - , odcNumObjectsOwtPool :: Int + , odcNumDistinctObjectsPending :: Int + -- ^ number of distinct pending objects (downloaded but not acked) + , odcNumDistinctObjectsOwtPool :: Int -- ^ number of distinct objects which are waiting to be added to the -- objectpool (each peer need to acquire the semaphore to effectively add -- them to the pool) @@ -293,21 +277,18 @@ makeObjectDiffusionCounters :: DecisionGlobalState peerAddr objectId object -> ObjectDiffusionCounters makeObjectDiffusionCounters - DecisionGlobalState + dgs@DecisionGlobalState { dgsObjectsInflightMultiplicities - , dgsObjectsPending , dgsObjectsLiveMultiplicities , dgsObjectsOwtPoolMultiplicities } = ObjectDiffusionCounters - { odcNumObjectsAvailable = - Set.size $ - Map.keysSet dgsObjectsLiveMultiplicities - Set.\\ Map.keysSet dgsObjectsPending - Set.\\ Map.keysSet dgsObjectsOwtPoolMultiplicities - , odcNumObjectsPending = Map.size dgsObjectsPending - , odcNumObjectsOwtPool = Map.size dgsObjectsOwtPoolMultiplicities - , odcNumObjectsInFlight = fromIntegral $ mconcat (Map.elems dgsObjectsInflightMultiplicities) + { odcDistinctNumObjectsAvailable = Set.size $ dgsIdsAvailable dgs + , odcNumDistinctObjectsLive = Map.size dgsObjectsLiveMultiplicities + , odcNumDistinctObjectsInflight = Map.size dgsObjectsInflightMultiplicities + , odcNumTotalObjectsInflight = fromIntegral $ mconcat (Map.elems dgsObjectsInflightMultiplicities) + , odcNumDistinctObjectsPending = Map.size $ dgsObjectsPending dgs + , odcNumDistinctObjectsOwtPool = Map.size dgsObjectsOwtPoolMultiplicities } data ObjectDiffusionInitDelay @@ -350,6 +331,7 @@ data TraceObjectDiffusionInbound objectId object TraceObjectDiffusionInboundRecvControlMessage ControlMessage | TraceObjectDiffusionInboundCanRequestMoreObjects Int | TraceObjectDiffusionInboundCannotRequestMoreObjects Int + | TraceObjectDiffusionInboundDecisionReceived (PeerDecision objectId 
object) deriving (Eq, Show) data ObjectDiffusionInboundError From 8775a5d6484fd403adee41dc10dd0ff294ec869f Mon Sep 17 00:00:00 2001 From: Nicolas BACQUEY Date: Wed, 8 Oct 2025 12:31:58 +0200 Subject: [PATCH 10/43] more work on State.hs --- .../ObjectDiffusion/Inbound/V2/Registry.hs | 7 - .../ObjectDiffusion/Inbound/V2/State.hs | 248 +++++++----------- .../ObjectDiffusion/Inbound/V2/Types.hs | 29 +- 3 files changed, 127 insertions(+), 157 deletions(-) diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Registry.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Registry.hs index bbdda2e153..8de5311b4a 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Registry.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Registry.hs @@ -205,13 +205,6 @@ withPeer peerAddr dgsPeerStates - decreaseCount :: Map objectId ObjectMultiplicity -> objectId -> Map objectId ObjectMultiplicity - decreaseCount mmap objectId = - Map.update - (\n -> if n > 1 then Just $! pred n else Nothing) - objectId - mmap - -- Update the dgsObjectsLiveMultiplicities map by decreasing the count of each -- objectId which is part of the dpsOutstandingFifo of this peer. 
dgsObjectsLiveMultiplicities' = diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/State.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/State.hs index b39d614f07..0b289f48ef 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/State.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/State.hs @@ -22,15 +22,14 @@ import Control.Concurrent.Class.MonadSTM.Strict import Control.Exception (assert) import Control.Monad.Class.MonadTime.SI import Control.Tracer (Tracer, traceWith) -import Data.Foldable (toList) import Data.Foldable qualified as Foldable import Data.Functor (($>)) import Data.Map.Merge.Strict qualified as Map import Data.Map.Strict (Map, findWithDefault) import Data.Map.Strict qualified as Map import Data.Maybe (fromJust) -import Data.Sequence.Strict (StrictSeq) -import Data.Sequence.Strict qualified as StrictSeq +import Data.Sequence qualified as Seq +import Data.Sequence.Strict (StrictSeq, fromStrict) import Data.Set (Set) import Data.Set qualified as Set import Data.Typeable (Typeable) @@ -43,18 +42,9 @@ import System.Random (StdGen) data SizeInBytes -strictSeqPartition :: (a -> Bool) -> StrictSeq a -> (StrictSeq a, StrictSeq a) -strictSeqPartition p xs = go xs StrictSeq.empty StrictSeq.empty - where - go StrictSeq.Empty trues falses = (trues, falses) - go (y StrictSeq.:<| ys) trues falses - | p y = - go ys (trues StrictSeq.:|> y) falses - | otherwise = - go ys trues (falses StrictSeq.:|> y) - -- | Insert received `objectId`s and return the number of objectIds to be acknowledged with next request -- and the updated `DecisionGlobalState`. +-- TODO: check for possible errors in the peer response, raise exception if it happened handleReceivedIdsImpl :: forall peerAddr object objectId. 
(Ord objectId, Ord peerAddr, HasCallStack) => @@ -78,62 +68,60 @@ handleReceivedIdsImpl { dgsPeerStates , dgsObjectsLiveMultiplicities } = - let peerState = - findWithDefault - (error "ObjectDiffusion.handleReceivedIdsImpl: the peer should appear in dgsPeerStates") - peerAddr - dgsPeerStates - - -- Divide the new objectIds in two: those that are already in the objectpool - -- and those that are not. We'll request some objects from the latter. - (idsAlreadyInPoolSeq, newIdsAvailableSeq) = - strictSeqPartition hasObject receivedIdsSeq - - -- TODO: Stopped there + st + { dgsObjectsLiveMultiplicities = dgsObjectsLiveMultiplicities' + , dgsPeerStates = dgsPeerStates' + } + where + peerState@DecisionPeerState + { dpsOutstandingFifo + , dpsIdsAvailable + , dpsNumIdsInflight + } = + findWithDefault + (error "ObjectDiffusion.handleReceivedIdsImpl: the peer should appear in dgsPeerStates") + peerAddr + dgsPeerStates + + -- Divide the new objectIds in two: those that are already in the objectpool + -- and those that are not. We'll request some objects from the latter. + newIdsAvailableSeq = + Seq.filter (not . hasObject) $ fromStrict receivedIdsSeq - -- Add all `objectIds` from `dpsIdsAvailableMap` which are not - -- unacknowledged or already buffered. Unacknowledged objectIds must have - -- already been added to `dpsIdsAvailable` map before. - dpsIdsAvailable' = - StrictSeq.foldl' - (\m objectId -> Set.insert objectId m) - dpsIdsAvailable - ( Set.filter - ( \objectId -> - objectId `notElem` dpsOutstandingFifo - && objectId `Map.notMember` dgsObjectsLiveMultiplicities - ) - newIdsAvailableSeq - ) + -- Add all `objectIds` from `dpsIdsAvailableMap` which are not + -- unacknowledged or already buffered. Unacknowledged objectIds must have + -- already been added to `dpsIdsAvailable` map before. 
+ dpsIdsAvailable' = + Foldable.foldl' + (\m objectId -> Set.insert objectId m) + dpsIdsAvailable + ( Seq.filter + ( \objectId -> + objectId `notElem` dpsOutstandingFifo + && objectId `Map.notMember` dgsObjectsLiveMultiplicities + ) + newIdsAvailableSeq + ) - -- Add received objectIds to `dpsOutstandingFifo`. - dpsOutstandingFifo' = dpsOutstandingFifo <> receivedIdsSeq + -- Add received objectIds to `dpsOutstandingFifo`. + dpsOutstandingFifo' = dpsOutstandingFifo <> receivedIdsSeq - dgsObjectsLiveMultiplicities' = - Foldable.foldl' - ( flip $ - Map.alter - ( \case - Nothing -> Just $! 1 - Just cnt -> Just $! succ cnt - ) - ) - dgsObjectsLiveMultiplicities - receivedIdsSeq + dgsObjectsLiveMultiplicities' = + Foldable.foldl' + decreaseCount + dgsObjectsLiveMultiplicities + receivedIdsSeq - st' = - st - { dgsObjectsLiveMultiplicities = dgsObjectsLiveMultiplicities' + peerState' = + assert + (dpsNumIdsInflight >= numIdsInitiallyRequested) + peerState + { dpsIdsAvailable = dpsIdsAvailable' + , dpsOutstandingFifo = dpsOutstandingFifo' + , dpsNumIdsInflight = dpsNumIdsInflight - numIdsInitiallyRequested } - ps' = - assert - (dpsNumIdsInflight >= numIdsInitiallyRequested) - ps - { dpsIdsAvailable = dpsIdsAvailable' - , dpsOutstandingFifo = dpsOutstandingFifo' - , dpsNumIdsInflight = dpsNumIdsInflight - numIdsInitiallyRequested - } - in undefined + + dgsPeerStates' = Map.insert peerAddr peerState' dgsPeerStates handleReceivedObjectsImpl :: forall peerAddr object objectId. 
@@ -161,74 +149,52 @@ handleReceivedObjectsImpl peerAddr requestedObjectIdsMap receivedObjects - st@DecisionGlobalState{dgsPeerStates} = + st@DecisionGlobalState + { dgsPeerStates + } = do + + let + peerState@DecisionPeerState + { dpsOutstandingFifo + , dpsIdsAvailable + , dpsObjectsInflightIds + , dpsObjectsPending + , dpsNumIdsInflight + } = + findWithDefault + (error "ObjectDiffusion.handleReceivedIdsImpl: the peer should appear in dgsPeerStates") + peerAddr + dgsPeerStates -- using `alterF` so the update of `DecisionPeerState` is done in one lookup - case Map.alterF - (fmap Just . fn . fromJust) - peerAddr - dgsPeerStates of - (Right st', dgsPeerStates') -> - Right st'{dgsPeerStates = dgsPeerStates'} - (Left e, _) -> - Left $ ProtocolErrorObjectSizeError e - where + -- case Map.alterF + -- (fmap Just . fn . fromJust) + -- peerAddr + -- dgsPeerStates of + -- (st', dgsPeerStates') -> + -- st'{dgsPeerStates = dgsPeerStates'} -- Update `DecisionPeerState` and partially update `DecisionGlobalState` (except of -- `dgsPeerStates`). - fn :: - DecisionPeerState objectId object -> - ( Either - [(objectId, SizeInBytes, SizeInBytes)] - (DecisionGlobalState peerAddr objectId object) - , DecisionPeerState objectId object - ) - fn ps = - case wrongSizedObjects of - [] -> - ( Right st' - , ps'' - ) - _ -> - ( Left wrongSizedObjects - , ps - ) - where - wrongSizedObjects :: [(objectId, SizeInBytes, SizeInBytes)] - wrongSizedObjects = - map (\(a, (b, c)) -> (a, b, c)) - . 
Map.toList - $ Map.merge - Map.dropMissing - Map.dropMissing - ( Map.zipWithMaybeMatched \_ receivedSize advertisedSize -> - if receivedSize `checkObjectSize` advertisedSize - then Nothing - else Just (receivedSize, advertisedSize) - ) - (objectSize `Map.map` receivedObjects) - requestedObjectIdsMap - - checkObjectSize :: - SizeInBytes -> - SizeInBytes -> - Bool - checkObjectSize received advertised - | received > advertised = - received - advertised <= const_MAX_OBJECT_SIZE_DISCREPENCY - | otherwise = - advertised - received <= const_MAX_OBJECT_SIZE_DISCREPENCY - + -- fn :: + -- DecisionPeerState objectId object -> + -- ( (DecisionGlobalState peerAddr objectId object) + -- , DecisionPeerState objectId object + -- ) + -- fn ps = + -- ( st' + -- , ps'' + -- ) + -- where requestedObjectIds = Map.keysSet requestedObjectIdsMap notReceived = requestedObjectIds Set.\\ Map.keysSet receivedObjects - dpsObjectsPending' = dpsObjectsPending ps <> receivedObjects - -- Add not received objects to `dpsObjectsRequestedButNotReceivedIds` before acknowledging objectIds. 
- dpsObjectsRequestedButNotReceivedIds' = dpsObjectsRequestedButNotReceivedIds ps <> notReceived + dpsObjectsPending' = dpsObjectsPending <> receivedObjects + -- TODO: raise error if requested object has not been received + -- subtract requested from in-flight dpsObjectsInflightIds' = - assert (requestedObjectIds `Set.isSubsetOf` dpsObjectsInflightIds ps) $ - dpsObjectsInflightIds ps Set.\\ requestedObjectIds + assert (requestedObjectIds `Set.isSubsetOf` dpsObjectsInflightIds) $ + dpsObjectsInflightIds Set.\\ requestedObjectIds - -- subtract requested from in-flight - dgsObjectsInflightMultiplicities'' = + dgsObjectsInflightMultiplicities' = Map.merge (Map.mapMaybeMissing \_ x -> Just x) (Map.mapMaybeMissing \_ _ -> assert False Nothing) @@ -243,41 +209,27 @@ handleReceivedObjectsImpl (dgsObjectsInflightMultiplicities st) (Map.fromSet (const 1) requestedObjectIds) - st' = - st - { dgsObjectsInflightMultiplicities = dgsObjectsInflightMultiplicities'' - } - - -- -- Update DecisionPeerState -- - -- Remove the downloaded `objectId`s from the dpsIdsAvailable map, this -- guarantees that we won't attempt to download the `objectIds` from this peer -- once we collect the `objectId`s. Also restrict keys to `liveSet`. - -- - -- NOTE: we could remove `notReceived` from `dpsIdsAvailable`; and - -- possibly avoid using `dpsObjectsRequestedButNotReceivedIds` field at all. - -- - dpsIdsAvailable'' = dpsIdsAvailable ps `Set.difference` requestedObjectIds + dpsIdsAvailable'' = dpsIdsAvailable `Set.difference` requestedObjectIds - -- Remove all acknowledged `objectId`s from unknown set, but only those - -- which are not present in `dpsOutstandingFifo'` - dpsObjectsRequestedButNotReceivedIds'' = - dpsObjectsRequestedButNotReceivedIds' - `Set.intersection` live - where - -- We cannot use `liveSet` as `unknown <> notReceived` might - -- contain `objectIds` which are in `liveSet` but are not `live`. 
- live = Set.fromList (toList (dpsOutstandingFifo ps)) - - ps'' = - ps + peerState' = + peerState { dpsIdsAvailable = dpsIdsAvailable'' , dpsObjectsInflightIds = dpsObjectsInflightIds' , dpsObjectsPending = dpsObjectsPending' } + dgsPeerStates' = Map.insert peerAddr peerState' dgsPeerStates + + pure $ st + { dgsObjectsInflightMultiplicities = dgsObjectsInflightMultiplicities' + , dgsPeerStates = dgsPeerStates' + } + -- -- Monadic public API -- diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Types.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Types.hs index 68c7215f5a..5fe2214a0e 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Types.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Types.hs @@ -3,6 +3,7 @@ {-# LANGUAGE FlexibleContexts #-} {-# LANGUAGE GADTs #-} {-# LANGUAGE ImportQualifiedPost #-} +{-# LANGUAGE LambdaCase #-} {-# LANGUAGE NamedFieldPuns #-} {-# LANGUAGE ScopedTypeVariables #-} {-# LANGUAGE GeneralizedNewtypeDeriving #-} @@ -33,6 +34,10 @@ module Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.Types , NumObjectsProcessed (..) , TraceObjectDiffusionInbound (..) , ObjectDiffusionInboundError (..) + + -- * Helpers for ObjectMultiplicity maps + , increaseCount + , decreaseCount ) where import Control.Exception (Exception (..)) @@ -321,6 +326,23 @@ newtype ObjectMultiplicity deriving (Monoid) via (Sum Word64) deriving (Show) via (Quiet ObjectMultiplicity) +increaseCount :: Ord k => Map k ObjectMultiplicity -> k -> Map k ObjectMultiplicity +increaseCount mmap k = + Map.alter + (\case + Nothing -> Just $! 1 + Just cnt -> Just $! 
succ cnt + ) + k + mmap + +decreaseCount :: Ord k => Map k ObjectMultiplicity -> k -> Map k ObjectMultiplicity +decreaseCount mmap k = + Map.update + (\n -> if n > 1 then Just $! pred n else Nothing) + k + mmap + data TraceObjectDiffusionInbound objectId object = -- | Number of objects just about to be inserted. TraceObjectDiffusionInboundCollectedObjects Int @@ -339,14 +361,17 @@ data ObjectDiffusionInboundError | ProtocolErrorObjectIdsNotRequested | ProtocolErrorObjectIdAlreadyKnown | ProtocolErrorObjectIdsDuplicate + | ProtocolErrorObjectMissing deriving Show instance Exception ObjectDiffusionInboundError where displayException ProtocolErrorObjectNotRequested = - "The peer replied with a object we did not ask for." + "The peer replied with an object we did not ask for." displayException ProtocolErrorObjectIdsNotRequested = "The peer replied with more objectIds than we asked for." displayException ProtocolErrorObjectIdAlreadyKnown = "The peer replied with an objectId that it has already sent us previously." displayException ProtocolErrorObjectIdsDuplicate = - "The peer replied with a batch of objectIds containing a duplicate." \ No newline at end of file + "The peer replied with a batch of objectIds containing a duplicate." + displayException ProtocolErrorObjectMissing = + "The peer did not deliver an object for which it claimed to have an id." 
From 3cd25082d6ca88e8e9088dab0c9b3aa891c83f57 Mon Sep 17 00:00:00 2001 From: Nicolas BACQUEY Date: Wed, 8 Oct 2025 15:57:12 +0200 Subject: [PATCH 11/43] clean handleReceivedObjectsImpl --- .../ObjectDiffusion/Inbound/V2/State.hs | 66 ++++++------------- 1 file changed, 21 insertions(+), 45 deletions(-) diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/State.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/State.hs index 0b289f48ef..92bb30b6f8 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/State.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/State.hs @@ -20,6 +20,7 @@ module Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.State import Control.Concurrent.Class.MonadSTM.Strict import Control.Exception (assert) +import Control.Monad (when) import Control.Monad.Class.MonadTime.SI import Control.Tracer (Tracer, traceWith) import Data.Foldable qualified as Foldable @@ -30,7 +31,7 @@ import Data.Map.Strict qualified as Map import Data.Maybe (fromJust) import Data.Sequence qualified as Seq import Data.Sequence.Strict (StrictSeq, fromStrict) -import Data.Set (Set) +import Data.Set ((\\)) import Data.Set qualified as Set import Data.Typeable (Typeable) import GHC.Stack (HasCallStack) @@ -127,11 +128,7 @@ handleReceivedObjectsImpl :: forall peerAddr object objectId. 
( Ord peerAddr , Ord objectId - , Show objectId - , Typeable objectId ) => - -- | compute object size - (object -> SizeInBytes) -> peerAddr -> -- | requested objectIds Map objectId SizeInBytes -> @@ -145,75 +142,54 @@ handleReceivedObjectsImpl :: ObjectDiffusionInboundError (DecisionGlobalState peerAddr objectId object) handleReceivedObjectsImpl - objectSize peerAddr requestedObjectIdsMap receivedObjects st@DecisionGlobalState { dgsPeerStates + , dgsObjectsInflightMultiplicities } = do let peerState@DecisionPeerState - { dpsOutstandingFifo - , dpsIdsAvailable + { dpsIdsAvailable , dpsObjectsInflightIds , dpsObjectsPending - , dpsNumIdsInflight } = findWithDefault (error "ObjectDiffusion.handleReceivedIdsImpl: the peer should appear in dgsPeerStates") peerAddr dgsPeerStates - -- using `alterF` so the update of `DecisionPeerState` is done in one lookup - -- case Map.alterF - -- (fmap Just . fn . fromJust) - -- peerAddr - -- dgsPeerStates of - -- (st', dgsPeerStates') -> - -- st'{dgsPeerStates = dgsPeerStates'} - -- Update `DecisionPeerState` and partially update `DecisionGlobalState` (except of - -- `dgsPeerStates`). 
- -- fn :: - -- DecisionPeerState objectId object -> - -- ( (DecisionGlobalState peerAddr objectId object) - -- , DecisionPeerState objectId object - -- ) - -- fn ps = - -- ( st' - -- , ps'' - -- ) - -- where requestedObjectIds = Map.keysSet requestedObjectIdsMap - notReceived = requestedObjectIds Set.\\ Map.keysSet receivedObjects + receivedObjectIds = Map.keysSet receivedObjects + when (not $ Set.null $ requestedObjectIds \\ receivedObjectIds) $ + Left ProtocolErrorObjectMissing + when (not $ Set.null $ receivedObjectIds \\ requestedObjectIds) $ + Left ProtocolErrorObjectNotRequested + -- past that point we know that `requestedObjectIds` == `receivedObjectIds` + let dpsObjectsPending' = dpsObjectsPending <> receivedObjects - -- TODO: raise error if requested object has not been received -- subtract requested from in-flight dpsObjectsInflightIds' = assert (requestedObjectIds `Set.isSubsetOf` dpsObjectsInflightIds) $ - dpsObjectsInflightIds Set.\\ requestedObjectIds + dpsObjectsInflightIds \\ requestedObjectIds dgsObjectsInflightMultiplicities' = - Map.merge - (Map.mapMaybeMissing \_ x -> Just x) - (Map.mapMaybeMissing \_ _ -> assert False Nothing) - ( Map.zipWithMaybeMatched \_ x y -> - assert - (x >= y) - let z = x - y - in if z > 0 - then Just z - else Nothing + Map.foldrWithKey + (\objectId count m -> + if objectId `Set.member` requestedObjectIds && count > 1 + then Map.insert objectId (count-1) m + else m ) - (dgsObjectsInflightMultiplicities st) - (Map.fromSet (const 1) requestedObjectIds) + Map.empty + dgsObjectsInflightMultiplicities -- Update DecisionPeerState -- -- Remove the downloaded `objectId`s from the dpsIdsAvailable map, this - -- guarantees that we won't attempt to download the `objectIds` from this peer - -- once we collect the `objectId`s. Also restrict keys to `liveSet`. + -- guarantees that we won't attempt to download the `objectIds` from + -- this peer twice. 
dpsIdsAvailable'' = dpsIdsAvailable `Set.difference` requestedObjectIds peerState' = From 128b6a187cdca442b1afdef522cdd491d4086d63 Mon Sep 17 00:00:00 2001 From: Thomas BAGREL Date: Thu, 9 Oct 2025 11:55:14 +0200 Subject: [PATCH 12/43] First diagram attempt --- .../ObjectDiffusion/Inbound/V2.mermaid | 41 ++++++++++++++++ .../ObjectDiffusion/Inbound/V2/Decision.hs | 18 +++---- .../ObjectDiffusion/Inbound/V2/Registry.hs | 2 +- .../ObjectDiffusion/Inbound/V2/State.hs | 29 ++++------- .../ObjectDiffusion/Inbound/V2/Types.hs | 49 +++++++++---------- 5 files changed, 84 insertions(+), 55 deletions(-) create mode 100644 ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2.mermaid diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2.mermaid b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2.mermaid new file mode 100644 index 0000000000..f0913589af --- /dev/null +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2.mermaid @@ -0,0 +1,41 @@ +flowchart TD + A(dpsNumIdsInFlight) + B(dpsOutstandingFifo) + C(dpsObjectsAvailableIds) + D(dpsObjectsInflightIds) + E(dpsObjectsPending) + F(dpsObjectsOwtPool) + + G(dgsObjectsLiveMultiplicities) + H(dgsObjectsInflightMultiplicities) + I(dgsObjectsOwtPoolMultiplicities) + + EA{requestIds} + EA-->|count| A + + EB{receiveIds} + A --> EB + EB -->|ids| B + EB -->|ids| C + + EC{requestObjects} + C --> EC + EC -->|ids| D + EC --> |count| G + EC -->|count| H + + ED{receiveObjects} + D --> ED + H --> ED + ED -->|objects| E + + EE{acknowledgeIds} + B --> EE + E --> EE + EE -->|objects| F + EE -->|count| I + + EF{Added to pool} + F --> EF + G --> EF + I --> EF diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Decision.hs 
b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Decision.hs index ed37189db7..e3b284037f 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Decision.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Decision.hs @@ -167,7 +167,7 @@ pickObjectsToDownload } ( peerAddr , peerObjectState@DecisionPeerState - { dpsIdsAvailable + { dpsObjectsAvailableIds , dpsObjectsInflightIds } ) = @@ -247,14 +247,14 @@ pickObjectsToDownload else Nothing ) ( Map.assocs $ - -- merge `dpsIdsAvailable` with `disObjectsInflightMultiplicities`, so we don't + -- merge `dpsObjectsAvailableIds` with `disObjectsInflightMultiplicities`, so we don't -- need to lookup into `disObjectsInflightMultiplicities` on every `objectId` which - -- is in `dpsIdsAvailable`. + -- is in `dpsObjectsAvailableIds`. Map.merge (Map.mapMaybeMissing \_objectId -> Just . (,0)) Map.dropMissing (Map.zipWithMatched \_objectId -> (,)) - dpsIdsAvailable + dpsObjectsAvailableIds disObjectsInflightMultiplicities -- remove `object`s which were already downloaded by some -- other peer or are in-flight or unknown by this peer. 
@@ -449,7 +449,7 @@ filterActivePeers { dpsOutstandingFifo , dpsNumIdsInflight , dpsObjectsInflightIds - , dpsIdsAvailable + , dpsObjectsAvailableIds } = ( dpsNumIdsInflight == 0 && dpsNumIdsInflight + numOfUnacked <= dpMaxNumObjectsOutstanding @@ -459,7 +459,7 @@ filterActivePeers where numOfUnacked = fromIntegral (StrictSeq.length dpsOutstandingFifo) downloadable = - dpsIdsAvailable + dpsObjectsAvailableIds `Set.difference` dpsObjectsInflightIds `Set.difference` dpsObjectsRequestedButNotReceivedIds `Set.difference` unrequestable @@ -524,7 +524,7 @@ acknowledgeObjectIds decisionPolicy globalState ps@DecisionPeerState - { dpsIdsAvailable + { dpsObjectsAvailableIds , dpsNumIdsInflight , dpsObjectsPending , dpsObjectsOwtPool @@ -539,7 +539,7 @@ acknowledgeObjectIds , refCountDiff , ps { dpsOutstandingFifo = dpsOutstandingFifo' - , dpsIdsAvailable = dpsIdsAvailable' + , dpsObjectsAvailableIds = dpsObjectsAvailableIds' , dpsNumIdsInflight = dpsNumIdsInflight + pdIdsToReq @@ -584,7 +584,7 @@ acknowledgeObjectIds -- the set of live `objectIds` liveSet = Set.fromList (toList dpsOutstandingFifo') - dpsIdsAvailable' = dpsIdsAvailable `Set.intersection` liveSet + dpsObjectsAvailableIds' = dpsObjectsAvailableIds `Set.intersection` liveSet -- We remove all acknowledged `objectId`s which are not in -- `dpsOutstandingFifo''`, but also return the unknown set before any diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Registry.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Registry.hs index 8de5311b4a..e0558b21b6 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Registry.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Registry.hs @@ -160,7 +160,7 @@ withPeer Map.insert peerAddr DecisionPeerState - { dpsIdsAvailable = 
Set.empty + { dpsObjectsAvailableIds = Set.empty , dpsNumIdsInflight = 0 , dpsObjectsInflightIds = Set.empty , dpsOutstandingFifo = StrictSeq.empty diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/State.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/State.hs index 92bb30b6f8..ab647bb229 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/State.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/State.hs @@ -76,7 +76,7 @@ handleReceivedIdsImpl where peerState@DecisionPeerState { dpsOutstandingFifo - , dpsIdsAvailable + , dpsObjectsAvailableIds , dpsNumIdsInflight } = findWithDefault @@ -89,13 +89,13 @@ handleReceivedIdsImpl newIdsAvailableSeq = Seq.filter (not . hasObject) $ fromStrict receivedIdsSeq - -- Add all `objectIds` from `dpsIdsAvailableMap` which are not + -- Add all `objectIds` from `dpsObjectsAvailableIdsMap` which are not -- unacknowledged or already buffered. Unacknowledged objectIds must have - -- already been added to `dpsIdsAvailable` map before. - dpsIdsAvailable' = + -- already been added to `dpsObjectsAvailableIds` map before. 
+ dpsObjectsAvailableIds' = Foldable.foldl' (\m objectId -> Set.insert objectId m) - dpsIdsAvailable + dpsObjectsAvailableIds ( Seq.filter ( \objectId -> objectId `notElem` dpsOutstandingFifo @@ -117,7 +117,7 @@ handleReceivedIdsImpl assert (dpsNumIdsInflight >= numIdsInitiallyRequested) peerState - { dpsIdsAvailable = dpsIdsAvailable' + { dpsObjectsAvailableIds = dpsObjectsAvailableIds' , dpsOutstandingFifo = dpsOutstandingFifo' , dpsNumIdsInflight = dpsNumIdsInflight - numIdsInitiallyRequested } @@ -152,7 +152,7 @@ handleReceivedObjectsImpl let peerState@DecisionPeerState - { dpsIdsAvailable + { dpsObjectsAvailableIds , dpsObjectsInflightIds , dpsObjectsPending } = @@ -187,14 +187,14 @@ handleReceivedObjectsImpl -- Update DecisionPeerState -- - -- Remove the downloaded `objectId`s from the dpsIdsAvailable map, this + -- Remove the downloaded `objectId`s from the dpsObjectsAvailableIds map, this -- guarantees that we won't attempt to download the `objectIds` from -- this peer twice. - dpsIdsAvailable'' = dpsIdsAvailable `Set.difference` requestedObjectIds + dpsObjectsAvailableIds'' = dpsObjectsAvailableIds `Set.difference` requestedObjectIds peerState' = peerState - { dpsIdsAvailable = dpsIdsAvailable'' + { dpsObjectsAvailableIds = dpsObjectsAvailableIds'' , dpsObjectsInflightIds = dpsObjectsInflightIds' , dpsObjectsPending = dpsObjectsPending' } @@ -223,7 +223,6 @@ newDecisionGlobalStateVar rng = { dgsPeerStates = Map.empty , dgsObjectsInflightMultiplicities = Map.empty , dgsObjectsLiveMultiplicities = Map.empty - , dgsRententionTimeouts = Map.empty , dgsObjectsOwtPoolMultiplicities = Map.empty , dgsRng = rng } @@ -316,12 +315,10 @@ submitObjectsToPool tracer objectPoolWriter objects = st@DecisionGlobalState { dgsPeerStates , dgsObjectsLiveMultiplicities - , dgsRententionTimeouts , dgsObjectsOwtPoolMultiplicities } = st { dgsPeerStates = dgsPeerStates' - , dgsRententionTimeouts = dgsRententionTimeouts' , dgsObjectsLiveMultiplicities = 
dgsObjectsLiveMultiplicities' , dgsObjectsOwtPoolMultiplicities = dgsObjectsOwtPoolMultiplicities' } @@ -332,12 +329,6 @@ submitObjectsToPool tracer objectPoolWriter objects = objectId dgsObjectsOwtPoolMultiplicities - dgsRententionTimeouts' = - Map.alter - (\case Nothing -> Just [objectId]; Just objectIds -> Just (objectId : objectIds)) - (addTime dpMinObtainedButNotAckedObjectsLifetime now) - dgsRententionTimeouts - dgsObjectsLiveMultiplicities' = Map.alter (\case Nothing -> Just 1; Just n -> Just $! succ n) diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Types.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Types.hs index 5fe2214a0e..0668998e7a 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Types.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Types.hs @@ -69,18 +69,18 @@ import Quiet (Quiet (..)) -- speaking of objects (i.e. after they have been requested) but identify them -- by their IDs for this field purpose. data DecisionPeerState objectId object = DecisionPeerState - { dpsOutstandingFifo :: !(StrictSeq objectId) + { dpsNumIdsInflight :: !NumObjectIdsReq + -- ^ The number of object identifiers that we have requested but + -- which have not yet been replied to. We need to track this it keep + -- our requests within the limit on the number of unacknowledged objectIds. + , dpsOutstandingFifo :: !(StrictSeq objectId) -- ^ Those objects (by their identifier) that the client has told -- us about, and which we have not yet acknowledged. This is kept in -- the order in which the client gave them to us. This is the same order -- in which we submit them to the objectpool. It is also the order -- in which we acknowledge them. 
- , dpsIdsAvailable :: !(Set objectId) + , dpsObjectsAvailableIds :: !(Set objectId) -- ^ Set of known object ids which can be requested from this peer. - , dpsNumIdsInflight :: !NumObjectIdsReq - -- ^ The number of object identifiers that we have requested but - -- which have not yet been replied to. We need to track this it keep - -- our requests within the limit on the number of unacknowledged objectIds. , dpsObjectsInflightIds :: !(Set objectId) -- ^ The set of requested objects (by their ids). -- , dpsObjectsRequestedButNotReceivedIds :: !(Set objectId) @@ -114,7 +114,7 @@ instance -- | Shared state of all `ObjectDiffusion` clients. -- --- New `objectId` enters `dpsOutstandingFifo` it is also added to `dpsIdsAvailable` +-- New `objectId` enters `dpsOutstandingFifo` it is also added to `dpsObjectsAvailableIds` -- and `dgsObjectsLiveMultiplicities` (see `acknowledgeObjectIdsImpl`). -- -- When the requested object arrives, the corresponding entry is removed from `dgsObjectsInflightMultiplicities`. @@ -123,7 +123,7 @@ instance -- `handleReceivedObjectsImpl` or -- `pickObjectsToDownload`, we also -- recalculate `dgsObjectsLiveMultiplicities` and only keep live `objectId`s in other maps (e.g. --- `dpsIdsAvailable`). +-- `dpsObjectsAvailableIds`). data DecisionGlobalState peerAddr objectId object = DecisionGlobalState { dgsPeerStates :: !(Map peerAddr (DecisionPeerState objectId object)) -- ^ Map of peer states. @@ -131,12 +131,6 @@ data DecisionGlobalState peerAddr objectId object = DecisionGlobalState -- /Invariant:/ for peerAddr's which are registered using `withPeer`, -- there's always an entry in this map even if the set of `objectId`s is -- empty. - , dgsObjectsInflightMultiplicities :: !(Map objectId ObjectMultiplicity) - -- ^ Map from object ids of objects which are in-flight (have already been - -- requested) to their multiplicities (from how many peers it is - -- currently in-flight) - -- - -- This can intersect with `dpsIdsAvailable`. 
, dgsObjectsLiveMultiplicities :: !(Map objectId ObjectMultiplicity) -- ^ We track counts of live objects. -- An object is added to the live map when it is inflight, and is only removed @@ -145,14 +139,12 @@ data DecisionGlobalState peerAddr objectId object = DecisionGlobalState -- The value for any key must be always non-zero (strictly positive). -- -- The `dgsObjectsOwtPoolMultiplicities` map contains a subset of `dgsObjectsLiveMultiplicities`. - , dgsRententionTimeouts :: !(Map Time [objectId]) - -- ^ Objects are kept live for a bit longer after having been added to the objectpool. - -- - -- We need these short timeouts to avoid re-downloading a `object`. We could - -- acknowledge this `objectId` to all peers, when a peer from another - -- continent presents us it again. + , dgsObjectsInflightMultiplicities :: !(Map objectId ObjectMultiplicity) + -- ^ Map from object ids of objects which are in-flight (have already been + -- requested) to their multiplicities (from how many peers it is + -- currently in-flight) -- - -- Every objectId entry has a reference count in `dgsObjectsLiveMultiplicities`. + -- This can intersect with `dpsObjectsAvailableIds`. , dgsObjectsOwtPoolMultiplicities :: !(Map objectId ObjectMultiplicity) -- ^ A set of objectIds that have been downloaded by a peer and are on their -- way to the objectpool. We won't issue further fetch-requests for objects in @@ -177,16 +169,21 @@ instance ) => NoThunks (DecisionGlobalState peerAddr objectId object) --- | Merge dpsIdsAvailable from all peers of the global state. -dgsIdsAvailable :: Ord objectId => DecisionGlobalState peerAddr objectId object -> Set objectId -dgsIdsAvailable DecisionGlobalState{dgsPeerStates} = - Set.unions (dpsIdsAvailable <$> (Map.elems dgsPeerStates)) +-- | Merge dpsObjectsAvailableIds from all peers of the global state. 
+dgsObjectsAvailableIds :: Ord objectId => DecisionGlobalState peerAddr objectId object -> Set objectId +dgsObjectsAvailableIds DecisionGlobalState{dgsPeerStates} = + Set.unions (dpsObjectsAvailableIds <$> (Map.elems dgsPeerStates)) -- | Merge dpsObjectsPending from all peers of the global state. dgsObjectsPending :: Ord objectId => DecisionGlobalState peerAddr objectId object -> Map objectId object dgsObjectsPending DecisionGlobalState{dgsPeerStates} = Map.unions (dpsObjectsPending <$> (Map.elems dgsPeerStates)) +-- | Merge dpsObjectsOwtPool from all peers of the global state. +dgsObjectsOwtPool :: Ord objectId => DecisionGlobalState peerAddr objectId object -> Map objectId object +dgsObjectsOwtPool DecisionGlobalState{dgsPeerStates} = + Map.unions (dpsObjectsOwtPool <$> (Map.elems dgsPeerStates)) + -- -- Decisions -- @@ -288,7 +285,7 @@ makeObjectDiffusionCounters , dgsObjectsOwtPoolMultiplicities } = ObjectDiffusionCounters - { odcDistinctNumObjectsAvailable = Set.size $ dgsIdsAvailable dgs + { odcDistinctNumObjectsAvailable = Set.size $ dgsObjectsAvailableIds dgs , odcNumDistinctObjectsLive = Map.size dgsObjectsLiveMultiplicities , odcNumDistinctObjectsInflight = Map.size dgsObjectsInflightMultiplicities , odcNumTotalObjectsInflight = fromIntegral $ mconcat (Map.elems dgsObjectsInflightMultiplicities) From 36ab9e9a51b6a4005f6673a26a1c70ae838698e6 Mon Sep 17 00:00:00 2001 From: Thomas BAGREL Date: Thu, 9 Oct 2025 14:46:10 +0200 Subject: [PATCH 13/43] Continue on handleReceivedObjectsImpl --- .../ObjectDiffusion/Inbound/V2/State.hs | 87 +++++++------------ 1 file changed, 33 insertions(+), 54 deletions(-) diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/State.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/State.hs index ab647bb229..ca3747a656 100644 --- 
a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/State.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/State.hs @@ -31,7 +31,7 @@ import Data.Map.Strict qualified as Map import Data.Maybe (fromJust) import Data.Sequence qualified as Seq import Data.Sequence.Strict (StrictSeq, fromStrict) -import Data.Set ((\\)) +import Data.Set ((\\), Set) import Data.Set qualified as Set import Data.Typeable (Typeable) import GHC.Stack (HasCallStack) @@ -131,77 +131,63 @@ handleReceivedObjectsImpl :: ) => peerAddr -> -- | requested objectIds - Map objectId SizeInBytes -> + Set objectId -> -- | received objects Map objectId object -> DecisionGlobalState peerAddr objectId object -> - -- | Return list of `objectId` which sizes didn't match or a new state. - -- If one of the `object` has wrong size, we return an error. The - -- mini-protocol will throw, which will clean the state map from this peer. 
- Either - ObjectDiffusionInboundError - (DecisionGlobalState peerAddr objectId object) + DecisionGlobalState peerAddr objectId object handleReceivedObjectsImpl peerAddr - requestedObjectIdsMap - receivedObjects + objectsRequestedIds + objectsReceived st@DecisionGlobalState { dgsPeerStates , dgsObjectsInflightMultiplicities } = do - let peerState@DecisionPeerState - { dpsObjectsAvailableIds - , dpsObjectsInflightIds + { dpsObjectsInflightIds , dpsObjectsPending } = findWithDefault - (error "ObjectDiffusion.handleReceivedIdsImpl: the peer should appear in dgsPeerStates") + (error "ObjectDiffusion.handleReceivedObjectsImpl: the peer should appear in dgsPeerStates") peerAddr dgsPeerStates - requestedObjectIds = Map.keysSet requestedObjectIdsMap - receivedObjectIds = Map.keysSet receivedObjects - when (not $ Set.null $ requestedObjectIds \\ receivedObjectIds) $ - Left ProtocolErrorObjectMissing - when (not $ Set.null $ receivedObjectIds \\ requestedObjectIds) $ - Left ProtocolErrorObjectNotRequested - -- past that point we know that `requestedObjectIds` == `receivedObjectIds` + -- TODO: error handling should be done by the client before using the API + -- past that point we assume + -- assert (objectsRequestedIds `Set.isSubsetOf` dpsObjectsInflightIds) $ + -- assert (Map.keysSet objectsReceived == objectsRequestedIds) $ do let - dpsObjectsPending' = dpsObjectsPending <> receivedObjects + dpsObjectsPending' = dpsObjectsPending <> objectsReceived -- subtract requested from in-flight dpsObjectsInflightIds' = - assert (requestedObjectIds `Set.isSubsetOf` dpsObjectsInflightIds) $ - dpsObjectsInflightIds \\ requestedObjectIds + dpsObjectsInflightIds \\ objectsRequestedIds dgsObjectsInflightMultiplicities' = - Map.foldrWithKey - (\objectId count m -> - if objectId `Set.member` requestedObjectIds && count > 1 - then Map.insert objectId (count-1) m - else m - ) - Map.empty + Foldable.foldl' + decreaseCount dgsObjectsInflightMultiplicities + objectsRequestedIds -- Update 
DecisionPeerState -- -- Remove the downloaded `objectId`s from the dpsObjectsAvailableIds map, this -- guarantees that we won't attempt to download the `objectIds` from -- this peer twice. - dpsObjectsAvailableIds'' = dpsObjectsAvailableIds `Set.difference` requestedObjectIds + -- TODO: this is wrong, it should be done earlier when the request for objects is emitted + -- aka in when the decision for this peer is emitted/read? + -- dpsObjectsAvailableIds'' = dpsObjectsAvailableIds `Set.difference` objectsRequestedIds peerState' = peerState - { dpsObjectsAvailableIds = dpsObjectsAvailableIds'' - , dpsObjectsInflightIds = dpsObjectsInflightIds' + { dpsObjectsInflightIds = dpsObjectsInflightIds' , dpsObjectsPending = dpsObjectsPending' } dgsPeerStates' = Map.insert peerAddr peerState' dgsPeerStates - pure $ st + st { dgsObjectsInflightMultiplicities = dgsObjectsInflightMultiplicities' , dgsPeerStates = dgsPeerStates' } @@ -253,8 +239,8 @@ handleReceivedIds tracer globalStateVar objectPoolWriter peerAddr numIdsInitiall in (globalState', globalState') ) traceWith tracer (TraceDecisionLogicGlobalStateUpdated "handleReceivedIds" globalState') --- | Include received `object`s in `DecisionGlobalState`. Return number of `objectIds` --- to be acknowledged and list of `object` to be added to the objectpool. +-- | Wrapper around `handleReceivedObjectsImpl` that updates and traces the +-- global state TVar. handleReceivedObjects :: forall m peerAddr object objectId. 
( MonadSTM m @@ -264,29 +250,22 @@ handleReceivedObjects :: , Typeable objectId ) => Tracer m (TraceDecisionLogic peerAddr objectId object) -> - (object -> SizeInBytes) -> DecisionGlobalStateVar m peerAddr objectId object -> peerAddr -> - -- | set of requested objectIds with their announced size - Map objectId SizeInBytes -> + -- | set of requested objectIds + Set objectId -> -- | received objects Map objectId object -> -- | number of objectIds to be acknowledged and objects to be added to the -- objectpool - m (Maybe ObjectDiffusionInboundError) -handleReceivedObjects tracer objectSize globalStateVar peerAddr objectIdsRequested objectsMap = do - r <- atomically $ do - st <- readTVar globalStateVar - case handleReceivedObjectsImpl objectSize peerAddr objectIdsRequested objectsMap st of - r@(Right st') -> - writeTVar globalStateVar st' - $> r - r@Left{} -> pure r - case r of - Right st -> - traceWith tracer (TraceDecisionLogicGlobalStateUpdated "handleReceivedObjects" st) - $> Nothing - Left e -> return (Just e) + m () +handleReceivedObjects tracer globalStateVar peerAddr objectsRequestedIds objectsReceived = do + globalState' <- atomically $ do + stateTVar + globalStateVar + ( \globalState -> let globalState' = handleReceivedObjectsImpl peerAddr objectsRequestedIds objectsReceived globalState + in (globalState', globalState') ) + traceWith tracer (TraceDecisionLogicGlobalStateUpdated "handleReceivedObjects" globalState') submitObjectsToPool :: Tracer m (TraceObjectDiffusionInbound objectId object) -> ObjectPoolWriter objectId object m -> [object] -> m () From ffe6cf48f33bcf86735e3aee81d33f7586269e1c Mon Sep 17 00:00:00 2001 From: Nicolas BACQUEY Date: Thu, 9 Oct 2025 21:30:57 +0200 Subject: [PATCH 14/43] Stabilize State.hs, plumbing in Registry.hs --- .../ObjectDiffusion/Inbound/V2/Registry.hs | 35 ++--- .../ObjectDiffusion/Inbound/V2/State.hs | 120 +++++++++--------- .../ObjectDiffusion/Inbound/V2/Types.hs | 15 ++- 3 files changed, 91 insertions(+), 79 
deletions(-) diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Registry.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Registry.hs index e0558b21b6..24f6c24e9c 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Registry.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Registry.hs @@ -19,7 +19,6 @@ module Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.Registry import Control.Concurrent.Class.MonadMVar.Strict import Control.Concurrent.Class.MonadSTM.Strict -import Control.Concurrent.Class.MonadSTM.TSem import Control.Monad.Class.MonadFork import Control.Monad.Class.MonadThrow import Control.Monad.Class.MonadTime.SI @@ -31,7 +30,6 @@ import Data.Map.Strict (Map) import Data.Map.Strict qualified as Map import Data.Sequence.Strict qualified as StrictSeq import Data.Set qualified as Set -import Data.Typeable (Typeable) import Data.Void (Void) import Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.Decision import Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.Policy @@ -53,17 +51,11 @@ newPeerDecisionChannelsVar :: MonadMVar m => m (PeerDecisionChannelsVar m peerAddr objectId object) newPeerDecisionChannelsVar = newMVar (Map.empty) --- | Semaphore to guard access to the ObjectPool -newtype ObjectPoolSem m = ObjectPoolSem (TSem m) - -newObjectPoolSem :: MonadSTM m => m (ObjectPoolSem m) -newObjectPoolSem = ObjectPoolSem <$> atomically (newTSem 1) - data InboundPeerAPI m objectId object = InboundPeerAPI { readPeerDecision :: m (PeerDecision objectId object) -- ^ a blocking action which reads `PeerDecision` - , handleReceivedIds :: [objectId] -> m () - , handleReceivedObjects :: [object] -> m () + , handleReceivedIds :: StrictSeq.StrictSeq objectId -> m () + , handleReceivedObjects 
:: Map objectId object -> m () , submitObjectsToPool :: [object] -> m () } @@ -75,14 +67,12 @@ withPeer :: ( MonadMask m , MonadMVar m , MonadSTM m - , MonadMonotonicTime m , Ord objectId - , Show objectId - , Typeable objectId , Ord peerAddr , Show peerAddr ) => Tracer m (TraceDecisionLogic peerAddr objectId object) -> + Tracer m (TraceObjectDiffusionInbound objectId object) -> PeerDecisionChannelsVar m peerAddr objectId object -> ObjectPoolSem m -> DecisionPolicy -> @@ -96,8 +86,9 @@ withPeer :: m a withPeer decisionTracer + objectDiffusionTracer decisionChannelsVar - (ObjectPoolSem poolSem) + objectPoolSem decisionPolicy globalStateVar objectPoolReader @@ -127,8 +118,21 @@ withPeer , InboundPeerAPI { readPeerDecision = takeMVar chan' , handleReceivedIds = State.handleReceivedIds + decisionTracer + globalStateVar + objectPoolWriter + peerAddr + (error "TODO: provide the number of requested IDs") , handleReceivedObjects = State.handleReceivedObjects + decisionTracer + globalStateVar + peerAddr , submitObjectsToPool = State.submitObjectsToPool + objectDiffusionTracer + objectPoolSem + globalStateVar + objectPoolWriter + peerAddr } ) -- register the peer in the global state now @@ -235,6 +239,7 @@ decisionLogicThread :: , MonadMVar m , MonadSTM m , MonadFork m + , MonadMask m , Ord peerAddr , Ord objectId , Hashable peerAddr @@ -284,7 +289,7 @@ decisionLogicThread decisionTracer countersTracer decisionPolicy decisionChannel traceWith countersTracer (makeObjectDiffusionCounters globalState') -- Variant of modifyMVar_ that puts a default value if the MVar is empty. 
-modifyMVarWithDefault_ :: StrictMVar m a -> a -> (a -> m a) -> m () +modifyMVarWithDefault_ :: (MonadMask m, MonadMVar m) => StrictMVar m a -> a -> (a -> m a) -> m () modifyMVarWithDefault_ m d io = mask $ \restore -> do mbA <- tryTakeMVar m diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/State.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/State.hs index ca3747a656..f2b9898772 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/State.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/State.hs @@ -1,11 +1,8 @@ {-# LANGUAGE BlockArguments #-} {-# LANGUAGE ImportQualifiedPost #-} -{-# LANGUAGE LambdaCase #-} {-# LANGUAGE NamedFieldPuns #-} {-# LANGUAGE RankNTypes #-} {-# LANGUAGE ScopedTypeVariables #-} -{-# LANGUAGE TupleSections #-} -{-# LANGUAGE BangPatterns #-} module Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.State ( -- * Core API @@ -19,30 +16,23 @@ module Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.State ) where import Control.Concurrent.Class.MonadSTM.Strict +import Control.Concurrent.Class.MonadSTM.TSem import Control.Exception (assert) -import Control.Monad (when) -import Control.Monad.Class.MonadTime.SI import Control.Tracer (Tracer, traceWith) import Data.Foldable qualified as Foldable -import Data.Functor (($>)) -import Data.Map.Merge.Strict qualified as Map import Data.Map.Strict (Map, findWithDefault) import Data.Map.Strict qualified as Map -import Data.Maybe (fromJust) import Data.Sequence qualified as Seq import Data.Sequence.Strict (StrictSeq, fromStrict) -import Data.Set ((\\), Set) +import Data.Set ((\\)) import Data.Set qualified as Set -import Data.Typeable (Typeable) import GHC.Stack (HasCallStack) -import 
Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.Policy import Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.Types -import Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.ObjectPool.API (ObjectPoolWriter (opwHasObject, opwObjectId)) +import Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.ObjectPool.API (ObjectPoolWriter (..)) +import Ouroboros.Consensus.Util.IOLike (MonadMask, MonadMVar, bracket_) import Ouroboros.Network.Protocol.ObjectDiffusion.Type (NumObjectIdsReq) import System.Random (StdGen) -data SizeInBytes - -- | Insert received `objectId`s and return the number of objectIds to be acknowledged with next request -- and the updated `DecisionGlobalState`. -- TODO: check for possible errors in the peer response, raise exception if it happened @@ -130,21 +120,28 @@ handleReceivedObjectsImpl :: , Ord objectId ) => peerAddr -> - -- | requested objectIds - Set objectId -> -- | received objects Map objectId object -> DecisionGlobalState peerAddr objectId object -> DecisionGlobalState peerAddr objectId object handleReceivedObjectsImpl peerAddr - objectsRequestedIds objectsReceived st@DecisionGlobalState { dgsPeerStates , dgsObjectsInflightMultiplicities - } = do - let + } = + -- TODO: error handling should be done by the client before using the API + -- past that point we assume: + -- assert (objectsRequestedIds `Set.isSubsetOf` dpsObjectsInflightIds) $ + -- assert (objectsReceivedIds == objectsRequestedIds) $ + st + { dgsObjectsInflightMultiplicities = dgsObjectsInflightMultiplicities' + , dgsPeerStates = dgsPeerStates' + } + where + objectsReceivedIds = Map.keysSet objectsReceived + peerState@DecisionPeerState { dpsObjectsInflightIds , dpsObjectsPending @@ -153,22 +150,18 @@ handleReceivedObjectsImpl (error "ObjectDiffusion.handleReceivedObjectsImpl: the peer should appear in dgsPeerStates") peerAddr dgsPeerStates - -- TODO: error handling should be done by the client before using the API - -- past that point we assume - -- assert 
(objectsRequestedIds `Set.isSubsetOf` dpsObjectsInflightIds) $ - -- assert (Map.keysSet objectsReceived == objectsRequestedIds) $ do - let + dpsObjectsPending' = dpsObjectsPending <> objectsReceived -- subtract requested from in-flight dpsObjectsInflightIds' = - dpsObjectsInflightIds \\ objectsRequestedIds + dpsObjectsInflightIds \\ objectsReceivedIds dgsObjectsInflightMultiplicities' = Foldable.foldl' decreaseCount dgsObjectsInflightMultiplicities - objectsRequestedIds + objectsReceivedIds -- Update DecisionPeerState -- @@ -177,7 +170,7 @@ handleReceivedObjectsImpl -- this peer twice. -- TODO: this is wrong, it should be done earlier when the request for objects is emitted -- aka in when the decision for this peer is emitted/read? - -- dpsObjectsAvailableIds'' = dpsObjectsAvailableIds `Set.difference` objectsRequestedIds + -- dpsObjectsAvailableIds'' = dpsObjectsAvailableIds `Set.difference` objectsReceivedIds peerState' = peerState @@ -187,11 +180,6 @@ handleReceivedObjectsImpl dgsPeerStates' = Map.insert peerAddr peerState' dgsPeerStates - st - { dgsObjectsInflightMultiplicities = dgsObjectsInflightMultiplicities' - , dgsPeerStates = dgsPeerStates' - } - -- -- Monadic public API -- @@ -246,76 +234,84 @@ handleReceivedObjects :: ( MonadSTM m , Ord objectId , Ord peerAddr - , Show objectId - , Typeable objectId ) => Tracer m (TraceDecisionLogic peerAddr objectId object) -> DecisionGlobalStateVar m peerAddr objectId object -> peerAddr -> - -- | set of requested objectIds - Set objectId -> -- | received objects Map objectId object -> - -- | number of objectIds to be acknowledged and objects to be added to the - -- objectpool m () -handleReceivedObjects tracer globalStateVar peerAddr objectsRequestedIds objectsReceived = do +handleReceivedObjects tracer globalStateVar peerAddr objectsReceived = do globalState' <- atomically $ do stateTVar globalStateVar - ( \globalState -> let globalState' = handleReceivedObjectsImpl peerAddr objectsRequestedIds objectsReceived 
globalState + ( \globalState -> let globalState' = handleReceivedObjectsImpl peerAddr objectsReceived globalState in (globalState', globalState') ) traceWith tracer (TraceDecisionLogicGlobalStateUpdated "handleReceivedObjects" globalState') submitObjectsToPool :: - Tracer m (TraceObjectDiffusionInbound objectId object) -> ObjectPoolWriter objectId object m -> [object] -> m () -submitObjectsToPool tracer objectPoolWriter objects = + forall m peerAddr object objectId. + ( Ord objectId + , Ord peerAddr + , MonadMask m + , MonadMVar m + , MonadSTM m + ) => + Tracer m (TraceObjectDiffusionInbound objectId object) -> + ObjectPoolSem m -> + DecisionGlobalStateVar m peerAddr objectId object -> + ObjectPoolWriter objectId object m -> + peerAddr -> + [object] -> + m () +submitObjectsToPool + tracer + (ObjectPoolSem poolSem) + globalStateVar + objectPoolWriter + peerAddr + objects = bracket_ (atomically $ waitTSem poolSem) (atomically $ signalTSem poolSem) $ do opwAddObjects objectPoolWriter objects - now <- getMonotonicTime - traceWith tracer (TraceObjectDiffusionInboundSubmittedObjects (NumObjectsProcessed $ length objects)) + traceWith tracer $ + TraceObjectDiffusionInboundCollectedObjects $ + NumObjectsProcessed $ fromIntegral $ length objects atomically $ let getId = opwObjectId objectPoolWriter in modifyTVar globalStateVar $ \globalState -> - foldl' (\st object -> updateObjectsOwtPool now (getId object) object st) globalState + Foldable.foldl' + (\st object -> updateObjectsOwtPool (getId object) object st) + globalState + objects where updateObjectsOwtPool :: - Time -> objectId -> object -> DecisionGlobalState peerAddr objectId object -> DecisionGlobalState peerAddr objectId object updateObjectsOwtPool - now objectId + object st@DecisionGlobalState - { dgsPeerStates - , dgsObjectsLiveMultiplicities + { dgsObjectsLiveMultiplicities , dgsObjectsOwtPoolMultiplicities + , dgsPeerStates } = st - { dgsPeerStates = dgsPeerStates' - , dgsObjectsLiveMultiplicities = 
dgsObjectsLiveMultiplicities' + { dgsObjectsLiveMultiplicities = dgsObjectsLiveMultiplicities' , dgsObjectsOwtPoolMultiplicities = dgsObjectsOwtPoolMultiplicities' + , dgsPeerStates = dgsPeerStates' } where - dgsObjectsOwtPoolMultiplicities' = - Map.update - (\case 1 -> Nothing; n -> Just $! pred n) - objectId - dgsObjectsOwtPoolMultiplicities + dgsObjectsOwtPoolMultiplicities' = decreaseCount dgsObjectsOwtPoolMultiplicities objectId - dgsObjectsLiveMultiplicities' = - Map.alter - (\case Nothing -> Just 1; Just n -> Just $! succ n) - objectId - dgsObjectsLiveMultiplicities + dgsObjectsLiveMultiplicities' = increaseCount dgsObjectsLiveMultiplicities objectId dgsPeerStates' = Map.update (\ps -> Just $! ps{dpsObjectsPending = Map.insert objectId object (dpsObjectsPending ps)}) peerAddr - dgsPeerStates \ No newline at end of file + dgsPeerStates diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Types.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Types.hs index 0668998e7a..a14a554cf4 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Types.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Types.hs @@ -38,8 +38,14 @@ module Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.Types -- * Helpers for ObjectMultiplicity maps , increaseCount , decreaseCount + + -- * Object pool semaphore + , ObjectPoolSem (..) 
+ , newObjectPoolSem ) where +import Control.Concurrent.Class.MonadSTM.Strict (MonadSTM, atomically) +import Control.Concurrent.Class.MonadSTM.TSem (TSem, newTSem) import Control.Exception (Exception (..)) import Control.Monad.Class.MonadTime.SI import Data.Map.Strict (Map) @@ -57,11 +63,16 @@ import Ouroboros.Network.ControlMessage (ControlMessage) import Control.DeepSeq (NFData) import Quiet (Quiet (..)) +-- | Semaphore to guard access to the ObjectPool +newtype ObjectPoolSem m = ObjectPoolSem (TSem m) + +newObjectPoolSem :: MonadSTM m => m (ObjectPoolSem m) +newObjectPoolSem = ObjectPoolSem <$> atomically (newTSem 1) + -- -- DecisionPeerState, DecisionGlobalState -- - -- | In all the fields' names, -- If "Ids" appears at the beginning of a name field, it means we refer to IDs -- specifically (i.e. before the corresponding object is in flight). @@ -342,7 +353,7 @@ decreaseCount mmap k = data TraceObjectDiffusionInbound objectId object = -- | Number of objects just about to be inserted. - TraceObjectDiffusionInboundCollectedObjects Int + TraceObjectDiffusionInboundCollectedObjects NumObjectsProcessed | -- | Just processed object pass/fail breakdown. 
TraceObjectDiffusionInboundAddedObjects Int | -- | Received a 'ControlMessage' from the outbound peer governor, and about From cd4c6a67f80f328a8e207352b3d2bd1654ec52a0 Mon Sep 17 00:00:00 2001 From: Thomas BAGREL Date: Fri, 10 Oct 2025 16:05:24 +0200 Subject: [PATCH 15/43] Further update state.hs --- .../ObjectDiffusion/Inbound/V2/State.hs | 77 ++++++------------- .../ObjectDiffusion/Inbound/V2/Types.hs | 21 ++++- 2 files changed, 42 insertions(+), 56 deletions(-) diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/State.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/State.hs index f2b9898772..31d38b6043 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/State.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/State.hs @@ -8,8 +8,6 @@ module Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.State ( -- * Core API DecisionGlobalState (..) , DecisionPeerState (..) 
- , DecisionGlobalStateVar - , newDecisionGlobalStateVar , handleReceivedIds , handleReceivedObjects , submitObjectsToPool @@ -23,8 +21,9 @@ import Data.Foldable qualified as Foldable import Data.Map.Strict (Map, findWithDefault) import Data.Map.Strict qualified as Map import Data.Sequence qualified as Seq -import Data.Sequence.Strict (StrictSeq, fromStrict) -import Data.Set ((\\)) +import Data.Sequence.Strict (StrictSeq) +import Data.Sequence.Strict qualified as StrictSeq +import Data.Set ((\\), Set) import Data.Set qualified as Set import GHC.Stack (HasCallStack) import Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.Types @@ -33,6 +32,9 @@ import Ouroboros.Consensus.Util.IOLike (MonadMask, MonadMVar, bracket_) import Ouroboros.Network.Protocol.ObjectDiffusion.Type (NumObjectIdsReq) import System.Random (StdGen) +strictSeqToSet :: Ord a => StrictSeq a -> Set a +strictSeqToSet = Foldable.foldl' (flip Set.insert) Set.empty + -- | Insert received `objectId`s and return the number of objectIds to be acknowledged with next request -- and the updated `DecisionGlobalState`. -- TODO: check for possible errors in the peer response, raise exception if it happened @@ -60,8 +62,7 @@ handleReceivedIdsImpl , dgsObjectsLiveMultiplicities } = st - { dgsObjectsLiveMultiplicities = dgsObjectsLiveMultiplicities' - , dgsPeerStates = dgsPeerStates' + { dgsPeerStates = dgsPeerStates' } where peerState@DecisionPeerState @@ -74,35 +75,21 @@ handleReceivedIdsImpl peerAddr dgsPeerStates - -- Divide the new objectIds in two: those that are already in the objectpool - -- and those that are not. We'll request some objects from the latter. - newIdsAvailableSeq = - Seq.filter (not . hasObject) $ fromStrict receivedIdsSeq + -- Filter out objects we are not interesting in downloading + newObjectsAvailableIds = + Set.filter (\objectId -> + (not . 
hasObject $ objectId) -- object isn't already in the object pool + && objectId `notElem` dpsOutstandingFifo -- object hasn't been advertised before by the outbound peer (this covers the cases where the object would be in flight, or already downloaded but no acknowledged yet, by the current peer) + -- TODO; the condition below is problematic because it would prevent requesting an object from two different peers if the first one has already requested it (i.e. it is in flight)! + && objectId `Map.notMember` dgsObjectsLiveMultiplicities -- the object is not currently in flight or further in the processing from another peer + ) $ strictSeqToSet $ receivedIdsSeq - -- Add all `objectIds` from `dpsObjectsAvailableIdsMap` which are not - -- unacknowledged or already buffered. Unacknowledged objectIds must have - -- already been added to `dpsObjectsAvailableIds` map before. dpsObjectsAvailableIds' = - Foldable.foldl' - (\m objectId -> Set.insert objectId m) - dpsObjectsAvailableIds - ( Seq.filter - ( \objectId -> - objectId `notElem` dpsOutstandingFifo - && objectId `Map.notMember` dgsObjectsLiveMultiplicities - ) - newIdsAvailableSeq - ) + dpsObjectsAvailableIds `Set.union` newObjectsAvailableIds -- Add received objectIds to `dpsOutstandingFifo`. 
dpsOutstandingFifo' = dpsOutstandingFifo <> receivedIdsSeq - dgsObjectsLiveMultiplicities' = - Foldable.foldl' - decreaseCount - dgsObjectsLiveMultiplicities - receivedIdsSeq - peerState' = assert (dpsNumIdsInflight >= numIdsInitiallyRequested) @@ -184,23 +171,6 @@ handleReceivedObjectsImpl -- Monadic public API -- -type DecisionGlobalStateVar m peerAddr objectId object = - StrictTVar m (DecisionGlobalState peerAddr objectId object) - -newDecisionGlobalStateVar :: - MonadSTM m => - StdGen -> - m (DecisionGlobalStateVar m peerAddr objectId object) -newDecisionGlobalStateVar rng = - newTVarIO - DecisionGlobalState - { dgsPeerStates = Map.empty - , dgsObjectsInflightMultiplicities = Map.empty - , dgsObjectsLiveMultiplicities = Map.empty - , dgsObjectsOwtPoolMultiplicities = Map.empty - , dgsRng = rng - } - -- | Wrapper around `handleReceivedIdsImpl`. -- Obtain the `hasObject` function atomically from the STM context and -- updates and traces the global state TVar. @@ -283,18 +253,16 @@ submitObjectsToPool let getId = opwObjectId objectPoolWriter in modifyTVar globalStateVar $ \globalState -> Foldable.foldl' - (\st object -> updateObjectsOwtPool (getId object) object st) + (\st object -> updateStateWhenObjectAddedToPool (getId object) st) globalState objects where - updateObjectsOwtPool :: + updateStateWhenObjectAddedToPool :: objectId -> - object -> DecisionGlobalState peerAddr objectId object -> DecisionGlobalState peerAddr objectId object - updateObjectsOwtPool + updateStateWhenObjectAddedToPool objectId - object st@DecisionGlobalState { dgsObjectsLiveMultiplicities , dgsObjectsOwtPoolMultiplicities @@ -307,11 +275,10 @@ submitObjectsToPool } where dgsObjectsOwtPoolMultiplicities' = decreaseCount dgsObjectsOwtPoolMultiplicities objectId - - dgsObjectsLiveMultiplicities' = increaseCount dgsObjectsLiveMultiplicities objectId + dgsObjectsLiveMultiplicities' = decreaseCount dgsObjectsLiveMultiplicities objectId dgsPeerStates' = - Map.update - (\ps -> Just $! 
ps{dpsObjectsPending = Map.insert objectId object (dpsObjectsPending ps)}) + Map.adjust + (\ps@DecisionPeerState{dpsObjectsOwtPool} -> ps{dpsObjectsOwtPool = Map.delete objectId dpsObjectsOwtPool}) peerAddr dgsPeerStates diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Types.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Types.hs index a14a554cf4..3eb8a235e3 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Types.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Types.hs @@ -15,6 +15,8 @@ module Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.Types -- * DecisionGlobalState , DecisionGlobalState (..) + , DecisionGlobalStateVar + , newDecisionGlobalStateVar -- * Decisions , PeerDecision (..) @@ -44,7 +46,7 @@ module Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.Types , newObjectPoolSem ) where -import Control.Concurrent.Class.MonadSTM.Strict (MonadSTM, atomically) +import Control.Concurrent.Class.MonadSTM.Strict (MonadSTM, atomically, StrictTVar, newTVarIO) import Control.Concurrent.Class.MonadSTM.TSem (TSem, newTSem) import Control.Exception (Exception (..)) import Control.Monad.Class.MonadTime.SI @@ -195,6 +197,23 @@ dgsObjectsOwtPool :: Ord objectId => DecisionGlobalState peerAddr objectId objec dgsObjectsOwtPool DecisionGlobalState{dgsPeerStates} = Map.unions (dpsObjectsOwtPool <$> (Map.elems dgsPeerStates)) +type DecisionGlobalStateVar m peerAddr objectId object = + StrictTVar m (DecisionGlobalState peerAddr objectId object) + +newDecisionGlobalStateVar :: + MonadSTM m => + StdGen -> + m (DecisionGlobalStateVar m peerAddr objectId object) +newDecisionGlobalStateVar rng = + newTVarIO + DecisionGlobalState + { dgsPeerStates = Map.empty + , dgsObjectsInflightMultiplicities = Map.empty 
+ , dgsObjectsLiveMultiplicities = Map.empty + , dgsObjectsOwtPoolMultiplicities = Map.empty + , dgsRng = rng + } + -- -- Decisions -- From 70a793fc065b930cb3e1ca2e5c204357a5e8382a Mon Sep 17 00:00:00 2001 From: Thomas BAGREL Date: Fri, 10 Oct 2025 17:08:19 +0200 Subject: [PATCH 16/43] Improve state management and diagram --- .../ObjectDiffusion/Inbound/V2.mermaid | 55 +++++++++++------- .../ObjectDiffusion/Inbound/V2/State.hs | 57 +++++++++++++++---- .../ObjectDiffusion/Inbound/V2/Types.hs | 15 +++++ 3 files changed, 95 insertions(+), 32 deletions(-) diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2.mermaid b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2.mermaid index f0913589af..902bd0e4cf 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2.mermaid +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2.mermaid @@ -11,31 +11,42 @@ flowchart TD I(dgsObjectsOwtPoolMultiplicities) EA{requestIds} - EA-->|count| A + EA-->|+count| A - EB{receiveIds} - A --> EB - EB -->|ids| B - EB -->|ids| C + EB{handleReceiveIds} + A -->|-count| EB + EB -->|+ids| B + IN1@{ shape: lin-cyl, label: "ids" } --o EB + EB -->|+ids| C EC{requestObjects} - C --> EC - EC -->|ids| D - EC --> |count| G - EC -->|count| H - - ED{receiveObjects} - D --> ED - H --> ED + C -->|-ids| EC + EC -->|+ids| D + EC --> |+count| G + EC -->|+count| H + + ED{handleReceiveObjects} + D -->|-ids| ED + H -->|-count| ED + IN2@{ shape: lin-cyl, label: "objects" } --o ED ED -->|objects| E EE{acknowledgeIds} - B --> EE - E --> EE - EE -->|objects| F - EE -->|count| I - - EF{Added to pool} - F --> EF - G --> EF - I --> EF + B -->|-ids| EE + + EF{submitToPool} + EE -.->|ids| EF + E -->|-objects| EF + EF -->|+objects| F + EF -->|+count| I + + EG{Added to pool} + F -->|-objects| EG + 
G -->|-count| EG + I -->|-count| EG + + EZ{makeDecision} + + EZ -.->|count| EA + EZ -.->|ids| EC + EZ -.->|ids| EE diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/State.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/State.hs index 31d38b6043..36b91fac60 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/State.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/State.hs @@ -20,6 +20,7 @@ import Control.Tracer (Tracer, traceWith) import Data.Foldable qualified as Foldable import Data.Map.Strict (Map, findWithDefault) import Data.Map.Strict qualified as Map +import Data.Map.Merge.Strict qualified as Map import Data.Sequence qualified as Seq import Data.Sequence.Strict (StrictSeq) import Data.Sequence.Strict qualified as StrictSeq @@ -57,16 +58,16 @@ handleReceivedIdsImpl peerAddr numIdsInitiallyRequested receivedIdsSeq - st@DecisionGlobalState + globalState@DecisionGlobalState { dgsPeerStates - , dgsObjectsLiveMultiplicities } = - st + globalState { dgsPeerStates = dgsPeerStates' } where peerState@DecisionPeerState { dpsOutstandingFifo + , dpsObjectsInflightIds , dpsObjectsAvailableIds , dpsNumIdsInflight } = @@ -79,9 +80,8 @@ handleReceivedIdsImpl newObjectsAvailableIds = Set.filter (\objectId -> (not . hasObject $ objectId) -- object isn't already in the object pool - && objectId `notElem` dpsOutstandingFifo -- object hasn't been advertised before by the outbound peer (this covers the cases where the object would be in flight, or already downloaded but no acknowledged yet, by the current peer) - -- TODO; the condition below is problematic because it would prevent requesting an object from two different peers if the first one has already requested it (i.e. it is in flight)! 
- && objectId `Map.notMember` dgsObjectsLiveMultiplicities -- the object is not currently in flight or further in the processing from another peer + && objectId `Set.notMember` dpsObjectsInflightIds -- object isn't in flight from current peer + && objectId `Map.notMember` (dgsObjectsPendingOrOwtPoolMultiplicities globalState) -- the object has not been successfully downloaded from another peer ) $ strictSeqToSet $ receivedIdsSeq dpsObjectsAvailableIds' = @@ -240,23 +240,60 @@ submitObjectsToPool globalStateVar objectPoolWriter peerAddr - objects = + objects = do + let getId = opwObjectId objectPoolWriter + + -- Move objects from `pending` to `owtPool` state + atomically $ modifyTVar globalStateVar $ \globalState -> + Foldable.foldl' + (\st object -> updateStateWhenObjectOwtPool (getId object) st) + globalState + objects + bracket_ (atomically $ waitTSem poolSem) (atomically $ signalTSem poolSem) $ do + -- When the lock over the object pool is obtained opwAddObjects objectPoolWriter objects traceWith tracer $ TraceObjectDiffusionInboundCollectedObjects $ NumObjectsProcessed $ fromIntegral $ length objects - atomically $ - let getId = opwObjectId objectPoolWriter in - modifyTVar globalStateVar $ \globalState -> + + -- Move objects from `owtPool` to `inPool` state + atomically $ modifyTVar globalStateVar $ \globalState -> Foldable.foldl' (\st object -> updateStateWhenObjectAddedToPool (getId object) st) globalState objects where + updateStateWhenObjectOwtPool :: + objectId -> + DecisionGlobalState peerAddr objectId object -> + DecisionGlobalState peerAddr objectId object + updateStateWhenObjectOwtPool + objectId + st@DecisionGlobalState + { dgsObjectsOwtPoolMultiplicities + , dgsPeerStates + } = + st + { dgsObjectsOwtPoolMultiplicities = dgsObjectsOwtPoolMultiplicities' + , dgsPeerStates = dgsPeerStates' + } + where + dgsObjectsOwtPoolMultiplicities' = increaseCount dgsObjectsOwtPoolMultiplicities objectId + + dgsPeerStates' = + Map.adjust + 
(\ps@DecisionPeerState{dpsObjectsOwtPool, dpsObjectsPending} -> + let object = case Map.lookup objectId dpsObjectsPending of + Just obj -> obj + Nothing -> error "ObjectDiffusion.updateStateWhenObjectOwtPool: the object should be in dpsObjectsPending" + in ps{dpsObjectsPending = Map.delete objectId dpsObjectsPending, dpsObjectsOwtPool = Map.insert objectId object dpsObjectsOwtPool}) + peerAddr + dgsPeerStates + updateStateWhenObjectAddedToPool :: objectId -> DecisionGlobalState peerAddr objectId object -> diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Types.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Types.hs index 3eb8a235e3..7b6034bbb8 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Types.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Types.hs @@ -15,6 +15,10 @@ module Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.Types -- * DecisionGlobalState , DecisionGlobalState (..) 
+ , dgsObjectsAvailableIds + , dgsObjectsPending + , dgsObjectsOwtPool + , dgsObjectsPendingOrOwtPoolMultiplicities , DecisionGlobalStateVar , newDecisionGlobalStateVar @@ -52,6 +56,7 @@ import Control.Exception (Exception (..)) import Control.Monad.Class.MonadTime.SI import Data.Map.Strict (Map) import Data.Map.Strict qualified as Map +import Data.Map.Merge.Strict qualified as Map import Data.Monoid (Sum (..)) import Data.Sequence.Strict (StrictSeq) import Data.Set (Set) @@ -197,6 +202,16 @@ dgsObjectsOwtPool :: Ord objectId => DecisionGlobalState peerAddr objectId objec dgsObjectsOwtPool DecisionGlobalState{dgsPeerStates} = Map.unions (dpsObjectsOwtPool <$> (Map.elems dgsPeerStates)) +nonZeroCountMapDiff :: (Ord k) => Map k ObjectMultiplicity -> Map k ObjectMultiplicity -> Map k ObjectMultiplicity +nonZeroCountMapDiff = Map.merge + Map.preserveMissing + Map.dropMissing + (Map.zipWithMaybeMatched (\_ count1 count2 -> let c = count1 - count2 in if c > 0 then Just c else Nothing)) + +dgsObjectsPendingOrOwtPoolMultiplicities :: Ord objectId => DecisionGlobalState peerAddr objectId object -> Map objectId ObjectMultiplicity +dgsObjectsPendingOrOwtPoolMultiplicities DecisionGlobalState{dgsObjectsLiveMultiplicities, dgsObjectsInflightMultiplicities} = + nonZeroCountMapDiff dgsObjectsLiveMultiplicities dgsObjectsInflightMultiplicities + type DecisionGlobalStateVar m peerAddr objectId object = StrictTVar m (DecisionGlobalState peerAddr objectId object) From 553c767fc649328a9be9cd886d577abfd980c51d Mon Sep 17 00:00:00 2001 From: Thomas BAGREL Date: Mon, 13 Oct 2025 10:21:50 +0200 Subject: [PATCH 17/43] Update state and diagram following removal of `dgsObjectsLiveMultiplicities` --- .../ObjectDiffusion/Inbound/V2.hs | 2 +- .../ObjectDiffusion/Inbound/V2.mermaid | 8 +- .../ObjectDiffusion/Inbound/V2/Decision.hs | 26 +-- .../ObjectDiffusion/Inbound/V2/Policy.hs | 9 +- .../ObjectDiffusion/Inbound/V2/Registry.hs | 35 +-- .../ObjectDiffusion/Inbound/V2/State.hs | 200 
+++++++++--------- .../ObjectDiffusion/Inbound/V2/Types.hs | 95 +++------ 7 files changed, 172 insertions(+), 203 deletions(-) diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2.hs index ef8b0c6cec..0122b2152b 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2.hs @@ -85,7 +85,7 @@ objectDiffusionInbound , pdObjectsToPool = pdObjectsToPool } <- readPeerDecision - traceWith tracer (TraceObjectDiffusionInboundDecisionReceived object) + traceWith tracer (TraceObjectDiffusionInboundReceivedDecision object) let !collected = length undefined diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2.mermaid b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2.mermaid index 902bd0e4cf..44ebca3d2c 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2.mermaid +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2.mermaid @@ -6,8 +6,8 @@ flowchart TD E(dpsObjectsPending) F(dpsObjectsOwtPool) - G(dgsObjectsLiveMultiplicities) H(dgsObjectsInflightMultiplicities) + G(dgsObjectsPendingMultiplicities) I(dgsObjectsOwtPoolMultiplicities) EA{requestIds} @@ -22,14 +22,14 @@ flowchart TD EC{requestObjects} C -->|-ids| EC EC -->|+ids| D - EC --> |+count| G EC -->|+count| H ED{handleReceiveObjects} D -->|-ids| ED H -->|-count| ED IN2@{ shape: lin-cyl, label: "objects" } --o ED - ED -->|objects| E + ED -->|+objects| E + ED -->|+count| G EE{acknowledgeIds} B -->|-ids| EE @@ -37,12 +37,12 @@ flowchart TD 
EF{submitToPool} EE -.->|ids| EF E -->|-objects| EF + G -->|-count| EF EF -->|+objects| F EF -->|+count| I EG{Added to pool} F -->|-objects| EG - G -->|-count| EG I -->|-count| EG EZ{makeDecision} diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Decision.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Decision.hs index e3b284037f..dcc51718e3 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Decision.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Decision.hs @@ -94,8 +94,8 @@ data DecisionInternalState peerAddr objectId object , disObjectsInflightMultiplicities :: !(Map objectId ObjectMultiplicity) -- ^ `objectId`s in-flight. , disIdsToAckMultiplicities :: !(Map objectId ObjectMultiplicity) - -- ^ acknowledged `objectId` with multiplicities. It is used to update - -- `dgsObjectsLiveMultiplicities`. + -- ^ acknowledged `objectId` with multiplicities. It is used to update + -- `dgsObjectsPendingMultiplicities`. , disObjectsOwtPoolIds :: Set objectId -- ^ objects on their way to the objectpool. Used to prevent issueing new -- fetch requests for them. @@ -135,7 +135,7 @@ pickObjectsToDownload { dgsPeerStates , dgsObjectsInflightMultiplicities , dgsObjectsOwtPoolMultiplicities - , dgsObjectsLiveMultiplicities + , dgsObjectsPendingMultiplicities } = -- outer fold: fold `[(peerAddr, DecisionPeerState objectId object)]` List.mapAccumR @@ -258,7 +258,7 @@ pickObjectsToDownload disObjectsInflightMultiplicities -- remove `object`s which were already downloaded by some -- other peer or are in-flight or unknown by this peer. 
- `Set.unions` ( Map.keysSet dgsObjectsLiveMultiplicities + `Set.unions` ( Map.keysSet dgsObjectsPendingMultiplicities <> dpsObjectsInflightIds <> dpsObjectsRequestedButNotReceivedIds <> disObjectsOwtPoolIds @@ -347,7 +347,7 @@ pickObjectsToDownload Map.fromList ((\(a, _) -> a) <$> as) <> dgsPeerStates - dgsObjectsLiveMultiplicities' = + dgsObjectsPendingMultiplicities' = Map.merge (Map.mapMaybeMissing \_ x -> Just x) (Map.mapMaybeMissing \_ _ -> assert False Nothing) @@ -356,7 +356,7 @@ pickObjectsToDownload then Just $! x - y else Nothing ) - dgsObjectsLiveMultiplicities + dgsObjectsPendingMultiplicities disIdsToAckMultiplicities dgsObjectsOwtPoolMultiplicities' = @@ -364,7 +364,7 @@ pickObjectsToDownload in ( sharedState { dgsPeerStates = dgsPeerStates' , dgsObjectsInflightMultiplicities = disObjectsInflightMultiplicities - , dgsObjectsLiveMultiplicities = dgsObjectsLiveMultiplicities' + , dgsObjectsPendingMultiplicities = dgsObjectsPendingMultiplicities' , dgsObjectsOwtPoolMultiplicities = dgsObjectsOwtPoolMultiplicities' } , -- exclude empty results @@ -426,7 +426,7 @@ filterActivePeers where unrequestable = Map.keysSet (Map.filter (>= dpMaxObjectInflightMultiplicity) dgsObjectsInflightMultiplicities) - <> Map.keysSet dgsObjectsLiveMultiplicities + <> Map.keysSet dgsObjectsPendingMultiplicities fn :: DecisionPeerState objectId object -> Bool fn @@ -563,7 +563,7 @@ acknowledgeObjectIds objectsOwtPoolList = [ (objectId, object) | objectId <- toList toObjectPoolObjectIds - , objectId `Map.notMember` dgsObjectsLiveMultiplicities globalState + , objectId `Map.notMember` dgsObjectsPendingMultiplicities globalState , object <- maybeToList $ objectId `Map.lookup` dpsObjectsPending ] (toObjectPoolObjectIds, _) = @@ -621,7 +621,7 @@ splitAcknowledgedObjectIds , dpMaxNumObjectIdsReq } DecisionGlobalState - { dgsObjectsLiveMultiplicities + { dgsObjectsPendingMultiplicities } DecisionPeerState { dpsOutstandingFifo @@ -634,7 +634,7 @@ splitAcknowledgedObjectIds 
(acknowledgedObjectIds', dpsOutstandingFifo') = StrictSeq.spanl ( \objectId -> - ( objectId `Map.member` dgsObjectsLiveMultiplicities + ( objectId `Map.member` dgsObjectsPendingMultiplicities || objectId `Set.member` dpsObjectsRequestedButNotReceivedIds || objectId `Map.member` dpsObjectsPending ) @@ -662,7 +662,7 @@ updateRefCounts :: Map objectId Int -> RefCountDiff objectId -> Map objectId Int -updateRefCounts dgsObjectsLiveMultiplicities (RefCountDiff diff) = +updateRefCounts dgsObjectsPendingMultiplicities (RefCountDiff diff) = Map.merge (Map.mapMaybeMissing \_ x -> Just x) (Map.mapMaybeMissing \_ _ -> Nothing) @@ -673,5 +673,5 @@ updateRefCounts dgsObjectsLiveMultiplicities (RefCountDiff diff) = then Just $! x - y else Nothing ) - dgsObjectsLiveMultiplicities + dgsObjectsPendingMultiplicities diff \ No newline at end of file diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Policy.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Policy.hs index 8d993086db..89e89b2db7 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Policy.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Policy.hs @@ -3,7 +3,6 @@ module Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.Policy , defaultDecisionPolicy ) where -import Control.Monad.Class.MonadTime.SI import Ouroboros.Network.Protocol.ObjectDiffusion.Type (NumObjectIdsReq (..), NumObjectsOutstanding, NumObjectsReq (..)) import Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.Types (ObjectMultiplicity) @@ -19,9 +18,6 @@ data DecisionPolicy = DecisionPolicy -- ^ a limit of object size in-flight from all peers, plus or minus 1 , dpMaxObjectInflightMultiplicity :: !ObjectMultiplicity -- ^ from how many peers download the `objectId` simultaneously - , 
dpMinObtainedButNotAckedObjectsLifetime :: !DiffTime - -- ^ how long objects that have been added to the objectpool will be - -- kept in the `dgsObjectsLiveMultiplicities` cache. } deriving Show @@ -30,8 +26,7 @@ defaultDecisionPolicy = DecisionPolicy { dpMaxNumObjectIdsReq = 3 , dpMaxNumObjectsOutstanding = 10 -- must be the same as objectDiffusionMaxUnacked - , dpMaxNumObjectsInflightPerPeer = NumObjectsReq 6 - , dpMaxNumObjectsInflightTotal = NumObjectsReq 20 + , dpMaxNumObjectsInflightPerPeer = 6 + , dpMaxNumObjectsInflightTotal = 20 , dpMaxObjectInflightMultiplicity = 2 - , dpMinObtainedButNotAckedObjectsLifetime = 2 } diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Registry.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Registry.hs index 24f6c24e9c..f357e3736e 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Registry.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Registry.hs @@ -33,7 +33,6 @@ import Data.Set qualified as Set import Data.Void (Void) import Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.Decision import Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.Policy -import Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.State (DecisionGlobalStateVar) import Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.State qualified as State import Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.Types import Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.ObjectPool.API @@ -89,9 +88,9 @@ withPeer objectDiffusionTracer decisionChannelsVar objectPoolSem - decisionPolicy + _decisionPolicy globalStateVar - objectPoolReader + _objectPoolReader objectPoolWriter peerAddr withAPI = @@ -118,20 +117,24 @@ withPeer , InboundPeerAPI { readPeerDecision = 
takeMVar chan' , handleReceivedIds = State.handleReceivedIds + objectDiffusionTracer decisionTracer globalStateVar objectPoolWriter peerAddr (error "TODO: provide the number of requested IDs") , handleReceivedObjects = State.handleReceivedObjects + objectDiffusionTracer decisionTracer globalStateVar + objectPoolWriter peerAddr , submitObjectsToPool = State.submitObjectsToPool objectDiffusionTracer - objectPoolSem + decisionTracer globalStateVar objectPoolWriter + objectPoolSem peerAddr } ) @@ -182,21 +185,21 @@ withPeer unregisterPeerGlobalState st@DecisionGlobalState { dgsPeerStates - , dgsObjectsLiveMultiplicities , dgsObjectsInflightMultiplicities + , dgsObjectsPendingMultiplicities , dgsObjectsOwtPoolMultiplicities } = st { dgsPeerStates = dgsPeerStates' - , dgsObjectsLiveMultiplicities = dgsObjectsLiveMultiplicities' , dgsObjectsInflightMultiplicities = dgsObjectsInflightMultiplicities' + , dgsObjectsPendingMultiplicities = dgsObjectsPendingMultiplicities' , dgsObjectsOwtPoolMultiplicities = dgsObjectsOwtPoolMultiplicities' } where -- First extract the DPS of the specified peer from the DGS ( DecisionPeerState - { dpsOutstandingFifo - , dpsObjectsInflightIds + { dpsObjectsInflightIds + , dpsObjectsPending , dpsObjectsOwtPool } , dgsPeerStates' @@ -208,14 +211,6 @@ withPeer ) peerAddr dgsPeerStates - - -- Update the dgsObjectsLiveMultiplicities map by decreasing the count of each - -- objectId which is part of the dpsOutstandingFifo of this peer. - dgsObjectsLiveMultiplicities' = - Foldable.foldl' - decreaseCount - dgsObjectsLiveMultiplicities - dpsOutstandingFifo -- Update dgsInflightMultiplicities map by decreasing the count -- of objects that were in-flight for this peer. @@ -225,6 +220,14 @@ withPeer dgsObjectsInflightMultiplicities dpsObjectsInflightIds + -- Update the dgsObjectsPendingMultiplicities map by decreasing the count of each + -- objectId which is part of the dpsObjectsPending of this peer. 
+ dgsObjectsPendingMultiplicities' = + Foldable.foldl' + decreaseCount + dgsObjectsPendingMultiplicities + (Map.keysSet dpsObjectsPending) + -- Finally, we need to update dgsObjectsOwtPoolMultiplicities by decreasing the count of -- each objectId which is part of the dpsObjectsOwtPool of this peer. dgsObjectsOwtPoolMultiplicities' = diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/State.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/State.hs index 36b91fac60..4ac6d02669 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/State.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/State.hs @@ -20,8 +20,6 @@ import Control.Tracer (Tracer, traceWith) import Data.Foldable qualified as Foldable import Data.Map.Strict (Map, findWithDefault) import Data.Map.Strict qualified as Map -import Data.Map.Merge.Strict qualified as Map -import Data.Sequence qualified as Seq import Data.Sequence.Strict (StrictSeq) import Data.Sequence.Strict qualified as StrictSeq import Data.Set ((\\), Set) @@ -31,14 +29,38 @@ import Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.Types import Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.ObjectPool.API (ObjectPoolWriter (..)) import Ouroboros.Consensus.Util.IOLike (MonadMask, MonadMVar, bracket_) import Ouroboros.Network.Protocol.ObjectDiffusion.Type (NumObjectIdsReq) -import System.Random (StdGen) strictSeqToSet :: Ord a => StrictSeq a -> Set a strictSeqToSet = Foldable.foldl' (flip Set.insert) Set.empty --- | Insert received `objectId`s and return the number of objectIds to be acknowledged with next request --- and the updated `DecisionGlobalState`. 
--- TODO: check for possible errors in the peer response, raise exception if it happened +-- | Wrapper around `handleReceivedIdsImpl`. +-- Obtain the `hasObject` function atomically from the STM context and +-- updates and traces the global state TVar. +handleReceivedIds :: + forall m peerAddr object objectId. + (MonadSTM m, Ord objectId, Ord peerAddr) => + Tracer m (TraceObjectDiffusionInbound objectId object) -> + Tracer m (TraceDecisionLogic peerAddr objectId object) -> + DecisionGlobalStateVar m peerAddr objectId object -> + ObjectPoolWriter objectId object m -> + peerAddr -> + -- | number of requests to subtract from + -- `dpsNumIdsInflight` + NumObjectIdsReq -> + -- | sequence of received `objectIds` + StrictSeq objectId -> + -- | received `objectId`s + m () +handleReceivedIds odTracer decisionTracer globalStateVar objectPoolWriter peerAddr numIdsInitiallyRequested receivedIdsSeq = do + globalState' <- atomically $ do + hasObject <- opwHasObject objectPoolWriter + stateTVar + globalStateVar + ( \globalState -> let globalState' = handleReceivedIdsImpl hasObject peerAddr numIdsInitiallyRequested receivedIdsSeq globalState + in (globalState', globalState') ) + traceWith odTracer (TraceObjectDiffusionInboundReceivedIds (StrictSeq.length receivedIdsSeq)) + traceWith decisionTracer (TraceDecisionLogicGlobalStateUpdated "handleReceivedIds" globalState') + handleReceivedIdsImpl :: forall peerAddr object objectId. 
(Ord objectId, Ord peerAddr, HasCallStack) => @@ -49,7 +71,7 @@ handleReceivedIdsImpl :: -- | number of requests to subtract from -- `dpsNumIdsInflight` NumObjectIdsReq -> - -- | sequence of received `objectIds` + -- | sequence of received `objectId`s StrictSeq objectId -> DecisionGlobalState peerAddr objectId object -> DecisionGlobalState peerAddr objectId object @@ -60,6 +82,8 @@ handleReceivedIdsImpl receivedIdsSeq globalState@DecisionGlobalState { dgsPeerStates + , dgsObjectsPendingMultiplicities + , dgsObjectsOwtPoolMultiplicities } = globalState { dgsPeerStates = dgsPeerStates' @@ -81,7 +105,8 @@ handleReceivedIdsImpl Set.filter (\objectId -> (not . hasObject $ objectId) -- object isn't already in the object pool && objectId `Set.notMember` dpsObjectsInflightIds -- object isn't in flight from current peer - && objectId `Map.notMember` (dgsObjectsPendingOrOwtPoolMultiplicities globalState) -- the object has not been successfully downloaded from another peer + && objectId `Map.notMember` dgsObjectsPendingMultiplicities -- the object has not been successfully downloaded from another peer + && objectId `Map.notMember` dgsObjectsOwtPoolMultiplicities -- (either pending ack or owt pool) ) $ strictSeqToSet $ receivedIdsSeq dpsObjectsAvailableIds' = @@ -101,6 +126,37 @@ handleReceivedIdsImpl dgsPeerStates' = Map.insert peerAddr peerState' dgsPeerStates +-- | Wrapper around `handleReceivedObjectsImpl` that updates and traces the +-- global state TVar. +-- +-- Error handling should be done by the client before using the API. +-- In particular we assume: +-- assert (objectsRequestedIds `Set.isSubsetOf` dpsObjectsInflightIds) +-- +-- IMPORTANT: We also assume that every object has been *validated* before being passed to this function. +handleReceivedObjects :: + forall m peerAddr object objectId. 
+ ( MonadSTM m + , Ord objectId + , Ord peerAddr + ) => + Tracer m (TraceObjectDiffusionInbound objectId object) -> + Tracer m (TraceDecisionLogic peerAddr objectId object) -> + DecisionGlobalStateVar m peerAddr objectId object -> + ObjectPoolWriter objectId object m -> + peerAddr -> + -- | received objects + Map objectId object -> + m () +handleReceivedObjects odTracer tracer globalStateVar _objectPoolWriter peerAddr objectsReceived = do + globalState' <- atomically $ do + stateTVar + globalStateVar + ( \globalState -> let globalState' = handleReceivedObjectsImpl peerAddr objectsReceived globalState + in (globalState', globalState') ) + traceWith odTracer (TraceObjectDiffusionInboundReceivedObjects (Map.size objectsReceived)) + traceWith tracer (TraceDecisionLogicGlobalStateUpdated "handleReceivedObjects" globalState') + handleReceivedObjectsImpl :: forall peerAddr object objectId. ( Ord peerAddr @@ -117,13 +173,11 @@ handleReceivedObjectsImpl st@DecisionGlobalState { dgsPeerStates , dgsObjectsInflightMultiplicities + , dgsObjectsPendingMultiplicities } = - -- TODO: error handling should be done by the client before using the API - -- past that point we assume: - -- assert (objectsRequestedIds `Set.isSubsetOf` dpsObjectsInflightIds) $ - -- assert (objectsReceivedIds == objectsRequestedIds) $ st { dgsObjectsInflightMultiplicities = dgsObjectsInflightMultiplicities' + , dgsObjectsPendingMultiplicities = dgsObjectsPendingMultiplicities' , dgsPeerStates = dgsPeerStates' } where @@ -138,8 +192,6 @@ handleReceivedObjectsImpl peerAddr dgsPeerStates - dpsObjectsPending' = dpsObjectsPending <> objectsReceived - -- subtract requested from in-flight dpsObjectsInflightIds' = dpsObjectsInflightIds \\ objectsReceivedIds @@ -149,15 +201,14 @@ handleReceivedObjectsImpl decreaseCount dgsObjectsInflightMultiplicities objectsReceivedIds + + dpsObjectsPending' = dpsObjectsPending <> objectsReceived - -- Update DecisionPeerState - -- - -- Remove the downloaded `objectId`s from the 
dpsObjectsAvailableIds map, this - -- guarantees that we won't attempt to download the `objectIds` from - -- this peer twice. - -- TODO: this is wrong, it should be done earlier when the request for objects is emitted - -- aka in when the decision for this peer is emitted/read? - -- dpsObjectsAvailableIds'' = dpsObjectsAvailableIds `Set.difference` objectsReceivedIds + dgsObjectsPendingMultiplicities' = + Foldable.foldl' + increaseCount + dgsObjectsPendingMultiplicities + objectsReceivedIds peerState' = peerState @@ -167,58 +218,6 @@ handleReceivedObjectsImpl dgsPeerStates' = Map.insert peerAddr peerState' dgsPeerStates --- --- Monadic public API --- - --- | Wrapper around `handleReceivedIdsImpl`. --- Obtain the `hasObject` function atomically from the STM context and --- updates and traces the global state TVar. -handleReceivedIds :: - forall m peerAddr object objectId. - (MonadSTM m, Ord objectId, Ord peerAddr) => - Tracer m (TraceDecisionLogic peerAddr objectId object) -> - DecisionGlobalStateVar m peerAddr objectId object -> - ObjectPoolWriter objectId object m -> - peerAddr -> - -- | number of requests to subtract from - -- `dpsNumIdsInflight` - NumObjectIdsReq -> - -- | sequence of received `objectIds` - StrictSeq objectId -> - -- | received `objectId`s - m () -handleReceivedIds tracer globalStateVar objectPoolWriter peerAddr numIdsInitiallyRequested receivedIdsSeq = do - globalState' <- atomically $ do - hasObject <- opwHasObject objectPoolWriter - stateTVar - globalStateVar - ( \globalState -> let globalState' = handleReceivedIdsImpl hasObject peerAddr numIdsInitiallyRequested receivedIdsSeq globalState - in (globalState', globalState') ) - traceWith tracer (TraceDecisionLogicGlobalStateUpdated "handleReceivedIds" globalState') - --- | Wrapper around `handleReceivedObjectsImpl` that updates and traces the --- global state TVar. -handleReceivedObjects :: - forall m peerAddr object objectId. 
- ( MonadSTM m - , Ord objectId - , Ord peerAddr - ) => - Tracer m (TraceDecisionLogic peerAddr objectId object) -> - DecisionGlobalStateVar m peerAddr objectId object -> - peerAddr -> - -- | received objects - Map objectId object -> - m () -handleReceivedObjects tracer globalStateVar peerAddr objectsReceived = do - globalState' <- atomically $ do - stateTVar - globalStateVar - ( \globalState -> let globalState' = handleReceivedObjectsImpl peerAddr objectsReceived globalState - in (globalState', globalState') ) - traceWith tracer (TraceDecisionLogicGlobalStateUpdated "handleReceivedObjects" globalState') - submitObjectsToPool :: forall m peerAddr object objectId. ( Ord objectId @@ -228,27 +227,32 @@ submitObjectsToPool :: , MonadSTM m ) => Tracer m (TraceObjectDiffusionInbound objectId object) -> - ObjectPoolSem m -> + Tracer m (TraceDecisionLogic peerAddr objectId object) -> DecisionGlobalStateVar m peerAddr objectId object -> ObjectPoolWriter objectId object m -> + ObjectPoolSem m -> peerAddr -> [object] -> m () submitObjectsToPool - tracer - (ObjectPoolSem poolSem) + odTracer + decisionTracer globalStateVar objectPoolWriter + (ObjectPoolSem poolSem) peerAddr objects = do let getId = opwObjectId objectPoolWriter -- Move objects from `pending` to `owtPool` state - atomically $ modifyTVar globalStateVar $ \globalState -> - Foldable.foldl' - (\st object -> updateStateWhenObjectOwtPool (getId object) st) - globalState - objects + globalState' <- atomically $ stateTVar globalStateVar $ \globalState -> + let globalState' = + Foldable.foldl' + (\st object -> updateStateWhenObjectOwtPool (getId object) st) + globalState + objects + in (globalState', globalState') + traceWith decisionTracer (TraceDecisionLogicGlobalStateUpdated "submitObjectsToPool.updateStateWhenObjectOwtPool" globalState') bracket_ (atomically $ waitTSem poolSem) @@ -256,16 +260,18 @@ submitObjectsToPool $ do -- When the lock over the object pool is obtained opwAddObjects objectPoolWriter objects - 
traceWith tracer $ - TraceObjectDiffusionInboundCollectedObjects $ - NumObjectsProcessed $ fromIntegral $ length objects + traceWith odTracer $ + TraceObjectDiffusionInboundAddedObjects $ length objects -- Move objects from `owtPool` to `inPool` state - atomically $ modifyTVar globalStateVar $ \globalState -> - Foldable.foldl' - (\st object -> updateStateWhenObjectAddedToPool (getId object) st) - globalState - objects + globalState' <- atomically $ stateTVar globalStateVar $ \globalState -> + let globalState' = + Foldable.foldl' + (\st object -> updateStateWhenObjectAddedToPool (getId object) st) + globalState + objects + in (globalState', globalState') + traceWith decisionTracer (TraceDecisionLogicGlobalStateUpdated "submitObjectsToPool.updateStateWhenObjectAddedToPool" globalState') where updateStateWhenObjectOwtPool :: objectId -> @@ -274,14 +280,17 @@ submitObjectsToPool updateStateWhenObjectOwtPool objectId st@DecisionGlobalState - { dgsObjectsOwtPoolMultiplicities + { dgsObjectsPendingMultiplicities + , dgsObjectsOwtPoolMultiplicities , dgsPeerStates } = st - { dgsObjectsOwtPoolMultiplicities = dgsObjectsOwtPoolMultiplicities' + { dgsObjectsPendingMultiplicities = dgsObjectsPendingMultiplicities' + , dgsObjectsOwtPoolMultiplicities = dgsObjectsOwtPoolMultiplicities' , dgsPeerStates = dgsPeerStates' } where + dgsObjectsPendingMultiplicities' = decreaseCount dgsObjectsPendingMultiplicities objectId dgsObjectsOwtPoolMultiplicities' = increaseCount dgsObjectsOwtPoolMultiplicities objectId dgsPeerStates' = @@ -301,18 +310,15 @@ submitObjectsToPool updateStateWhenObjectAddedToPool objectId st@DecisionGlobalState - { dgsObjectsLiveMultiplicities - , dgsObjectsOwtPoolMultiplicities + { dgsObjectsOwtPoolMultiplicities , dgsPeerStates } = st - { dgsObjectsLiveMultiplicities = dgsObjectsLiveMultiplicities' - , dgsObjectsOwtPoolMultiplicities = dgsObjectsOwtPoolMultiplicities' + { dgsObjectsOwtPoolMultiplicities = dgsObjectsOwtPoolMultiplicities' , dgsPeerStates = 
dgsPeerStates' } where dgsObjectsOwtPoolMultiplicities' = decreaseCount dgsObjectsOwtPoolMultiplicities objectId - dgsObjectsLiveMultiplicities' = decreaseCount dgsObjectsLiveMultiplicities objectId dgsPeerStates' = Map.adjust diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Types.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Types.hs index 7b6034bbb8..ee6b567ed3 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Types.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Types.hs @@ -15,10 +15,7 @@ module Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.Types -- * DecisionGlobalState , DecisionGlobalState (..) - , dgsObjectsAvailableIds - , dgsObjectsPending - , dgsObjectsOwtPool - , dgsObjectsPendingOrOwtPoolMultiplicities + , dgsObjectsAvailableMultiplicities , DecisionGlobalStateVar , newDecisionGlobalStateVar @@ -44,6 +41,7 @@ module Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.Types -- * Helpers for ObjectMultiplicity maps , increaseCount , decreaseCount + , nonZeroCountMapDiff -- * Object pool semaphore , ObjectPoolSem (..) @@ -131,17 +129,6 @@ instance NoThunks (DecisionPeerState objectId object) -- | Shared state of all `ObjectDiffusion` clients. --- --- New `objectId` enters `dpsOutstandingFifo` it is also added to `dpsObjectsAvailableIds` --- and `dgsObjectsLiveMultiplicities` (see `acknowledgeObjectIdsImpl`). --- --- When the requested object arrives, the corresponding entry is removed from `dgsObjectsInflightMultiplicities`. 
--- --- Whenever we choose an `objectId` to acknowledge (either in `acknowledObjectsIds`, --- `handleReceivedObjectsImpl` or --- `pickObjectsToDownload`, we also --- recalculate `dgsObjectsLiveMultiplicities` and only keep live `objectId`s in other maps (e.g. --- `dpsObjectsAvailableIds`). data DecisionGlobalState peerAddr objectId object = DecisionGlobalState { dgsPeerStates :: !(Map peerAddr (DecisionPeerState objectId object)) -- ^ Map of peer states. @@ -149,31 +136,25 @@ data DecisionGlobalState peerAddr objectId object = DecisionGlobalState -- /Invariant:/ for peerAddr's which are registered using `withPeer`, -- there's always an entry in this map even if the set of `objectId`s is -- empty. - , dgsObjectsLiveMultiplicities :: !(Map objectId ObjectMultiplicity) - -- ^ We track counts of live objects. - -- An object is added to the live map when it is inflight, and is only removed - -- after the retention timeout expires. - -- - -- The value for any key must be always non-zero (strictly positive). - -- - -- The `dgsObjectsOwtPoolMultiplicities` map contains a subset of `dgsObjectsLiveMultiplicities`. , dgsObjectsInflightMultiplicities :: !(Map objectId ObjectMultiplicity) - -- ^ Map from object ids of objects which are in-flight (have already been + -- ^ Map from ids of objects which are in-flight (have already been -- requested) to their multiplicities (from how many peers it is -- currently in-flight) -- - -- This can intersect with `dpsObjectsAvailableIds`. + -- This can intersect with some `dpsObjectsAvailableIds`. + -- The value for any key must be always non-zero (strictly positive). + , dgsObjectsPendingMultiplicities :: !(Map objectId ObjectMultiplicity) + -- ^ Map from ids of objects which have already been downloaded and validated + -- but not yet acknowledged, to their multiplicities + -- + -- The value for any key must be always non-zero (strictly positive). 
, dgsObjectsOwtPoolMultiplicities :: !(Map objectId ObjectMultiplicity) - -- ^ A set of objectIds that have been downloaded by a peer and are on their - -- way to the objectpool. We won't issue further fetch-requests for objects in - -- this state. We track these objects to not re-download them from another - -- peer. + -- ^ Map from ids of objects which have already been downloaded, validated, + -- acknowledged, and are on their way to the objectpool (waiting for the lock) + -- to their multiplicities -- - -- * We subtract from the counter when a given object is added or rejected by - -- the objectpool or do that for all objects in `dpsObjectsOwtPool` when a peer is - -- unregistered. - -- * We add to the counter when a given object is selected to be added to the - -- objectpool in `pickObjectsToDownload`. + -- * We subtract from the counter when a given object is added to the + -- objectpool , dgsRng :: !StdGen -- ^ Rng used to randomly order peers } @@ -188,19 +169,11 @@ instance NoThunks (DecisionGlobalState peerAddr objectId object) -- | Merge dpsObjectsAvailableIds from all peers of the global state. -dgsObjectsAvailableIds :: Ord objectId => DecisionGlobalState peerAddr objectId object -> Set objectId -dgsObjectsAvailableIds DecisionGlobalState{dgsPeerStates} = - Set.unions (dpsObjectsAvailableIds <$> (Map.elems dgsPeerStates)) - --- | Merge dpsObjectsPending from all peers of the global state. -dgsObjectsPending :: Ord objectId => DecisionGlobalState peerAddr objectId object -> Map objectId object -dgsObjectsPending DecisionGlobalState{dgsPeerStates} = - Map.unions (dpsObjectsPending <$> (Map.elems dgsPeerStates)) - --- | Merge dpsObjectsOwtPool from all peers of the global state. 
-dgsObjectsOwtPool :: Ord objectId => DecisionGlobalState peerAddr objectId object -> Map objectId object -dgsObjectsOwtPool DecisionGlobalState{dgsPeerStates} = - Map.unions (dpsObjectsOwtPool <$> (Map.elems dgsPeerStates)) +dgsObjectsAvailableMultiplicities :: Ord objectId => DecisionGlobalState peerAddr objectId object -> Map objectId ObjectMultiplicity +dgsObjectsAvailableMultiplicities DecisionGlobalState{dgsPeerStates} = + Map.unionsWith + (+) + ( Map.fromSet (const 1) . dpsObjectsAvailableIds <$> Map.elems dgsPeerStates) nonZeroCountMapDiff :: (Ord k) => Map k ObjectMultiplicity -> Map k ObjectMultiplicity -> Map k ObjectMultiplicity nonZeroCountMapDiff = Map.merge @@ -208,10 +181,6 @@ nonZeroCountMapDiff = Map.merge Map.dropMissing (Map.zipWithMaybeMatched (\_ count1 count2 -> let c = count1 - count2 in if c > 0 then Just c else Nothing)) -dgsObjectsPendingOrOwtPoolMultiplicities :: Ord objectId => DecisionGlobalState peerAddr objectId object -> Map objectId ObjectMultiplicity -dgsObjectsPendingOrOwtPoolMultiplicities DecisionGlobalState{dgsObjectsLiveMultiplicities, dgsObjectsInflightMultiplicities} = - nonZeroCountMapDiff dgsObjectsLiveMultiplicities dgsObjectsInflightMultiplicities - type DecisionGlobalStateVar m peerAddr objectId object = StrictTVar m (DecisionGlobalState peerAddr objectId object) @@ -224,7 +193,7 @@ newDecisionGlobalStateVar rng = DecisionGlobalState { dgsPeerStates = Map.empty , dgsObjectsInflightMultiplicities = Map.empty - , dgsObjectsLiveMultiplicities = Map.empty + , dgsObjectsPendingMultiplicities = Map.empty , dgsObjectsOwtPoolMultiplicities = Map.empty , dgsRng = rng } @@ -302,10 +271,8 @@ data TraceDecisionLogic peerAddr objectId object data ObjectDiffusionCounters = ObjectDiffusionCounters - { odcDistinctNumObjectsAvailable :: Int + { odcNumDistinctObjectsAvailable :: Int -- ^ objectIds which are not yet downloaded. 
- , odcNumDistinctObjectsLive :: Int - -- ^ number of distinct live objects , odcNumDistinctObjectsInflight :: Int -- ^ number of distinct in-flight objects. , odcNumTotalObjectsInflight :: Int @@ -326,15 +293,14 @@ makeObjectDiffusionCounters :: makeObjectDiffusionCounters dgs@DecisionGlobalState { dgsObjectsInflightMultiplicities - , dgsObjectsLiveMultiplicities + , dgsObjectsPendingMultiplicities , dgsObjectsOwtPoolMultiplicities } = ObjectDiffusionCounters - { odcDistinctNumObjectsAvailable = Set.size $ dgsObjectsAvailableIds dgs - , odcNumDistinctObjectsLive = Map.size dgsObjectsLiveMultiplicities + { odcNumDistinctObjectsAvailable = Map.size $ dgsObjectsAvailableMultiplicities dgs , odcNumDistinctObjectsInflight = Map.size dgsObjectsInflightMultiplicities , odcNumTotalObjectsInflight = fromIntegral $ mconcat (Map.elems dgsObjectsInflightMultiplicities) - , odcNumDistinctObjectsPending = Map.size $ dgsObjectsPending dgs + , odcNumDistinctObjectsPending = Map.size dgsObjectsPendingMultiplicities , odcNumDistinctObjectsOwtPool = Map.size dgsObjectsOwtPoolMultiplicities } @@ -386,16 +352,15 @@ decreaseCount mmap k = mmap data TraceObjectDiffusionInbound objectId object - = -- | Number of objects just about to be inserted. - TraceObjectDiffusionInboundCollectedObjects NumObjectsProcessed - | -- | Just processed object pass/fail breakdown. - TraceObjectDiffusionInboundAddedObjects Int + = TraceObjectDiffusionInboundReceivedIds Int + | TraceObjectDiffusionInboundReceivedObjects Int + | TraceObjectDiffusionInboundAddedObjects Int | -- | Received a 'ControlMessage' from the outbound peer governor, and about -- to act on it. 
- TraceObjectDiffusionInboundRecvControlMessage ControlMessage + TraceObjectDiffusionInboundReceivedControlMessage ControlMessage | TraceObjectDiffusionInboundCanRequestMoreObjects Int | TraceObjectDiffusionInboundCannotRequestMoreObjects Int - | TraceObjectDiffusionInboundDecisionReceived (PeerDecision objectId object) + | TraceObjectDiffusionInboundReceivedDecision (PeerDecision objectId object) deriving (Eq, Show) data ObjectDiffusionInboundError From 56ee8648b508a0ed4483cc473b2c357df7dfe424 Mon Sep 17 00:00:00 2001 From: Thomas BAGREL Date: Mon, 13 Oct 2025 11:31:21 +0200 Subject: [PATCH 18/43] Update state management (again) --- .../ObjectDiffusion/Inbound/V2.hs | 30 +-- .../ObjectDiffusion/Inbound/V2.mermaid | 14 +- .../ObjectDiffusion/Inbound/V2/Decision.hs | 20 +- .../ObjectDiffusion/Inbound/V2/Registry.hs | 56 +++-- .../ObjectDiffusion/Inbound/V2/State.hs | 214 ++++++++++++++---- .../ObjectDiffusion/Inbound/V2/Types.hs | 16 +- 6 files changed, 246 insertions(+), 104 deletions(-) diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2.hs index 0122b2152b..28734c8273 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2.hs @@ -5,14 +5,15 @@ {-# LANGUAGE LambdaCase #-} {-# LANGUAGE NamedFieldPuns #-} {-# LANGUAGE ScopedTypeVariables #-} +{-# LANGUAGE ImportQualifiedPost #-} module Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2 ( -- * ObjectDiffusion Inbound client objectDiffusionInbound - -- * InboundPeerAPI + -- * PeerStateAPI , withPeer - , InboundPeerAPI + , PeerStateAPI -- * Supporting types , module V2 @@ -38,11 +39,12 @@ import qualified Data.Set as Set import Network.TypedProtocol import 
Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.Policy import Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.Registry -import Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.State (newDecisionGlobalStateVar) +import Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.State qualified as State import Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.Types as V2 import Ouroboros.Network.Protocol.ObjectDiffusion.Inbound import Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.ObjectPool.API + -- | A object-submission inbound side (server, sic!). -- -- The server blocks on receiving `PeerDecision` from the decision logic. If @@ -58,17 +60,17 @@ objectDiffusionInbound :: Tracer m (TraceObjectDiffusionInbound objectId object) -> ObjectDiffusionInitDelay -> ObjectPoolWriter objectId object m -> - InboundPeerAPI m objectId object -> + PeerStateAPI m objectId object -> ObjectDiffusionInboundPipelined objectId object m () objectDiffusionInbound tracer initDelay ObjectPoolWriter{} - InboundPeerAPI - { readPeerDecision - , handleReceivedIds - , handleReceivedObjects - , submitObjectsToPool + PeerStateAPI + { psaReadDecision + , psaOnReceivedIds + , psaOnReceivedObjects + , psaSubmitObjectsToPool } = ObjectDiffusionInboundPipelined $ do case initDelay of @@ -82,9 +84,9 @@ objectDiffusionInbound -- Block on next decision. 
object@PeerDecision { pdObjectsToReqIds = pdObjectsToReqIds - , pdObjectsToPool = pdObjectsToPool + , pdObjectsToSubmitToPoolIds = pdObjectsToSubmitToPoolIds } <- - readPeerDecision + psaReadDecision traceWith tracer (TraceObjectDiffusionInboundReceivedDecision object) let !collected = length undefined @@ -149,7 +151,7 @@ objectDiffusionInbound objectIdsMap = Map.fromList objectIds' when (StrictSeq.length receivedIdsSeq > fromIntegral objectIdsToReq) $ throwIO ProtocolErrorObjectIdsNotRequested - handleReceivedIds objectIdsToReq receivedIdsSeq objectIdsMap + onReceivedIds objectIdsToReq receivedIdsSeq objectIdsMap serverIdle ) serverReqObjectIds @@ -207,7 +209,7 @@ objectDiffusionInbound objectIdsMap = Map.fromList objectIds unless (StrictSeq.length receivedIdsSeq <= fromIntegral objectIdsToReq) $ throwIO ProtocolErrorObjectIdsNotRequested - handleReceivedIds objectIdsToReq receivedIdsSeq objectIdsMap + onReceivedIds objectIdsToReq receivedIdsSeq objectIdsMap k CollectObjects objectIds objects -> do let requested = Map.keysSet objectIds @@ -216,7 +218,7 @@ objectDiffusionInbound unless (Map.keysSet received `Set.isSubsetOf` requested) $ throwIO ProtocolErrorObjectNotRequested - mbe <- handleReceivedObjects objectIds received + mbe <- onReceivedObjects objectIds received traceWith tracer $ TraceObjectDiffusionCollected (getId `map` objects) case mbe of -- one of `object`s had a wrong size diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2.mermaid b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2.mermaid index 44ebca3d2c..14737931a2 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2.mermaid +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2.mermaid @@ -31,11 +31,13 @@ flowchart TD ED -->|+objects| E ED -->|+count| G - 
EE{acknowledgeIds} + EE{makeDecisionPreAcknowledge} B -->|-ids| EE - + EE -.->|readDecision : pdIdsToAck + pdIdsToReq + pdCanPipelineIdsReq/| EA + EE -.->|readDecision : pdObjectsToReqIds| EC + EE -.->|readDecision : pdObjectsToSubmitToPoolIds| EF + EF{submitToPool} - EE -.->|ids| EF E -->|-objects| EF G -->|-count| EF EF -->|+objects| F @@ -44,9 +46,3 @@ flowchart TD EG{Added to pool} F -->|-objects| EG I -->|-count| EG - - EZ{makeDecision} - - EZ -.->|count| EA - EZ -.->|ids| EC - EZ -.->|ids| EE diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Decision.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Decision.hs index dcc51718e3..e04da7a8b4 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Decision.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Decision.hs @@ -180,7 +180,7 @@ pickObjectsToDownload then let ( numObjectIdsToAck , numObjectIdsToReq - , pdObjectsToPool + , pdObjectsToSubmitToPoolIds , RefCountDiff{rcdIdsToAckMultiplicities} , peerObjectState' ) = acknowledgeObjectIds decisionPolicy sharedState peerObjectState @@ -188,7 +188,7 @@ pickObjectsToDownload disIdsToAckMultiplicities' = Map.unionWith (+) disIdsToAckMultiplicities rcdIdsToAckMultiplicities disObjectsOwtPoolIds' = disObjectsOwtPoolIds - <> Map.keysSet pdObjectsToPool + <> Map.keysSet pdObjectsToSubmitToPoolIds in if dpsNumIdsInflight peerObjectState' > 0 then -- we have objectIds to request @@ -207,7 +207,7 @@ pickObjectsToDownload . 
dpsOutstandingFifo $ peerObjectState' , pdObjectsToReqIds = Set.empty - , pdObjectsToPool = pdObjectsToPool + , pdObjectsToSubmitToPoolIds = pdObjectsToSubmitToPoolIds } ) ) @@ -275,7 +275,7 @@ pickObjectsToDownload ( numObjectIdsToAck , numObjectIdsToReq - , pdObjectsToPool + , pdObjectsToSubmitToPoolIds , RefCountDiff{rcdIdsToAckMultiplicities} , peerObjectState'' ) = acknowledgeObjectIds decisionPolicy sharedState peerObjectState' @@ -292,7 +292,7 @@ pickObjectsToDownload disObjectsOwtPoolIds' = disObjectsOwtPoolIds - <> Set.fromList (map fst pdObjectsToPool) + <> Set.fromList (map fst pdObjectsToSubmitToPoolIds) in if dpsNumIdsInflight peerObjectState'' > 0 then -- we can request `objectId`s & `object`s @@ -313,7 +313,7 @@ pickObjectsToDownload $ peerObjectState'' , pdIdsToReq = numObjectIdsToReq , pdObjectsToReqIds = pdObjectsToReqIdsMap - , pdObjectsToPool = pdObjectsToPool + , pdObjectsToSubmitToPoolIds = pdObjectsToSubmitToPoolIds } ) ) @@ -374,9 +374,9 @@ pickObjectsToDownload { pdIdsToAck = 0 , pdIdsToReq = 0 , pdObjectsToReqIds - , pdObjectsToPool } + , pdObjectsToSubmitToPoolIds } | null pdObjectsToReqIds - , Map.null pdObjectsToPool -> + , Map.null pdObjectsToSubmitToPoolIds -> Nothing _ -> Just (a, b) ) @@ -388,8 +388,8 @@ pickObjectsToDownload Map objectId Int -> (a, PeerDecision objectId object) -> Map objectId Int - updateInSubmissionToObjectPoolObjects m (_, PeerDecision{pdObjectsToPool}) = - List.foldl' fn m (Map.toList pdObjectsToPool) + updateInSubmissionToObjectPoolObjects m (_, PeerDecision{pdObjectsToSubmitToPoolIds}) = + List.foldl' fn m (Map.toList pdObjectsToSubmitToPoolIds) where fn :: Map objectId Int -> diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Registry.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Registry.hs index f357e3736e..69dd679c61 100644 --- 
a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Registry.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Registry.hs @@ -12,7 +12,7 @@ module Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.Registry , DecisionGlobalStateVar , newPeerDecisionChannelsVar , newObjectPoolSem - , InboundPeerAPI (..) + , PeerStateAPI (..) , withPeer , decisionLogicThread ) where @@ -28,6 +28,7 @@ import Data.Foldable as Foldable (foldl', traverse_) import Data.Hashable import Data.Map.Strict (Map) import Data.Map.Strict qualified as Map +import Data.Sequence.Strict (StrictSeq) import Data.Sequence.Strict qualified as StrictSeq import Data.Set qualified as Set import Data.Void (Void) @@ -37,6 +38,8 @@ import Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.State qualifi import Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.Types import Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.ObjectPool.API import Control.Monad (forever) +import Data.Set (Set) +import Ouroboros.Network.Protocol.ObjectDiffusion.Type (NumObjectIdsReq) -- | Communication channels between `ObjectDiffusion` mini-protocol inbound side -- and decision logic. 
@@ -50,17 +53,22 @@ newPeerDecisionChannelsVar :: MonadMVar m => m (PeerDecisionChannelsVar m peerAddr objectId object) newPeerDecisionChannelsVar = newMVar (Map.empty) -data InboundPeerAPI m objectId object = InboundPeerAPI - { readPeerDecision :: m (PeerDecision objectId object) +data PeerStateAPI m objectId object = PeerStateAPI + { psaReadDecision :: m (PeerDecision objectId object) -- ^ a blocking action which reads `PeerDecision` - , handleReceivedIds :: StrictSeq.StrictSeq objectId -> m () - , handleReceivedObjects :: Map objectId object -> m () - , submitObjectsToPool :: [object] -> m () + , psaOnRequestIds :: NumObjectIdsReq -> m () + , psaOnRequestObjects :: Set objectId -> m () + , psaSubmitObjectsToPool :: StrictSeq objectId -> m () + , psaOnReceivedIds :: [objectId] -> m () + -- ^ Error handling should have been done before calling this + , psaOnReceivedObjects :: [object] -> m () + -- ^ Error handling should have been done before calling this + -- Also every object should have been validated! } -- | A bracket function which registers / de-registers a new peer in --- `DecisionGlobalStateVar` and `PeerDecisionChannelsVar`s, which exposes `InboundPeerAPI`. --- `InboundPeerAPI` is only safe inside the `withPeer` scope. +-- `DecisionGlobalStateVar` and `PeerDecisionChannelsVar`s, which exposes `PeerStateAPI`. +-- `PeerStateAPI` is only safe inside the `withPeer` scope. withPeer :: forall object peerAddr objectId ticketNo m a. 
( MonadMask m @@ -80,8 +88,8 @@ withPeer :: ObjectPoolWriter objectId object m -> -- | new peer peerAddr -> - -- | callback which gives access to `InboundPeerAPI` - (InboundPeerAPI m objectId object -> m a) -> + -- | callback which gives access to `PeerStateAPI` + (PeerStateAPI m objectId object -> m a) -> m a withPeer decisionTracer @@ -96,7 +104,7 @@ withPeer withAPI = bracket registerPeerAndCreateAPI unregisterPeer withAPI where - registerPeerAndCreateAPI :: m (InboundPeerAPI m objectId object) + registerPeerAndCreateAPI :: m (PeerStateAPI m objectId object) registerPeerAndCreateAPI = do -- create the API for this peer, obtaining a channel for it in the process !inboundPeerAPI <- @@ -114,28 +122,40 @@ withPeer return (chan, Map.insert peerAddr chan peerToChannel) return ( peerToChannel' - , InboundPeerAPI - { readPeerDecision = takeMVar chan' - , handleReceivedIds = State.handleReceivedIds + , PeerStateAPI + { psaReadDecision = takeMVar chan' + , psaOnRequestIds = State.onRequestIds objectDiffusionTracer decisionTracer globalStateVar objectPoolWriter peerAddr - (error "TODO: provide the number of requested IDs") - , handleReceivedObjects = State.handleReceivedObjects + , psaOnRequestObjects = State.onRequestObjects objectDiffusionTracer decisionTracer globalStateVar objectPoolWriter peerAddr - , submitObjectsToPool = State.submitObjectsToPool + , psaSubmitObjectsToPool = State.submitObjectsToPool objectDiffusionTracer decisionTracer globalStateVar objectPoolWriter objectPoolSem peerAddr + , psaOnReceivedIds = State.onReceivedIds + objectDiffusionTracer + decisionTracer + globalStateVar + objectPoolWriter + peerAddr + (error "TODO: provide the number of requested IDs") + , psaOnReceivedObjects = State.onReceivedObjects + objectDiffusionTracer + decisionTracer + globalStateVar + objectPoolWriter + peerAddr } ) -- register the peer in the global state now @@ -145,7 +165,7 @@ withPeer return inboundPeerAPI where - unregisterPeer :: InboundPeerAPI m objectId object 
-> m () + unregisterPeer :: PeerStateAPI m objectId object -> m () unregisterPeer _ = -- the handler is a short blocking operation, thus we need to use -- `uninterruptibleMask_` diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/State.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/State.hs index 4ac6d02669..891df6c8da 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/State.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/State.hs @@ -8,8 +8,10 @@ module Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.State ( -- * Core API DecisionGlobalState (..) , DecisionPeerState (..) - , handleReceivedIds - , handleReceivedObjects + , onRequestIds + , onRequestObjects + , onReceivedIds + , onReceivedObjects , submitObjectsToPool ) where @@ -30,13 +32,109 @@ import Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.ObjectPool.API (ObjectPo import Ouroboros.Consensus.Util.IOLike (MonadMask, MonadMVar, bracket_) import Ouroboros.Network.Protocol.ObjectDiffusion.Type (NumObjectIdsReq) -strictSeqToSet :: Ord a => StrictSeq a -> Set a -strictSeqToSet = Foldable.foldl' (flip Set.insert) Set.empty +onRequestIds :: + forall m peerAddr object objectId. 
+ (MonadSTM m, Ord objectId, Ord peerAddr) => + Tracer m (TraceObjectDiffusionInbound objectId object) -> + Tracer m (TraceDecisionLogic peerAddr objectId object) -> + DecisionGlobalStateVar m peerAddr objectId object -> + ObjectPoolWriter objectId object m -> + peerAddr -> + -- | number of requests to req + NumObjectIdsReq -> + m () +onRequestIds odTracer decisionTracer globalStateVar _objectPoolWriter peerAddr numIdsToReq = do + globalState' <- atomically $ do + stateTVar + globalStateVar + ( \globalState -> + let globalState' = onRequestIdsImpl peerAddr numIdsToReq globalState + in (globalState', globalState') ) + traceWith odTracer (TraceObjectDiffusionInboundRequestedIds (fromIntegral numIdsToReq)) + traceWith decisionTracer (TraceDecisionLogicGlobalStateUpdated "onRequestIds" globalState') + +onRequestIdsImpl :: + forall peerAddr object objectId. + (Ord objectId, Ord peerAddr, HasCallStack) => + peerAddr -> + -- | number of requests to req + NumObjectIdsReq -> + DecisionGlobalState peerAddr objectId object -> + DecisionGlobalState peerAddr objectId object +onRequestIdsImpl + peerAddr + numIdsToReq + globalState@DecisionGlobalState + { dgsPeerStates + } = + globalState + { dgsPeerStates = dgsPeerStates' + } + where + dgsPeerStates' = + Map.adjust + (\ps@DecisionPeerState{dpsNumIdsInflight} -> ps{dpsNumIdsInflight = dpsNumIdsInflight + numIdsToReq}) + peerAddr + dgsPeerStates + +onRequestObjects :: + forall m peerAddr object objectId. 
+ (MonadSTM m, Ord objectId, Ord peerAddr) => + Tracer m (TraceObjectDiffusionInbound objectId object) -> + Tracer m (TraceDecisionLogic peerAddr objectId object) -> + DecisionGlobalStateVar m peerAddr objectId object -> + ObjectPoolWriter objectId object m -> + peerAddr -> + -- | objects to request, by id + Set objectId -> + m () +onRequestObjects odTracer decisionTracer globalStateVar _objectPoolWriter peerAddr objectIds = do + globalState' <- atomically $ do + stateTVar + globalStateVar + ( \globalState -> + let globalState' = onRequestObjectsImpl peerAddr objectIds globalState + in (globalState', globalState') ) + traceWith odTracer (TraceObjectDiffusionInboundRequestedObjects (Set.size objectIds)) + traceWith decisionTracer (TraceDecisionLogicGlobalStateUpdated "onRequestObjects" globalState') --- | Wrapper around `handleReceivedIdsImpl`. +onRequestObjectsImpl :: + forall peerAddr object objectId. + (Ord objectId, Ord peerAddr, HasCallStack) => + peerAddr -> + -- | objects to request, by id + Set objectId -> + DecisionGlobalState peerAddr objectId object -> + DecisionGlobalState peerAddr objectId object +onRequestObjectsImpl + peerAddr + objectIds + globalState@DecisionGlobalState + { dgsPeerStates + , dgsObjectsInflightMultiplicities + } = + globalState + { dgsObjectsInflightMultiplicities = dgsObjectsInflightMultiplicities' + , dgsPeerStates = dgsPeerStates' + } + where + dgsObjectsInflightMultiplicities' = + Foldable.foldl' + increaseCount + dgsObjectsInflightMultiplicities + objectIds + dgsPeerStates' = + Map.adjust + (\ps@DecisionPeerState{dpsObjectsAvailableIds, dpsObjectsInflightIds} -> + ps{dpsObjectsAvailableIds = dpsObjectsAvailableIds \\ objectIds, + dpsObjectsInflightIds = dpsObjectsInflightIds `Set.union` objectIds}) + peerAddr + dgsPeerStates + +-- | Wrapper around `onReceivedIdsImpl`. -- Obtain the `hasObject` function atomically from the STM context and -- updates and traces the global state TVar.
-handleReceivedIds :: +onReceivedIds :: forall m peerAddr object objectId. (MonadSTM m, Ord objectId, Ord peerAddr) => Tracer m (TraceObjectDiffusionInbound objectId object) -> @@ -48,20 +146,20 @@ handleReceivedIds :: -- `dpsNumIdsInflight` NumObjectIdsReq -> -- | sequence of received `objectIds` - StrictSeq objectId -> + [objectId] -> -- | received `objectId`s m () -handleReceivedIds odTracer decisionTracer globalStateVar objectPoolWriter peerAddr numIdsInitiallyRequested receivedIdsSeq = do +onReceivedIds odTracer decisionTracer globalStateVar objectPoolWriter peerAddr numIdsInitiallyRequested receivedIds = do globalState' <- atomically $ do hasObject <- opwHasObject objectPoolWriter stateTVar globalStateVar - ( \globalState -> let globalState' = handleReceivedIdsImpl hasObject peerAddr numIdsInitiallyRequested receivedIdsSeq globalState + ( \globalState -> let globalState' = onReceivedIdsImpl hasObject peerAddr numIdsInitiallyRequested receivedIds globalState in (globalState', globalState') ) - traceWith odTracer (TraceObjectDiffusionInboundReceivedIds (StrictSeq.length receivedIdsSeq)) - traceWith decisionTracer (TraceDecisionLogicGlobalStateUpdated "handleReceivedIds" globalState') + traceWith odTracer (TraceObjectDiffusionInboundReceivedIds (length receivedIds)) + traceWith decisionTracer (TraceDecisionLogicGlobalStateUpdated "onReceivedIds" globalState') -handleReceivedIdsImpl :: +onReceivedIdsImpl :: forall peerAddr object objectId. 
(Ord objectId, Ord peerAddr, HasCallStack) => -- | check if objectId is in the objectpool, ref @@ -72,14 +170,14 @@ handleReceivedIdsImpl :: -- `dpsNumIdsInflight` NumObjectIdsReq -> -- | sequence of received `objectId`s - StrictSeq objectId -> + [objectId] -> DecisionGlobalState peerAddr objectId object -> DecisionGlobalState peerAddr objectId object -handleReceivedIdsImpl +onReceivedIdsImpl hasObject peerAddr numIdsInitiallyRequested - receivedIdsSeq + receivedIds globalState@DecisionGlobalState { dgsPeerStates , dgsObjectsPendingMultiplicities @@ -96,7 +194,7 @@ handleReceivedIdsImpl , dpsNumIdsInflight } = findWithDefault - (error "ObjectDiffusion.handleReceivedIdsImpl: the peer should appear in dgsPeerStates") + (error "ObjectDiffusion.onReceivedIdsImpl: the peer should appear in dgsPeerStates") peerAddr dgsPeerStates @@ -107,13 +205,13 @@ handleReceivedIdsImpl && objectId `Set.notMember` dpsObjectsInflightIds -- object isn't in flight from current peer && objectId `Map.notMember` dgsObjectsPendingMultiplicities -- the object has not been successfully downloaded from another peer && objectId `Map.notMember` dgsObjectsOwtPoolMultiplicities -- (either pending ack or owt pool) - ) $ strictSeqToSet $ receivedIdsSeq + ) $ Set.fromList receivedIds dpsObjectsAvailableIds' = dpsObjectsAvailableIds `Set.union` newObjectsAvailableIds -- Add received objectIds to `dpsOutstandingFifo`. - dpsOutstandingFifo' = dpsOutstandingFifo <> receivedIdsSeq + dpsOutstandingFifo' = dpsOutstandingFifo <> StrictSeq.fromList receivedIds peerState' = assert @@ -126,7 +224,7 @@ handleReceivedIdsImpl dgsPeerStates' = Map.insert peerAddr peerState' dgsPeerStates --- | Wrapper around `handleReceivedObjectsImpl` that updates and traces the +-- | Wrapper around `onReceivedObjectsImpl` that updates and traces the -- global state TVar. -- -- Error handling should be done by the client before using the API. 
@@ -134,7 +232,7 @@ handleReceivedIdsImpl -- assert (objectsRequestedIds `Set.isSubsetOf` dpsObjectsInflightIds) -- -- IMPORTANT: We also assume that every object has been *validated* before being passed to this function. -handleReceivedObjects :: +onReceivedObjects :: forall m peerAddr object objectId. ( MonadSTM m , Ord objectId @@ -146,18 +244,25 @@ handleReceivedObjects :: ObjectPoolWriter objectId object m -> peerAddr -> -- | received objects - Map objectId object -> + [object] -> m () -handleReceivedObjects odTracer tracer globalStateVar _objectPoolWriter peerAddr objectsReceived = do +onReceivedObjects odTracer tracer globalStateVar objectPoolWriter peerAddr objectsReceived = do + let getId = opwObjectId objectPoolWriter + globalState' <- atomically $ do stateTVar globalStateVar - ( \globalState -> let globalState' = handleReceivedObjectsImpl peerAddr objectsReceived globalState - in (globalState', globalState') ) - traceWith odTracer (TraceObjectDiffusionInboundReceivedObjects (Map.size objectsReceived)) - traceWith tracer (TraceDecisionLogicGlobalStateUpdated "handleReceivedObjects" globalState') + ( \globalState -> + let globalState' = + onReceivedObjectsImpl + peerAddr + (Map.fromList $ (\obj -> (getId obj, obj)) <$> objectsReceived) + globalState + in (globalState', globalState')) + traceWith odTracer (TraceObjectDiffusionInboundReceivedObjects (length objectsReceived)) + traceWith tracer (TraceDecisionLogicGlobalStateUpdated "onReceivedObjects" globalState') -handleReceivedObjectsImpl :: +onReceivedObjectsImpl :: forall peerAddr object objectId. 
( Ord peerAddr , Ord objectId @@ -167,7 +272,7 @@ handleReceivedObjectsImpl :: Map objectId object -> DecisionGlobalState peerAddr objectId object -> DecisionGlobalState peerAddr objectId object -handleReceivedObjectsImpl +onReceivedObjectsImpl peerAddr objectsReceived st@DecisionGlobalState @@ -188,7 +293,7 @@ handleReceivedObjectsImpl , dpsObjectsPending } = findWithDefault - (error "ObjectDiffusion.handleReceivedObjectsImpl: the peer should appear in dgsPeerStates") + (error "ObjectDiffusion.onReceivedObjectsImpl: the peer should appear in dgsPeerStates") peerAddr dgsPeerStates @@ -218,6 +323,7 @@ handleReceivedObjectsImpl dgsPeerStates' = Map.insert peerAddr peerState' dgsPeerStates +-- | Should be called by `acknowledgeIds` submitObjectsToPool :: forall m peerAddr object objectId. ( Ord objectId @@ -232,7 +338,7 @@ submitObjectsToPool :: ObjectPoolWriter objectId object m -> ObjectPoolSem m -> peerAddr -> - [object] -> + StrictSeq objectId -> m () submitObjectsToPool odTracer @@ -241,23 +347,26 @@ submitObjectsToPool objectPoolWriter (ObjectPoolSem poolSem) peerAddr - objects = do + objectIds = do let getId = opwObjectId objectPoolWriter -- Move objects from `pending` to `owtPool` state - globalState' <- atomically $ stateTVar globalStateVar $ \globalState -> - let globalState' = + (globalState', objects) <- atomically $ stateTVar globalStateVar $ \globalState -> + let (globalState', objects) = Foldable.foldl' - (\st object -> updateStateWhenObjectOwtPool (getId object) st) - globalState - objects - in (globalState', globalState') + (\(st, acc) objectId -> + let (st', object) = updateStateWhenObjectOwtPool objectId st + in (st', object : acc)) + (globalState, []) + objectIds + in ((globalState', objects), globalState') traceWith decisionTracer (TraceDecisionLogicGlobalStateUpdated "submitObjectsToPool.updateStateWhenObjectOwtPool" globalState') bracket_ (atomically $ waitTSem poolSem) (atomically $ signalTSem poolSem) $ do + -- When the lock over the object 
pool is obtained opwAddObjects objectPoolWriter objects traceWith odTracer $ @@ -276,7 +385,7 @@ submitObjectsToPool updateStateWhenObjectOwtPool :: objectId -> DecisionGlobalState peerAddr objectId object -> - DecisionGlobalState peerAddr objectId object + (DecisionGlobalState peerAddr objectId object, object) updateStateWhenObjectOwtPool objectId st@DecisionGlobalState @@ -284,23 +393,36 @@ submitObjectsToPool , dgsObjectsOwtPoolMultiplicities , dgsPeerStates } = - st + (st { dgsObjectsPendingMultiplicities = dgsObjectsPendingMultiplicities' , dgsObjectsOwtPoolMultiplicities = dgsObjectsOwtPoolMultiplicities' , dgsPeerStates = dgsPeerStates' - } + }, object) where + dgsObjectsPendingMultiplicities' = decreaseCount dgsObjectsPendingMultiplicities objectId dgsObjectsOwtPoolMultiplicities' = increaseCount dgsObjectsOwtPoolMultiplicities objectId + peerState@DecisionPeerState{dpsObjectsPending, dpsObjectsOwtPool} = + findWithDefault + (error "ObjectDiffusion.submitObjectsToPool: the peer should appear in dgsPeerStates") + peerAddr + dgsPeerStates + + object = findWithDefault + (error "ObjectDiffusion.submitObjectsToPool: the object should appear in dpsObjectsPending") + objectId + dpsObjectsPending + + peerState' = peerState + { dpsObjectsPending = Map.delete objectId dpsObjectsPending + , dpsObjectsOwtPool = Map.insert objectId object dpsObjectsOwtPool + } + dgsPeerStates' = - Map.adjust - (\ps@DecisionPeerState{dpsObjectsOwtPool, dpsObjectsPending} -> - let object = case Map.lookup objectId dpsObjectsPending of - Just obj -> obj - Nothing -> error "ObjectDiffusion.updateStateWhenObjectOwtPool: the object should be in dpsObjectsPending" - in ps{dpsObjectsPending = Map.delete objectId dpsObjectsPending, dpsObjectsOwtPool = Map.insert objectId object dpsObjectsOwtPool}) + Map.insert peerAddr + peerState' dgsPeerStates updateStateWhenObjectAddedToPool :: diff --git 
a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Types.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Types.hs index ee6b567ed3..2a18aa9268 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Types.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Types.hs @@ -108,7 +108,7 @@ data DecisionPeerState objectId object = DecisionPeerState -- acknowledged and haven't been sent to the objectpool yet. -- -- Life cycle of entries: - -- * added when a object is downloaded in `handleReceivedObjectsImpl` + -- * added when a object is downloaded in `onReceivedObjectsImpl` -- * removed by `acknowledgeObjectIds` (to properly follow `dpsOutstandingFifo`) , dpsObjectsOwtPool :: !(Map objectId object) -- ^ A set of objects on their way to the objectpool. @@ -223,7 +223,7 @@ data PeerDecision objectId object = PeerDecision -- if we have non-acknowledged `objectId`s. , pdObjectsToReqIds :: !(Set objectId) -- ^ objectId's to download. - , pdObjectsToPool :: !(Set objectId) + , pdObjectsToSubmitToPoolIds :: !(StrictSeq objectId) -- ^ list of `object`s to submit to the objectpool. 
} deriving (Show, Eq) @@ -238,21 +238,21 @@ instance Ord objectId => Semigroup (PeerDecision objectId object) where , pdIdsToReq , pdCanPipelineIdsReq = _ignored , pdObjectsToReqIds - , pdObjectsToPool + , pdObjectsToSubmitToPoolIds } <> PeerDecision { pdIdsToAck = pdIdsToAck' , pdIdsToReq = pdIdsToReq' , pdCanPipelineIdsReq = pdCanPipelineIdsReq' , pdObjectsToReqIds = pdObjectsToReqIds' - , pdObjectsToPool = pdObjectsToPool' + , pdObjectsToSubmitToPoolIds = pdObjectsToSubmitToPoolIds' } = PeerDecision { pdIdsToAck = pdIdsToAck + pdIdsToAck' , pdIdsToReq = pdIdsToReq + pdIdsToReq' , pdCanPipelineIdsReq = pdCanPipelineIdsReq' , pdObjectsToReqIds = pdObjectsToReqIds <> pdObjectsToReqIds' - , pdObjectsToPool = pdObjectsToPool <> pdObjectsToPool' + , pdObjectsToSubmitToPoolIds = pdObjectsToSubmitToPoolIds <> pdObjectsToSubmitToPoolIds' } instance Ord objectId => Monoid (PeerDecision objectId object) where mempty = PeerDecision @@ -260,7 +260,7 @@ instance Ord objectId => Monoid (PeerDecision objectId object) where , pdIdsToReq = 0 , pdCanPipelineIdsReq = False , pdObjectsToReqIds = Set.empty - , pdObjectsToPool = Set.empty + , pdObjectsToSubmitToPoolIds = Set.empty } -- | ObjectLogic tracer. 
@@ -352,7 +352,9 @@ decreaseCount mmap k = mmap data TraceObjectDiffusionInbound objectId object - = TraceObjectDiffusionInboundReceivedIds Int + = TraceObjectDiffusionInboundRequestedIds Int + | TraceObjectDiffusionInboundRequestedObjects Int + | TraceObjectDiffusionInboundReceivedIds Int | TraceObjectDiffusionInboundReceivedObjects Int | TraceObjectDiffusionInboundAddedObjects Int | -- | Received a 'ControlMessage' from the outbound peer governor, and about From ffb9f3995b8a7afaa9d2c9536633f598d49b52c8 Mon Sep 17 00:00:00 2001 From: Thomas BAGREL Date: Mon, 13 Oct 2025 16:01:04 +0200 Subject: [PATCH 19/43] Remove objectsPending field Co-authored-by: nbacquey --- .../ObjectDiffusion/Inbound/V2.hs | 127 ++++++++++++------ .../ObjectDiffusion/Inbound/V2.mermaid | 16 +-- .../ObjectDiffusion/Inbound/V2/Decision.hs | 1 - .../ObjectDiffusion/Inbound/V2/Registry.hs | 12 -- .../ObjectDiffusion/Inbound/V2/State.hs | 107 +++++---------- .../ObjectDiffusion/Inbound/V2/Types.hs | 26 +--- 6 files changed, 126 insertions(+), 163 deletions(-) diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2.hs index 28734c8273..6944aa17a4 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2.hs @@ -43,14 +43,12 @@ import Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.State qualifi import Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.Types as V2 import Ouroboros.Network.Protocol.ObjectDiffusion.Inbound import Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.ObjectPool.API +import Ouroboros.Network.ControlMessage (ControlMessageSTM, ControlMessage (..)) --- | A object-submission inbound side (server, sic!). 
+-- | An object-submission inbound side (client). -- --- The server blocks on receiving `PeerDecision` from the decision logic. If --- there are object's to download it pipelines two requests: first for object's second --- for objectId's. If there are no object's to download, it either sends a blocking or --- non-blocking request for objectId's. +-- The goIdle' function blocks on receiving `PeerDecision` from the decision logic. objectDiffusionInbound :: forall objectId object ticketNo m. ( MonadDelay m @@ -59,15 +57,17 @@ objectDiffusionInbound :: ) => Tracer m (TraceObjectDiffusionInbound objectId object) -> ObjectDiffusionInitDelay -> - ObjectPoolWriter objectId object m -> + ControlMessageSTM m -> PeerStateAPI m objectId object -> ObjectDiffusionInboundPipelined objectId object m () objectDiffusionInbound tracer initDelay - ObjectPoolWriter{} + controlMessage PeerStateAPI { psaReadDecision + , psaOnRequestIds + , psaOnRequestObjects , psaOnReceivedIds , psaOnReceivedObjects , psaSubmitObjectsToPool @@ -76,59 +76,104 @@ objectDiffusionInbound case initDelay of ObjectDiffusionInitDelay delay -> threadDelay delay NoObjectDiffusionInitDelay -> return () - serverIdle + (goIdle Zero) where - serverIdle :: + goIdle :: forall (n :: N). + Nat n -> + m (InboundStIdle Z objectId object m ()) + goIdle n = do + ctrlMsg <- atomically controlMessageSTM + traceWith tracer $ TraceObjectDiffusionInboundReceivedControlMessage ctrlMsg + case ctrlMsg of + -- The peer selection governor is asking us to terminate the connection. + Terminate -> + pure $ terminateAfterDrain n + -- Otherwise, we can continue the protocol normally. + _continue -> goIdle' + goIdle' :: forall (n :: N). + Nat n -> m (InboundStIdle Z objectId object m ()) - serverIdle = do + goIdle' n = do -- Block on next decision.
- object@PeerDecision - { pdObjectsToReqIds = pdObjectsToReqIds + decision@PeerDecision + { pdIdsToAck + , pdIdsToReq + , pdObjectsToReqIds + , pdCanPipelineIdsReq , pdObjectsToSubmitToPoolIds = pdObjectsToSubmitToPoolIds } <- psaReadDecision - traceWith tracer (TraceObjectDiffusionInboundReceivedDecision object) - - let !collected = length undefined - - -- Only attempt to add objects if we have some work to do - when (collected > 0) $ do - -- submitObjectToPool traces: - -- \* `TraceObjectDiffusionInboundAddedObjects`, - -- \* `TraceObjectInboundAddedToObjectPool`, and - -- \* `TraceObjectInboundRejectedFromObjectPool` - -- events. - mapM_ undefined undefined -- (uncurry $ submitObjectsToPool undefined) undefined - - -- TODO: - -- We can update the state so that other `object-submission` servers will - -- not try to add these objects to the objectpool. + traceWith tracer (TraceObjectDiffusionInboundReceivedDecision decision) + + when (not StrictSeq.null pdObjectsToSubmitToPoolIds) $ do + psaSubmitObjectsToPool pdObjectsToSubmitToPoolIds + + let shouldRequestMoreObjects = not $ Set.null pdObjectsToReqIds + + case n of + -- We didn't pipeline any requests, so there are no replies in flight + -- (nothing to collect) + Zero -> + if shouldRequestMoreObjects + then do + -- There are no replies in flight, but we do know some more objects + -- we can ask for, so lets ask for them and more objectIds in a + -- pipelined way. + traceWith tracer (TraceObjectDiffusionInboundCanRequestMoreObjects (natToInt n)) + goReqObjectsAndIdsPipelined Zero decision + else do + -- There's no replies in flight, and we have no more objects we can + -- ask for so the only remaining thing to do is to ask for more + -- objectIds. Since this is the only thing to do now, we make this a + -- blocking call. + traceWith tracer (TraceObjectDiffusionInboundCannotRequestMoreObjects (natToInt n)) + goReqIdsBlocking decision + + -- We have pipelined some requests, so there are some replies in flight. 
+ n@(Succ _) -> + if shouldRequestMoreObjects + then do + -- We have replies in flight and we should eagerly collect them if + -- available, but there are objects to request too so we + -- should *not* block waiting for replies. + -- So we ask for new objects and objectIds in a pipelined way. + pure $ CollectPipelined + -- if no replies are available immediately, we continue with + (Just (goReqObjectsIdsPipelined n decision)) + -- if one reply is available, we go collect it. + -- We will continue to goIdle after; so in practice we will loop + -- until all immediately available replies have been collected + -- before requesting objects and ids in a pipelined fashion + (goCollect n decision) + else do + + if Set.null pdObjectsToReqIds - then serverReqObjectIds Zero object - else serverReqObjects object + then goReqObjectIds Zero object + else goReqObjects object -- Pipelined request of objects - serverReqObjects :: + goReqObjects :: PeerDecision objectId object -> m (InboundStIdle Z objectId object m ()) - serverReqObjects object@PeerDecision{pdObjectsToReqIds = pdObjectsToReqIds} = + goReqObjects object@PeerDecision{pdObjectsToReqIds = pdObjectsToReqIds} = pure $ SendMsgRequestObjectsPipelined (Set.toList pdObjectsToReqIds) - (serverReqObjectIds (Succ Zero) object) + (goReqObjectIds (Succ Zero) object) - serverReqObjectIds :: + goReqObjectIds :: forall (n :: N). Nat n -> PeerDecision objectId object -> m (InboundStIdle n objectId object m ()) - serverReqObjectIds + goReqObjectIds n PeerDecision{pdIdsToReq = 0} = case n of - Zero -> serverIdle + Zero -> goIdle Succ _ -> handleReplies n - serverReqObjectIds + goReqObjectIds -- if there are no unacknowledged objectIds, the protocol requires sending -- a blocking `MsgRequestObjectIds` request. 
This is important, as otherwise -- the client side wouldn't have a chance to terminate the @@ -152,9 +197,9 @@ objectDiffusionInbound when (StrictSeq.length receivedIdsSeq > fromIntegral objectIdsToReq) $ throwIO ProtocolErrorObjectIdsNotRequested onReceivedIds objectIdsToReq receivedIdsSeq objectIdsMap - serverIdle + goIdle ) - serverReqObjectIds + goReqObjectIds n@Zero PeerDecision { pdIdsToAck = objectIdsToAck @@ -166,7 +211,7 @@ objectDiffusionInbound objectIdsToAck objectIdsToReq (handleReplies (Succ n)) - serverReqObjectIds + goReqObjectIds n@Succ{} PeerDecision { pdIdsToAck = objectIdsToAck @@ -195,7 +240,7 @@ objectDiffusionInbound pure $ CollectPipelined Nothing - (handleReply serverIdle) + (handleReply goIdle) handleReply :: forall (n :: N). diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2.mermaid b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2.mermaid index 14737931a2..91d26e5700 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2.mermaid +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2.mermaid @@ -3,11 +3,9 @@ flowchart TD B(dpsOutstandingFifo) C(dpsObjectsAvailableIds) D(dpsObjectsInflightIds) - E(dpsObjectsPending) F(dpsObjectsOwtPool) H(dgsObjectsInflightMultiplicities) - G(dgsObjectsPendingMultiplicities) I(dgsObjectsOwtPoolMultiplicities) EA{requestIds} @@ -24,24 +22,18 @@ flowchart TD EC -->|+ids| D EC -->|+count| H - ED{handleReceiveObjects} + ED{handleReceiveObjects / submitToPool} D -->|-ids| ED H -->|-count| ED IN2@{ shape: lin-cyl, label: "objects" } --o ED - ED -->|+objects| E - ED -->|+count| G + ED -->|+objects| F + ED -->|+count| I EE{makeDecisionPreAcknowledge} B -->|-ids| EE + C -->|-ids| EE EE -.->|readDecision : pdIdsToAck + pdIdsToReq + pdCanPipelineIdsReq/| EA EE -.->|readDecision : 
pdObjectsToReqIds| EC - EE -.->|readDecision : pdObjectsToSubmitToPoolIds| EF - - EF{submitToPool} - E -->|-objects| EF - G -->|-count| EF - EF -->|+objects| F - EF -->|+count| I EG{Added to pool} F -->|-objects| EG diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Decision.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Decision.hs index e04da7a8b4..3894885370 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Decision.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Decision.hs @@ -135,7 +135,6 @@ pickObjectsToDownload { dgsPeerStates , dgsObjectsInflightMultiplicities , dgsObjectsOwtPoolMultiplicities - , dgsObjectsPendingMultiplicities } = -- outer fold: fold `[(peerAddr, DecisionPeerState objectId object)]` List.mapAccumR diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Registry.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Registry.hs index 69dd679c61..dd12072f60 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Registry.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Registry.hs @@ -191,7 +191,6 @@ withPeer , dpsNumIdsInflight = 0 , dpsObjectsInflightIds = Set.empty , dpsOutstandingFifo = StrictSeq.empty - , dpsObjectsPending = Map.empty , dpsObjectsOwtPool = Map.empty } dgsPeerStates @@ -206,20 +205,17 @@ withPeer st@DecisionGlobalState { dgsPeerStates , dgsObjectsInflightMultiplicities - , dgsObjectsPendingMultiplicities , dgsObjectsOwtPoolMultiplicities } = st { dgsPeerStates = dgsPeerStates' , dgsObjectsInflightMultiplicities = 
dgsObjectsInflightMultiplicities' - , dgsObjectsPendingMultiplicities = dgsObjectsPendingMultiplicities' , dgsObjectsOwtPoolMultiplicities = dgsObjectsOwtPoolMultiplicities' } where -- First extract the DPS of the specified peer from the DGS ( DecisionPeerState { dpsObjectsInflightIds - , dpsObjectsPending , dpsObjectsOwtPool } , dgsPeerStates' @@ -240,14 +236,6 @@ withPeer dgsObjectsInflightMultiplicities dpsObjectsInflightIds - -- Update the dgsObjectsPendingMultiplicities map by decreasing the count of each - -- objectId which is part of the dpsObjectsPending of this peer. - dgsObjectsPendingMultiplicities' = - Foldable.foldl' - decreaseCount - dgsObjectsPendingMultiplicities - (Map.keysSet dpsObjectsPending) - -- Finally, we need to update dgsObjectsOwtPoolMultiplicities by decreasing the count of -- each objectId which is part of the dpsObjectsOwtPool of this peer. dgsObjectsOwtPoolMultiplicities' = diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/State.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/State.hs index 891df6c8da..788e2e9baa 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/State.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/State.hs @@ -12,7 +12,6 @@ module Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.State , onRequestObjects , onReceivedIds , onReceivedObjects - , submitObjectsToPool ) where import Control.Concurrent.Class.MonadSTM.Strict @@ -22,7 +21,6 @@ import Control.Tracer (Tracer, traceWith) import Data.Foldable qualified as Foldable import Data.Map.Strict (Map, findWithDefault) import Data.Map.Strict qualified as Map -import Data.Sequence.Strict (StrictSeq) import Data.Sequence.Strict qualified as StrictSeq import Data.Set ((\\), Set) import Data.Set qualified as 
Set @@ -180,8 +178,6 @@ onReceivedIdsImpl receivedIds globalState@DecisionGlobalState { dgsPeerStates - , dgsObjectsPendingMultiplicities - , dgsObjectsOwtPoolMultiplicities } = globalState { dgsPeerStates = dgsPeerStates' @@ -203,8 +199,15 @@ onReceivedIdsImpl Set.filter (\objectId -> (not . hasObject $ objectId) -- object isn't already in the object pool && objectId `Set.notMember` dpsObjectsInflightIds -- object isn't in flight from current peer - && objectId `Map.notMember` dgsObjectsPendingMultiplicities -- the object has not been successfully downloaded from another peer - && objectId `Map.notMember` dgsObjectsOwtPoolMultiplicities -- (either pending ack or owt pool) + + -- We keep as "available" objects that have already been downloaded + -- from other peers but haven't been added to the object pool yet. + -- (aka. are in dpsObjectsOwtPool). + -- That's because if a peer disconnects while the objects are pending + -- or owt pool, they will be lost forever, and other peers won't be + -- able to re-request them. + -- See discussion: + -- https://moduscreate.slack.com/archives/C0937JQQ1F0/p1760343643030879?thread_ts=1760105747.965519&cid=C0937JQQ1F0 ) $ Set.fromList receivedIds dpsObjectsAvailableIds' = @@ -235,6 +238,8 @@ onReceivedIdsImpl onReceivedObjects :: forall m peerAddr object objectId. 
( MonadSTM m + , MonadMask m + , MonadMVar m , Ord objectId , Ord peerAddr ) => @@ -242,12 +247,14 @@ onReceivedObjects :: Tracer m (TraceDecisionLogic peerAddr objectId object) -> DecisionGlobalStateVar m peerAddr objectId object -> ObjectPoolWriter objectId object m -> + ObjectPoolSem m -> peerAddr -> -- | received objects [object] -> m () -onReceivedObjects odTracer tracer globalStateVar objectPoolWriter peerAddr objectsReceived = do +onReceivedObjects odTracer tracer globalStateVar objectPoolWriter poolSem peerAddr objectsReceived = do let getId = opwObjectId objectPoolWriter + let objectsReceivedMap = Map.fromList $ (\obj -> (getId obj, obj)) <$> objectsReceived globalState' <- atomically $ do stateTVar @@ -256,11 +263,19 @@ onReceivedObjects odTracer tracer globalStateVar objectPoolWriter peerAddr objec let globalState' = onReceivedObjectsImpl peerAddr - (Map.fromList $ (\obj -> (getId obj, obj)) <$> objectsReceived) + objectsReceivedMap globalState in (globalState', globalState')) traceWith odTracer (TraceObjectDiffusionInboundReceivedObjects (length objectsReceived)) traceWith tracer (TraceDecisionLogicGlobalStateUpdated "onReceivedObjects" globalState') + submitObjectsToPool + odTracer + tracer + globalStateVar + objectPoolWriter + poolSem + peerAddr + objectsReceivedMap onReceivedObjectsImpl :: forall peerAddr object objectId. 
@@ -278,11 +293,11 @@ onReceivedObjectsImpl st@DecisionGlobalState { dgsPeerStates , dgsObjectsInflightMultiplicities - , dgsObjectsPendingMultiplicities + , dgsObjectsOwtPoolMultiplicities } = st { dgsObjectsInflightMultiplicities = dgsObjectsInflightMultiplicities' - , dgsObjectsPendingMultiplicities = dgsObjectsPendingMultiplicities' + , dgsObjectsOwtPoolMultiplicities = dgsObjectsOwtPoolMultiplicities' , dgsPeerStates = dgsPeerStates' } where @@ -290,7 +305,7 @@ onReceivedObjectsImpl peerState@DecisionPeerState { dpsObjectsInflightIds - , dpsObjectsPending + , dpsObjectsOwtPool } = findWithDefault (error "ObjectDiffusion.onReceivedObjectsImpl: the peer should appear in dgsPeerStates") @@ -307,18 +322,18 @@ onReceivedObjectsImpl dgsObjectsInflightMultiplicities objectsReceivedIds - dpsObjectsPending' = dpsObjectsPending <> objectsReceived + dpsObjectsOwtPool' = dpsObjectsOwtPool <> objectsReceived - dgsObjectsPendingMultiplicities' = + dgsObjectsOwtPoolMultiplicities' = Foldable.foldl' increaseCount - dgsObjectsPendingMultiplicities + dgsObjectsOwtPoolMultiplicities objectsReceivedIds peerState' = peerState { dpsObjectsInflightIds = dpsObjectsInflightIds' - , dpsObjectsPending = dpsObjectsPending' + , dpsObjectsOwtPool = dpsObjectsOwtPool' } dgsPeerStates' = Map.insert peerAddr peerState' dgsPeerStates @@ -338,7 +353,7 @@ submitObjectsToPool :: ObjectPoolWriter objectId object m -> ObjectPoolSem m -> peerAddr -> - StrictSeq objectId -> + Map objectId object -> m () submitObjectsToPool odTracer @@ -347,28 +362,16 @@ submitObjectsToPool objectPoolWriter (ObjectPoolSem poolSem) peerAddr - objectIds = do + objects = do let getId = opwObjectId objectPoolWriter - -- Move objects from `pending` to `owtPool` state - (globalState', objects) <- atomically $ stateTVar globalStateVar $ \globalState -> - let (globalState', objects) = - Foldable.foldl' - (\(st, acc) objectId -> - let (st', object) = updateStateWhenObjectOwtPool objectId st - in (st', object : acc)) - 
(globalState, []) - objectIds - in ((globalState', objects), globalState') - traceWith decisionTracer (TraceDecisionLogicGlobalStateUpdated "submitObjectsToPool.updateStateWhenObjectOwtPool" globalState') - bracket_ (atomically $ waitTSem poolSem) (atomically $ signalTSem poolSem) $ do -- When the lock over the object pool is obtained - opwAddObjects objectPoolWriter objects + opwAddObjects objectPoolWriter (Map.elems objects) traceWith odTracer $ TraceObjectDiffusionInboundAddedObjects $ length objects @@ -381,50 +384,8 @@ submitObjectsToPool objects in (globalState', globalState') traceWith decisionTracer (TraceDecisionLogicGlobalStateUpdated "submitObjectsToPool.updateStateWhenObjectAddedToPool" globalState') - where - updateStateWhenObjectOwtPool :: - objectId -> - DecisionGlobalState peerAddr objectId object -> - (DecisionGlobalState peerAddr objectId object, object) - updateStateWhenObjectOwtPool - objectId - st@DecisionGlobalState - { dgsObjectsPendingMultiplicities - , dgsObjectsOwtPoolMultiplicities - , dgsPeerStates - } = - (st - { dgsObjectsPendingMultiplicities = dgsObjectsPendingMultiplicities' - , dgsObjectsOwtPoolMultiplicities = dgsObjectsOwtPoolMultiplicities' - , dgsPeerStates = dgsPeerStates' - }, object) - where - - dgsObjectsPendingMultiplicities' = decreaseCount dgsObjectsPendingMultiplicities objectId - dgsObjectsOwtPoolMultiplicities' = increaseCount dgsObjectsOwtPoolMultiplicities objectId - - peerState@DecisionPeerState{dpsObjectsPending, dpsObjectsOwtPool} = - findWithDefault - (error "ObjectDiffusion.submitObjectsToPool: the peer should appear in dgsPeerStates") - peerAddr - dgsPeerStates - - object = findWithDefault - (error "ObjectDiffusion.submitObjectsToPool: the object should appear in dpsObjectsPending") - objectId - dpsObjectsPending - - peerState' = peerState - { dpsObjectsPending = Map.delete objectId dpsObjectsPending - , dpsObjectsOwtPool = Map.insert objectId object dpsObjectsOwtPool - } - - dgsPeerStates' = - Map.insert - 
peerAddr - peerState' - dgsPeerStates + where updateStateWhenObjectAddedToPool :: objectId -> DecisionGlobalState peerAddr objectId object -> diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Types.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Types.hs index 2a18aa9268..f6a9cc6b3f 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Types.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Types.hs @@ -67,6 +67,7 @@ import Data.Word (Word64) import Ouroboros.Network.ControlMessage (ControlMessage) import Control.DeepSeq (NFData) import Quiet (Quiet (..)) +import qualified Data.Sequence.Strict as StrictSeq -- | Semaphore to guard access to the ObjectPool newtype ObjectPoolSem m = ObjectPoolSem (TSem m) @@ -103,13 +104,6 @@ data DecisionPeerState objectId object = DecisionPeerState -- ^ A subset of `dpsOutstandingFifo` which were unknown to the peer -- (i.e. requested but not received). We need to track these `objectId`s -- since they need to be acknowledged. - , dpsObjectsPending :: !(Map objectId object) - -- ^ A set of objects downloaded from the peer. They are not yet - -- acknowledged and haven't been sent to the objectpool yet. - -- - -- Life cycle of entries: - -- * added when a object is downloaded in `onReceivedObjectsImpl` - -- * removed by `acknowledgeObjectIds` (to properly follow `dpsOutstandingFifo`) , dpsObjectsOwtPool :: !(Map objectId object) -- ^ A set of objects on their way to the objectpool. -- Tracked here so that we can cleanup `dgsObjectsOwtPoolMultiplicities` if the @@ -143,14 +137,9 @@ data DecisionGlobalState peerAddr objectId object = DecisionGlobalState -- -- This can intersect with some `dpsObjectsAvailableIds`. -- The value for any key must be always non-zero (strictly positive). 
- , dgsObjectsPendingMultiplicities :: !(Map objectId ObjectMultiplicity) - -- ^ Map from ids of objects which have already been downloaded and validated - -- but not yet acknowledged, to their multiplicities - -- - -- The value for any key must be always non-zero (strictly positive). , dgsObjectsOwtPoolMultiplicities :: !(Map objectId ObjectMultiplicity) -- ^ Map from ids of objects which have already been downloaded, validated, - -- acknowledged, and are on their way to the objectpool (waiting for the lock) + -- and are on their way to the objectpool (waiting for the lock) -- to their multiplicities -- -- * We subtract from the counter when a given object is added to the @@ -193,7 +182,6 @@ newDecisionGlobalStateVar rng = DecisionGlobalState { dgsPeerStates = Map.empty , dgsObjectsInflightMultiplicities = Map.empty - , dgsObjectsPendingMultiplicities = Map.empty , dgsObjectsOwtPoolMultiplicities = Map.empty , dgsRng = rng } @@ -223,8 +211,6 @@ data PeerDecision objectId object = PeerDecision -- if we have non-acknowledged `objectId`s. , pdObjectsToReqIds :: !(Set objectId) -- ^ objectId's to download. - , pdObjectsToSubmitToPoolIds :: !(StrictSeq objectId) - -- ^ list of `object`s to submit to the objectpool. 
} deriving (Show, Eq) @@ -238,21 +224,18 @@ instance Ord objectId => Semigroup (PeerDecision objectId object) where , pdIdsToReq , pdCanPipelineIdsReq = _ignored , pdObjectsToReqIds - , pdObjectsToSubmitToPoolIds } <> PeerDecision { pdIdsToAck = pdIdsToAck' , pdIdsToReq = pdIdsToReq' , pdCanPipelineIdsReq = pdCanPipelineIdsReq' , pdObjectsToReqIds = pdObjectsToReqIds' - , pdObjectsToSubmitToPoolIds = pdObjectsToSubmitToPoolIds' } = PeerDecision { pdIdsToAck = pdIdsToAck + pdIdsToAck' , pdIdsToReq = pdIdsToReq + pdIdsToReq' , pdCanPipelineIdsReq = pdCanPipelineIdsReq' , pdObjectsToReqIds = pdObjectsToReqIds <> pdObjectsToReqIds' - , pdObjectsToSubmitToPoolIds = pdObjectsToSubmitToPoolIds <> pdObjectsToSubmitToPoolIds' } instance Ord objectId => Monoid (PeerDecision objectId object) where mempty = PeerDecision @@ -260,7 +243,6 @@ instance Ord objectId => Monoid (PeerDecision objectId object) where , pdIdsToReq = 0 , pdCanPipelineIdsReq = False , pdObjectsToReqIds = Set.empty - , pdObjectsToSubmitToPoolIds = Set.empty } -- | ObjectLogic tracer. @@ -277,8 +259,6 @@ data ObjectDiffusionCounters -- ^ number of distinct in-flight objects. , odcNumTotalObjectsInflight :: Int -- ^ number of all in-flight objects. 
- , odcNumDistinctObjectsPending :: Int - -- ^ number of distinct pending objects (downloaded but not acked) , odcNumDistinctObjectsOwtPool :: Int -- ^ number of distinct objects which are waiting to be added to the -- objectpool (each peer need to acquire the semaphore to effectively add @@ -293,14 +273,12 @@ makeObjectDiffusionCounters :: makeObjectDiffusionCounters dgs@DecisionGlobalState { dgsObjectsInflightMultiplicities - , dgsObjectsPendingMultiplicities , dgsObjectsOwtPoolMultiplicities } = ObjectDiffusionCounters { odcNumDistinctObjectsAvailable = Map.size $ dgsObjectsAvailableMultiplicities dgs , odcNumDistinctObjectsInflight = Map.size dgsObjectsInflightMultiplicities , odcNumTotalObjectsInflight = fromIntegral $ mconcat (Map.elems dgsObjectsInflightMultiplicities) - , odcNumDistinctObjectsPending = Map.size dgsObjectsPendingMultiplicities , odcNumDistinctObjectsOwtPool = Map.size dgsObjectsOwtPoolMultiplicities } From cc30a9191bed8daa3b3779f8af9a0f37d71dd75f Mon Sep 17 00:00:00 2001 From: Thomas BAGREL Date: Mon, 13 Oct 2025 19:00:53 +0200 Subject: [PATCH 20/43] Simplify pickObjectsToReq logic --- .../ObjectDiffusion/Inbound/V2.hs | 39 ++- .../ObjectDiffusion/Inbound/V2/Decision.hs | 291 ++++++++++++++---- .../ObjectDiffusion/Inbound/V2/Registry.hs | 40 +-- .../ObjectDiffusion/Inbound/V2/State.hs | 22 +- .../ObjectDiffusion/Inbound/V2/Types.hs | 30 +- 5 files changed, 280 insertions(+), 142 deletions(-) diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2.hs index 6944aa17a4..2bca43e9de 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2.hs @@ -44,6 +44,7 @@ import 
Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.Types as V2 import Ouroboros.Network.Protocol.ObjectDiffusion.Inbound import Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.ObjectPool.API import Ouroboros.Network.ControlMessage (ControlMessageSTM, ControlMessage (..)) +import Control.Concurrent.Class.MonadSTM (atomically) -- | A object-submission inbound side (client). @@ -63,14 +64,13 @@ objectDiffusionInbound :: objectDiffusionInbound tracer initDelay - controlMessage + controlMessageSTM PeerStateAPI { psaReadDecision , psaOnRequestIds , psaOnRequestObjects , psaOnReceivedIds , psaOnReceivedObjects - , psaSubmitObjectsToPool } = ObjectDiffusionInboundPipelined $ do case initDelay of @@ -96,11 +96,10 @@ objectDiffusionInbound goIdle' n = do -- Block on next decision. decision@PeerDecision - { pdIdsToAck - , pdIdsToReq + { pdNumIdsToAck + , pdNumIdsToReq , pdObjectsToReqIds - , pdCanPipelineIdsReq - , pdObjectsToSubmitToPoolIds = pdObjectsToSubmitToPoolIds + , pdCanPipelineIdsRequests } <- psaReadDecision traceWith tracer (TraceObjectDiffusionInboundReceivedDecision decision) @@ -145,12 +144,12 @@ objectDiffusionInbound -- until all immediately available replies have been collected -- before requesting objects and ids in a pipelined fashion (goCollect n decision) - else do + else do undefined if Set.null pdObjectsToReqIds - then goReqObjectIds Zero object - else goReqObjects object + then goReqObjectIds Zero undefined + else goReqObjects undefined -- Pipelined request of objects goReqObjects :: @@ -169,7 +168,7 @@ objectDiffusionInbound m (InboundStIdle n objectId object m ()) goReqObjectIds n - PeerDecision{pdIdsToReq = 0} = + PeerDecision{pdNumIdsToReq = 0} = case n of Zero -> goIdle Succ _ -> handleReplies n @@ -180,9 +179,9 @@ objectDiffusionInbound -- mini-protocol. 
Zero PeerDecision - { pdIdsToAck = objectIdsToAck - , pdCanPipelineIdsReq = False - , pdIdsToReq = objectIdsToReq + { pdNumIdsToAck = objectIdsToAck + , pdCanPipelineIdsRequests = False + , pdNumIdsToReq = objectIdsToReq } = pure $ SendMsgRequestObjectIdsBlocking @@ -202,9 +201,9 @@ objectDiffusionInbound goReqObjectIds n@Zero PeerDecision - { pdIdsToAck = objectIdsToAck - , pdCanPipelineIdsReq = True - , pdIdsToReq = objectIdsToReq + { pdNumIdsToAck = objectIdsToAck + , pdCanPipelineIdsRequests = True + , pdNumIdsToReq = objectIdsToReq } = pure $ SendMsgRequestObjectIdsPipelined @@ -214,13 +213,13 @@ objectDiffusionInbound goReqObjectIds n@Succ{} PeerDecision - { pdIdsToAck = objectIdsToAck - , pdCanPipelineIdsReq - , pdIdsToReq = objectIdsToReq + { pdNumIdsToAck = objectIdsToAck + , pdCanPipelineIdsRequests + , pdNumIdsToReq = objectIdsToReq } = -- it is impossible that we have had `object`'s to request (Succ{} - is an -- evidence for that), but no unacknowledged `objectId`s. - assert pdCanPipelineIdsReq $ + assert pdCanPipelineIdsRequests $ pure $ SendMsgRequestObjectIdsPipelined objectIdsToAck diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Decision.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Decision.hs index 3894885370..b943774bbb 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Decision.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Decision.hs @@ -30,12 +30,13 @@ import Data.Maybe (mapMaybe) import Data.Sequence.Strict qualified as StrictSeq import Data.Set (Set) import Data.Set qualified as Set +import Data.Foldable qualified as Foldable import GHC.Stack (HasCallStack) import Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.Policy import 
Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.State import Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.Types import Ouroboros.Network.Protocol.ObjectDiffusion.Type -import System.Random (random) +import System.Random (random, StdGen) -- | Make download decisions. makeDecisions :: @@ -44,47 +45,215 @@ makeDecisions :: , Ord objectId , Hashable peerAddr ) => + (objectId -> Bool) -> -- | decision decisionPolicy DecisionPolicy -> -- | decision context DecisionGlobalState peerAddr objectId object -> - -- | list of available peers. - -- - -- This is a subset of `dgsPeerStates` of peers which either: - -- * can be used to download a `object`, - -- * can acknowledge some `objectId`s. - Map peerAddr (DecisionPeerState objectId object) -> ( DecisionGlobalState peerAddr objectId object , Map peerAddr (PeerDecision objectId object) ) -makeDecisions decisionPolicy st = - let (salt, rng') = random (dgsRng st) - st' = st{dgsRng = rng'} - in fn - . pickObjectsToDownload decisionPolicy st' - . orderByRejections salt - where - fn :: - forall a. - (a, [(peerAddr, PeerDecision objectId object)]) -> - (a, Map peerAddr (PeerDecision objectId object)) - fn (a, as) = (a, Map.fromList as) +makeDecisions hasObject decisionPolicy globalState = + -- We do it in two steps, because pre-acknowledging will remove objects from dpsObjectsAvailableIds sets of each peer, + -- that the pickObjectsToReq function will use to decide which objects can be requested. 
+ let (globalState', ackAndRequestIdsDecisions) = preAcknowledge hasObject decisionPolicy globalState + (globalState'', objectsToReqSets) = pickObjectsToReq hasObject decisionPolicy globalState' + in (globalState'', Map.intersectionWith (\decision objectsToReqIds -> decision{ pdObjectsToReqIds = objectsToReqIds }) ackAndRequestIdsDecisions objectsToReqSets) + +-- | We pre-acknowledge the longest prefix of outstandingFifo of each peer that match the following criteria: +-- * either the object is owt pool for the peer who has downloaded it +-- * or the object is already in pool +preAcknowledge :: + forall peerAddr objectId object. + ( Ord peerAddr + , Ord objectId + ) => + (objectId -> Bool) -> + DecisionPolicy -> + DecisionGlobalState peerAddr objectId object -> + ( DecisionGlobalState peerAddr objectId object + , Map peerAddr (PeerDecision objectId object) + ) +preAcknowledge poolHasObject decisionPolicy globalState@DecisionGlobalState{dgsPeerStates} = + -- We use `Map.mapAccumWithKey` to traverse the peer states and both update + -- the peer states and accumulate ack decisions made along the way. + let (decisions, dgsPeerStates') = + Map.mapAccumWithKey preAcknowledgeForPeer Map.empty dgsPeerStates + in ( globalState{dgsPeerStates = dgsPeerStates'} + , decisions + ) --- | Order peers by how useful the objects they have provided are. --- --- objects delivered late will fail to apply because they were included in --- a recently adopted block. Peers can race against each other by setting --- `dpMaxObjectInflightMultiplicity` to > 1. In case of a tie a hash of the peerAddr --- is used as a tie breaker. Since every invocation use a new salt a given --- peerAddr does not have an advantage over time. -orderByRejections :: - Hashable peerAddr => - Int -> - Map peerAddr (DecisionPeerState objectId object) -> - [(peerAddr, DecisionPeerState objectId object)] -orderByRejections salt = - List.sortOn (\(peerAddr, ps) -> hashWithSalt salt peerAddr) - . 
Map.toList + where + preAcknowledgeForPeer :: + -- | Accumulator containing decisions already made for other peers + -- It's a map in which we need to insert the new decision into + Map peerAddr (PeerDecision objectId object) -> + peerAddr -> + DecisionPeerState objectId object -> + (Map peerAddr (PeerDecision objectId object), DecisionPeerState objectId object) + preAcknowledgeForPeer decisionsAcc peerAddr peerState@DecisionPeerState{dpsOutstandingFifo, dpsObjectsAvailableIds, dpsObjectsOwtPool} = + let + -- we isolate the longest prefix of outstandingFifo that matches our ack criteria (see above in preAcknowledge doc) + (idsToAck, dpsOutstandingFifo') = + StrictSeq.spanl + (\objectId -> poolHasObject objectId || objectId `Map.member` dpsObjectsOwtPool) + dpsOutstandingFifo + + -- we remove the acknowledged ids from dpsObjectsAvailableIds if they were present + -- we need to do that because objects that were advertised by this corresponding outbound peer + -- but never downloaded because we already have them in pool were consequently never removed + -- from dpsObjectsAvailableIds by onRequestObjects + dpsObjectsAvailableIds' = + Foldable.foldl' (\set objectId -> Set.delete objectId set) dpsObjectsAvailableIds idsToAck + + + pdNumIdsToAck = fromIntegral $ StrictSeq.length idsToAck + + -- should this be incremental or overwrite the previous value in the semigroup instance? + pdNumIdsToReq = + -- numOfUnacked = StrictSeq.length dpsOutstandingFifo + -- numOfAcked = StrictSeq.length acknowledgedObjectIds' + -- unackedAndRequested = fromIntegral numOfUnacked + dpsNumIdsInflight + + -- pdNumIdsToReq = + -- assert (unackedAndRequested <= dpMaxNumObjectsOutstanding) $ + -- assert (dpsNumIdsInflight <= dpMaxNumObjectIdsReq) $ + -- (dpMaxNumObjectsOutstanding - unackedAndRequested + fromIntegral numOfAcked) + -- `min` (dpMaxNumObjectIdsReq - dpsNumIdsInflight) + undefined -- TODO + + pdCanPipelineIdsRequests = not . 
StrictSeq.null $ dpsOutstandingFifo' + + peerDecision = PeerDecision + { pdNumIdsToAck + , pdNumIdsToReq + , pdCanPipelineIdsRequests + , pdObjectsToReqIds = Set.empty -- we don't decide this here + } + + peerState' = peerState + { dpsOutstandingFifo = dpsOutstandingFifo' + , dpsObjectsAvailableIds = dpsObjectsAvailableIds' + } + + in (Map.insert peerAddr peerDecision decisionsAcc, peerState') + +orderPeers :: Map peerAddr (DecisionPeerState objectId object) -> StdGen -> ([(peerAddr, DecisionPeerState objectId object)], StdGen) +orderPeers = undefined + +-- TODO: be careful about additive semigroup instance of PeerDecision +-- e.g. what if an object is first available and picked to download, but the download request isn't emitted yet +-- then the object is received from another peer, so we can ack it from our peer on the next makeDecision call +-- So later when the download request actually takes place, we don't need the object anymore, and it will no +-- longer be part of dpsObjectsAvailableIds of the peer! But also no longer in the FIFO +-- So if the requestIds doing the ack has been made before the requestObject, then the server +-- won't be able to serve the object. + +-- | This function could just be pure if it hadn't be for the rng used to order peers +pickObjectsToReq :: + forall peerAddr objectId object. 
+ ( Ord peerAddr + , Ord objectId + , Hashable peerAddr + ) => + (objectId -> Bool) -> + DecisionPolicy -> + DecisionGlobalState peerAddr objectId object -> + (DecisionGlobalState peerAddr objectId object + , Map peerAddr (Set objectId)) +pickObjectsToReq poolHasObject DecisionPolicy{dpMaxNumObjectsInflightPerPeer, dpMaxNumObjectsInflightTotal,dpMaxObjectInflightMultiplicity} globalState@DecisionGlobalState{dgsRng, dgsPeerStates, dgsObjectsInflightMultiplicities, dgsObjectsOwtPoolMultiplicities} = + -- objects that are inflight or owtPool for a given peer are no longer in dpsObjectsAvailableIds of this peer + -- so we only need to filter out dpsObjectsAvailableIds by removing the objects that are already in pool + + let objectsExpectedSoonMultiplicities = Map.unionWith (+) dgsObjectsInflightMultiplicities dgsObjectsOwtPoolMultiplicities + + (orderedPeers, dgsRng') = orderPeers dgsPeerStates dgsRng + + -- We want to map each objectId to the sorted list of peers that can provide it (and from which we should download them preferably) + -- For each peer we also indicate how many objects it has in flight at the moment + -- We filter out here the objects that are already in pool + objectsToSortedProviders :: Map objectId [(peerAddr, NumObjectsReq)] + objectsToSortedProviders = + -- We iterate over each peer and the corresponding dpsObjectsAvailableIds + Foldable.foldl' + ( \accMap (peerAddr, DecisionPeerState{dpsObjectsAvailableIds, dpsObjectsInflightIds}) -> + -- For each peer, we iterate over dpsObjectsAvailableIds filtered from the objects that are already in pool + Foldable.foldl' + (\accMap' objectId -> Map.insertWith (++) objectId [(peerAddr, fromIntegral $ Set.size dpsObjectsInflightIds)] accMap') + accMap + (Set.filter (not . 
poolHasObject) dpsObjectsAvailableIds) + ) + Map.empty + orderedPeers + + totalNumObjectsInflight :: NumObjectsReq + totalNumObjectsInflight = fromIntegral $ Map.foldl' (+) 0 dgsObjectsInflightMultiplicities + + -- Now we combine these maps for easy fold + objectsToProvidersAndExpectedMultiplicities :: Map objectId ([(peerAddr, NumObjectsReq)], ObjectMultiplicity) + objectsToProvidersAndExpectedMultiplicities = + Map.merge + -- if an objectId is missing from objectsExpectedSoonMultiplicities, then its expected multiplicity is 0 + (Map.mapMissing \_ providers -> (providers, 0)) + -- if an objectId is missing from objectsToSortedProviders, then we don't care about it + Map.dropMissing + -- Combine in a tuple the list of providers and the expected multiplicity + (Map.zipWithMatched \_ providers expectedMultiplicity -> (providers, expectedMultiplicity)) + objectsToSortedProviders + objectsExpectedSoonMultiplicities + + St{peersToObjectsToReq} = + -- We iterate over each objectId and the corresponding (providers, expectedMultiplicity) + Map.foldlWithKey' + ( \st objectId (providers, expectedMultiplicity) -> + -- reset the objectMultiplicity counter for each new objectId + let st' = st{objectMultiplicity = 0} + + in Foldable.foldl' + (howToFoldProviderList objectId expectedMultiplicity) + st' + providers + ) + St{ + totalNumObjectsToReq = 0 + , objectMultiplicity = 0 + , peersToObjectsToReq = Map.empty + } + objectsToProvidersAndExpectedMultiplicities + + -- This function decides whether or not we should select a given peer as provider for the current objectId + -- it takes into account if we are expecting to obtain the object from other sources (either inflight/owt pool already, or if the object will be requested from already selected peers in this given round) + howToFoldProviderList :: objectId -> ObjectMultiplicity -> St peerAddr objectId -> (peerAddr, NumObjectsReq) -> St peerAddr objectId + howToFoldProviderList objectId expectedMultiplicity 
st@St{totalNumObjectsToReq, objectMultiplicity, peersToObjectsToReq} (peerAddr, numObjectsInFlight) = + let -- see what has already been attributed to this peer + objectsToReq = Map.findWithDefault Set.empty peerAddr peersToObjectsToReq + + shouldSelect = + -- We should not go over the multiplicity limit per object + objectMultiplicity + expectedMultiplicity < dpMaxObjectInflightMultiplicity + -- We should not go over the total number of objects inflight limit + && totalNumObjectsInflight + totalNumObjectsToReq < dpMaxNumObjectsInflightTotal + -- We should not go over the per-peer number of objects inflight limit + && numObjectsInFlight + (fromIntegral $ Set.size objectsToReq) < dpMaxNumObjectsInflightPerPeer + + in if shouldSelect + then + -- We increase both global count and per-object count, and we add the object to the peer's set + St + { totalNumObjectsToReq = totalNumObjectsToReq + 1 + , objectMultiplicity = objectMultiplicity + 1 + , peersToObjectsToReq = Map.insert peerAddr (Set.insert objectId objectsToReq) peersToObjectsToReq + } + -- Or we keep the state as is if we don't select this peer + else st + + in (globalState{dgsRng = dgsRng'}, peersToObjectsToReq) + +data St peerAddr objectId = St { totalNumObjectsToReq :: !NumObjectsReq, objectMultiplicity :: ObjectMultiplicity, peersToObjectsToReq :: Map peerAddr (Set objectId) } + +------------------------------------------------------------------------------- +-- OLD STUFF ONLY HERE FOR REFERENCE +------------------------------------------------------------------------------- -- | Internal state of `pickObjectsToDownload` computation. data DecisionInternalState peerAddr objectId object @@ -198,15 +367,14 @@ pickObjectsToDownload , ( (peerAddr, peerObjectState') , PeerDecision - { pdIdsToAck = numObjectIdsToAck - , pdIdsToReq = numObjectIdsToReq - , pdCanPipelineIdsReq = + { pdNumIdsToAck = numObjectIdsToAck + , pdNumIdsToReq = numObjectIdsToReq + , pdCanPipelineIdsRequests = not . StrictSeq.null . 
dpsOutstandingFifo $ peerObjectState' , pdObjectsToReqIds = Set.empty - , pdObjectsToSubmitToPoolIds = pdObjectsToSubmitToPoolIds } ) ) @@ -304,15 +472,14 @@ pickObjectsToDownload , ( (peerAddr, peerObjectState'') , PeerDecision - { pdIdsToAck = numObjectIdsToAck - , pdCanPipelineIdsReq = + { pdNumIdsToAck = numObjectIdsToAck + , pdCanPipelineIdsRequests = not . StrictSeq.null . dpsOutstandingFifo $ peerObjectState'' - , pdIdsToReq = numObjectIdsToReq + , pdNumIdsToReq = numObjectIdsToReq , pdObjectsToReqIds = pdObjectsToReqIdsMap - , pdObjectsToSubmitToPoolIds = pdObjectsToSubmitToPoolIds } ) ) @@ -363,17 +530,16 @@ pickObjectsToDownload in ( sharedState { dgsPeerStates = dgsPeerStates' , dgsObjectsInflightMultiplicities = disObjectsInflightMultiplicities - , dgsObjectsPendingMultiplicities = dgsObjectsPendingMultiplicities' , dgsObjectsOwtPoolMultiplicities = dgsObjectsOwtPoolMultiplicities' } , -- exclude empty results mapMaybe ( \((a, _), b) -> case b of PeerDecision - { pdIdsToAck = 0 - , pdIdsToReq = 0 + { pdNumIdsToAck = 0 + , pdNumIdsToReq = 0 , pdObjectsToReqIds - , pdObjectsToSubmitToPoolIds } + } | null pdObjectsToReqIds , Map.null pdObjectsToSubmitToPoolIds -> Nothing @@ -387,7 +553,7 @@ pickObjectsToDownload Map objectId Int -> (a, PeerDecision objectId object) -> Map objectId Int - updateInSubmissionToObjectPoolObjects m (_, PeerDecision{pdObjectsToSubmitToPoolIds}) = + updateInSubmissionToObjectPoolObjects m (_, PeerDecision{}) = List.foldl' fn m (Map.toList pdObjectsToSubmitToPoolIds) where fn :: @@ -435,11 +601,11 @@ filterActivePeers dpsNumIdsInflight == 0 -- if a peer has objectIds in-flight, we cannot request more objectIds or objects. && dpsNumIdsInflight + numOfUnacked <= dpMaxNumObjectsOutstanding - && pdIdsToReq > 0 + && pdNumIdsToReq > 0 where -- Split `dpsOutstandingFifo'` into the longest prefix of `objectId`s which -- can be acknowledged and the unacknowledged `objectId`s. 
- (pdIdsToReq, _, unackedObjectIds) = splitAcknowledgedObjectIds decisionPolicy globalState peerObjectState + (pdNumIdsToReq, _, unackedObjectIds) = splitAcknowledgedObjectIds decisionPolicy globalState peerObjectState numOfUnacked = fromIntegral (StrictSeq.length unackedObjectIds) gn :: DecisionPeerState objectId object -> Bool @@ -452,7 +618,7 @@ filterActivePeers } = ( dpsNumIdsInflight == 0 && dpsNumIdsInflight + numOfUnacked <= dpMaxNumObjectsOutstanding - && pdIdsToReq > 0 + && pdNumIdsToReq > 0 ) || (not (Set.null downloadable)) where @@ -466,7 +632,7 @@ filterActivePeers -- Split `dpsOutstandingFifo'` into the longest prefix of `objectId`s which -- can be acknowledged and the unacknowledged `objectId`s. - (pdIdsToReq, _, _) = splitAcknowledgedObjectIds decisionPolicy globalState peerObjectState + (pdNumIdsToReq, _, _) = splitAcknowledgedObjectIds decisionPolicy globalState peerObjectState -- -- Auxiliary functions @@ -525,15 +691,14 @@ acknowledgeObjectIds ps@DecisionPeerState { dpsObjectsAvailableIds , dpsNumIdsInflight - , dpsObjectsPending , dpsObjectsOwtPool } = -- We can only acknowledge objectIds when we can request new ones, since -- a `MsgRequestObjectIds` for 0 objectIds is a protocol error. - if pdIdsToReq > 0 + if pdNumIdsToReq > 0 then - ( pdIdsToAck - , pdIdsToReq + ( pdNumIdsToAck + , pdNumIdsToReq , objectsOwtPool , refCountDiff , ps @@ -541,8 +706,7 @@ acknowledgeObjectIds , dpsObjectsAvailableIds = dpsObjectsAvailableIds' , dpsNumIdsInflight = dpsNumIdsInflight - + pdIdsToReq - , dpsObjectsPending = dpsObjectsPending' + + pdNumIdsToReq , dpsObjectsOwtPool = dpsObjectsOwtPool' } ) @@ -556,7 +720,7 @@ acknowledgeObjectIds where -- Split `dpsOutstandingFifo'` into the longest prefix of `objectId`s which -- can be acknowledged and the unacknowledged `objectId`s. 
- (pdIdsToReq, acknowledgedObjectIds, dpsOutstandingFifo') = + (pdNumIdsToReq, acknowledgedObjectIds, dpsOutstandingFifo') = splitAcknowledgedObjectIds decisionPolicy globalState ps objectsOwtPoolList = @@ -601,8 +765,8 @@ acknowledgeObjectIds fn Nothing = Just 1 fn (Just n) = Just $! n + 1 - pdIdsToAck :: NumObjectIdsAck - pdIdsToAck = fromIntegral $ StrictSeq.length acknowledgedObjectIds + pdNumIdsToAck :: NumObjectIdsAck + pdNumIdsToAck = fromIntegral $ StrictSeq.length acknowledgedObjectIds -- | Split unacknowledged objectIds into acknowledged and unacknowledged parts, also -- return number of objectIds which can be requested. @@ -620,15 +784,14 @@ splitAcknowledgedObjectIds , dpMaxNumObjectIdsReq } DecisionGlobalState - { dgsObjectsPendingMultiplicities + { } DecisionPeerState { dpsOutstandingFifo - , dpsObjectsPending , dpsObjectsInflightIds , dpsNumIdsInflight } = - (pdIdsToReq, acknowledgedObjectIds', dpsOutstandingFifo') + (pdNumIdsToReq, acknowledgedObjectIds', dpsOutstandingFifo') where (acknowledgedObjectIds', dpsOutstandingFifo') = StrictSeq.spanl @@ -644,7 +807,7 @@ splitAcknowledgedObjectIds numOfAcked = StrictSeq.length acknowledgedObjectIds' unackedAndRequested = fromIntegral numOfUnacked + dpsNumIdsInflight - pdIdsToReq = + pdNumIdsToReq = assert (unackedAndRequested <= dpMaxNumObjectsOutstanding) $ assert (dpsNumIdsInflight <= dpMaxNumObjectIdsReq) $ (dpMaxNumObjectsOutstanding - unackedAndRequested + fromIntegral numOfAcked) diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Registry.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Registry.hs index dd12072f60..5a9e94d1df 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Registry.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Registry.hs @@ 
-58,8 +58,7 @@ data PeerStateAPI m objectId object = PeerStateAPI -- ^ a blocking action which reads `PeerDecision` , psaOnRequestIds :: NumObjectIdsReq -> m () , psaOnRequestObjects :: Set objectId -> m () - , psaSubmitObjectsToPool :: StrictSeq objectId -> m () - , psaOnReceivedIds :: [objectId] -> m () + , psaOnReceivedIds :: NumObjectIdsReq -> [objectId] -> m () -- ^ Error handling should have been done before calling this , psaOnReceivedObjects :: [object] -> m () -- ^ Error handling should have been done before calling this @@ -70,7 +69,7 @@ data PeerStateAPI m objectId object = PeerStateAPI -- `DecisionGlobalStateVar` and `PeerDecisionChannelsVar`s, which exposes `PeerStateAPI`. -- `PeerStateAPI` is only safe inside the `withPeer` scope. withPeer :: - forall object peerAddr objectId ticketNo m a. + forall object peerAddr objectId m a. ( MonadMask m , MonadMVar m , MonadSTM m @@ -81,11 +80,10 @@ withPeer :: Tracer m (TraceDecisionLogic peerAddr objectId object) -> Tracer m (TraceObjectDiffusionInbound objectId object) -> PeerDecisionChannelsVar m peerAddr objectId object -> - ObjectPoolSem m -> DecisionPolicy -> DecisionGlobalStateVar m peerAddr objectId object -> - ObjectPoolReader objectId object ticketNo m -> ObjectPoolWriter objectId object m -> + ObjectPoolSem m -> -- | new peer peerAddr -> -- | callback which gives access to `PeerStateAPI` @@ -95,11 +93,10 @@ withPeer decisionTracer objectDiffusionTracer decisionChannelsVar - objectPoolSem _decisionPolicy globalStateVar - _objectPoolReader objectPoolWriter + objectPoolSem peerAddr withAPI = bracket registerPeerAndCreateAPI unregisterPeer withAPI @@ -136,25 +133,18 @@ withPeer globalStateVar objectPoolWriter peerAddr - , psaSubmitObjectsToPool = State.submitObjectsToPool - objectDiffusionTracer - decisionTracer - globalStateVar - objectPoolWriter - objectPoolSem - peerAddr , psaOnReceivedIds = State.onReceivedIds objectDiffusionTracer decisionTracer globalStateVar objectPoolWriter peerAddr - (error 
"TODO: provide the number of requested IDs") , psaOnReceivedObjects = State.onReceivedObjects objectDiffusionTracer decisionTracer globalStateVar objectPoolWriter + objectPoolSem peerAddr } ) @@ -257,11 +247,12 @@ decisionLogicThread :: ) => Tracer m (TraceDecisionLogic peerAddr objectId object) -> Tracer m ObjectDiffusionCounters -> + ObjectPoolWriter objectId object m -> DecisionPolicy -> PeerDecisionChannelsVar m peerAddr objectId object -> DecisionGlobalStateVar m peerAddr objectId object -> m Void -decisionLogicThread decisionTracer countersTracer decisionPolicy decisionChannelsVar globalStateVar = do +decisionLogicThread decisionTracer countersTracer ObjectPoolWriter{opwHasObject} decisionPolicy decisionChannelsVar globalStateVar = do labelThisThread "ObjectDiffusionInbound.decisionLogicThread" forever $ do -- We rate limit the decision making process, it could overwhelm the CPU @@ -269,16 +260,13 @@ decisionLogicThread decisionTracer countersTracer decisionPolicy decisionChannel threadDelay _DECISION_LOOP_DELAY -- Make decisions and update the global state var accordingly - (decisions, globalState') <- atomically $ do - globalState <- readTVar globalStateVar - let activePeers = filterActivePeers decisionPolicy globalState - - -- block until at least one peer is active - check (not (Map.null activePeers)) - - let (globalState', decisions) = makeDecisions decisionPolicy globalState activePeers - writeTVar globalStateVar globalState' - return (decisions, globalState') + (globalState', decisions) <- atomically $ do + hasObject <- opwHasObject + stateTVar + globalStateVar + \globalState -> + let (globalState', decisions) = makeDecisions hasObject decisionPolicy globalState + in ((globalState', decisions), globalState') traceWith decisionTracer (TraceDecisionLogicGlobalStateUpdated "decisionLogicThread" globalState') traceWith decisionTracer (TraceDecisionLogicDecisionsMade decisions) diff --git 
a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/State.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/State.hs index 788e2e9baa..d985cb43b6 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/State.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/State.hs @@ -194,24 +194,12 @@ onReceivedIdsImpl peerAddr dgsPeerStates - -- Filter out objects we are not interesting in downloading - newObjectsAvailableIds = - Set.filter (\objectId -> - (not . hasObject $ objectId) -- object isn't already in the object pool - && objectId `Set.notMember` dpsObjectsInflightIds -- object isn't in flight from current peer - - -- We keep as "available" objects that have already been downloaded - -- from other peers but haven't been added to the object pool yet. - -- (aka. are in dpsObjectsOwtPool). - -- That's because if a peer disconnects while the objects are pending - -- or owt pool, they will be lost forever, and other peers won't be - -- able to re-request them. - -- See discussion: - -- https://moduscreate.slack.com/archives/C0937JQQ1F0/p1760343643030879?thread_ts=1760105747.965519&cid=C0937JQQ1F0 - ) $ Set.fromList receivedIds - + -- Actually we don't need to filter out availableIds, because + -- makeDecisions is the only reader of dpsObjectsAvailableIds + -- and will filter it when needed with the actualized state of the object + -- pool. dpsObjectsAvailableIds' = - dpsObjectsAvailableIds `Set.union` newObjectsAvailableIds + dpsObjectsAvailableIds `Set.union` Set.fromList receivedIds -- Add received objectIds to `dpsOutstandingFifo`. 
dpsOutstandingFifo' = dpsOutstandingFifo <> StrictSeq.fromList receivedIds diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Types.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Types.hs index f6a9cc6b3f..3b112b6e5f 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Types.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Types.hs @@ -202,11 +202,11 @@ newDecisionGlobalStateVar rng = -- expensive `makeDecision` computation will not need to take that peer into -- account. data PeerDecision objectId object = PeerDecision - { pdIdsToAck :: !NumObjectIdsAck + { pdNumIdsToAck :: !NumObjectIdsAck -- ^ objectId's to acknowledge - , pdIdsToReq :: !NumObjectIdsReq + , pdNumIdsToReq :: !NumObjectIdsReq -- ^ number of objectId's to request - , pdCanPipelineIdsReq :: !Bool + , pdCanPipelineIdsRequests :: !Bool -- ^ the object-submission protocol only allows to pipeline `objectId`'s requests -- if we have non-acknowledged `objectId`s. , pdObjectsToReqIds :: !(Set objectId) @@ -220,28 +220,28 @@ data PeerDecision objectId object = PeerDecision -- `DecisionPeerState` is updated. It is designed to work with `TMergeVar`s. 
instance Ord objectId => Semigroup (PeerDecision objectId object) where PeerDecision - { pdIdsToAck - , pdIdsToReq - , pdCanPipelineIdsReq = _ignored + { pdNumIdsToAck + , pdNumIdsToReq + , pdCanPipelineIdsRequests = _ignored , pdObjectsToReqIds } <> PeerDecision - { pdIdsToAck = pdIdsToAck' - , pdIdsToReq = pdIdsToReq' - , pdCanPipelineIdsReq = pdCanPipelineIdsReq' + { pdNumIdsToAck = pdNumIdsToAck' + , pdNumIdsToReq = pdNumIdsToReq' + , pdCanPipelineIdsRequests = pdCanPipelineIdsRequests' , pdObjectsToReqIds = pdObjectsToReqIds' } = PeerDecision - { pdIdsToAck = pdIdsToAck + pdIdsToAck' - , pdIdsToReq = pdIdsToReq + pdIdsToReq' - , pdCanPipelineIdsReq = pdCanPipelineIdsReq' + { pdNumIdsToAck = pdNumIdsToAck + pdNumIdsToAck' + , pdNumIdsToReq = pdNumIdsToReq + pdNumIdsToReq' + , pdCanPipelineIdsRequests = pdCanPipelineIdsRequests' , pdObjectsToReqIds = pdObjectsToReqIds <> pdObjectsToReqIds' } instance Ord objectId => Monoid (PeerDecision objectId object) where mempty = PeerDecision - { pdIdsToAck = 0 - , pdIdsToReq = 0 - , pdCanPipelineIdsReq = False + { pdNumIdsToAck = 0 + , pdNumIdsToReq = 0 + , pdCanPipelineIdsRequests = False , pdObjectsToReqIds = Set.empty } From 2377db91f305e536692f11ff576481733f1ce26e Mon Sep 17 00:00:00 2001 From: Thomas BAGREL Date: Tue, 14 Oct 2025 10:07:03 +0200 Subject: [PATCH 21/43] clean up code in decision process --- .../ObjectDiffusion/Inbound/V2/Decision.hs | 36 +++++++++++-------- 1 file changed, 21 insertions(+), 15 deletions(-) diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Decision.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Decision.hs index b943774bbb..84cbdf674b 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Decision.hs +++ 
b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Decision.hs @@ -149,6 +149,8 @@ orderPeers = undefined -- So if the requestIds doing the ack has been made before the requestObject, then the server -- won't be able to serve the object. +-- pdNumIdsToAck should probably be additive, because we can't recompute/recover how many ids were pre-acked before (as they have been removed from the FIFO and from dpsObjectsAvailableIds) + -- | This function could just be pure if it hadn't be for the rng used to order peers pickObjectsToReq :: forall peerAddr objectId object. @@ -162,14 +164,9 @@ pickObjectsToReq :: (DecisionGlobalState peerAddr objectId object , Map peerAddr (Set objectId)) pickObjectsToReq poolHasObject DecisionPolicy{dpMaxNumObjectsInflightPerPeer, dpMaxNumObjectsInflightTotal,dpMaxObjectInflightMultiplicity} globalState@DecisionGlobalState{dgsRng, dgsPeerStates, dgsObjectsInflightMultiplicities, dgsObjectsOwtPoolMultiplicities} = - -- objects that are inflight or owtPool for a given peer are no longer in dpsObjectsAvailableIds of this peer - -- so we only need to filter out dpsObjectsAvailableIds by removing the objects that are already in pool - - let objectsExpectedSoonMultiplicities = Map.unionWith (+) dgsObjectsInflightMultiplicities dgsObjectsOwtPoolMultiplicities - - (orderedPeers, dgsRng') = orderPeers dgsPeerStates dgsRng + let (orderedPeers, dgsRng') = orderPeers dgsPeerStates dgsRng - -- We want to map each objectId to the sorted list of peers that can provide it (and from which we should download them preferably) + -- We want to map each objectId to the sorted list of peers that can provide it -- For each peer we also indicate how many objects it has in flight at the moment -- We filter out here the objects that are already in pool objectsToSortedProviders :: Map objectId [(peerAddr, NumObjectsReq)] @@ -185,11 +182,13 @@ pickObjectsToReq poolHasObject 
DecisionPolicy{dpMaxNumObjectsInflightPerPeer, dp ) Map.empty orderedPeers - - totalNumObjectsInflight :: NumObjectsReq - totalNumObjectsInflight = fromIntegral $ Map.foldl' (+) 0 dgsObjectsInflightMultiplicities - - -- Now we combine these maps for easy fold + + -- We also want to know for each objects how many peers have it in the inflight or owtPool, + -- meaning that we should receive them soon. + objectsExpectedSoonMultiplicities :: Map objectId ObjectMultiplicity + objectsExpectedSoonMultiplicities = Map.unionWith (+) dgsObjectsInflightMultiplicities dgsObjectsOwtPoolMultiplicities + + -- Now we join objectsToSortedProviders and objectsExpectedSoonMultiplicities maps on objectId for easy fold objectsToProvidersAndExpectedMultiplicities :: Map objectId ([(peerAddr, NumObjectsReq)], ObjectMultiplicity) objectsToProvidersAndExpectedMultiplicities = Map.merge @@ -202,6 +201,7 @@ pickObjectsToReq poolHasObject DecisionPolicy{dpMaxNumObjectsInflightPerPeer, dp objectsToSortedProviders objectsExpectedSoonMultiplicities + -- Now we compute the actual attribution of downloads for peers St{peersToObjectsToReq} = -- We iterate over each objectId and the corresponding (providers, expectedMultiplicity) Map.foldlWithKey' @@ -209,8 +209,11 @@ pickObjectsToReq poolHasObject DecisionPolicy{dpMaxNumObjectsInflightPerPeer, dp -- reset the objectMultiplicity counter for each new objectId let st' = st{objectMultiplicity = 0} + -- We iterate over the list of providers, and pick them or not according to the current state + -- When a peer is selected as a provider for this objectId, we insert the objectId in the peer's set in peersToObjectsToReq (inside St) + -- So the result of the filtering of providers is part of the final St state in Foldable.foldl' - (howToFoldProviderList objectId expectedMultiplicity) + (howToFoldProviders objectId expectedMultiplicity) st' providers ) @@ -221,10 +224,13 @@ pickObjectsToReq poolHasObject DecisionPolicy{dpMaxNumObjectsInflightPerPeer, dp } 
objectsToProvidersAndExpectedMultiplicities + totalNumObjectsInflight :: NumObjectsReq + totalNumObjectsInflight = fromIntegral $ Map.foldl' (+) 0 dgsObjectsInflightMultiplicities + -- This function decides whether or not we should select a given peer as provider for the current objectId -- it takes into account if we are expecting to obtain the object from other sources (either inflight/owt pool already, or if the object will be requested from already selected peers in this given round) - howToFoldProviderList :: objectId -> ObjectMultiplicity -> St peerAddr objectId -> (peerAddr, NumObjectsReq) -> St peerAddr objectId - howToFoldProviderList objectId expectedMultiplicity st@St{totalNumObjectsToReq, objectMultiplicity, peersToObjectsToReq} (peerAddr, numObjectsInFlight) = + howToFoldProviders :: objectId -> ObjectMultiplicity -> St peerAddr objectId -> (peerAddr, NumObjectsReq) -> St peerAddr objectId + howToFoldProviders objectId expectedMultiplicity st@St{totalNumObjectsToReq, objectMultiplicity, peersToObjectsToReq} (peerAddr, numObjectsInFlight) = let -- see what has already been attributed to this peer objectsToReq = Map.findWithDefault Set.empty peerAddr peersToObjectsToReq From e1a59875a6a36e6f96f0928b48d08eaf47da9302 Mon Sep 17 00:00:00 2001 From: Thomas BAGREL Date: Tue, 14 Oct 2025 11:12:06 +0200 Subject: [PATCH 22/43] Finalize new decision logic --- .../ObjectDiffusion/Inbound/V2.hs | 6 +- .../ObjectDiffusion/Inbound/V2.mermaid | 14 +- .../ObjectDiffusion/Inbound/V2/Decision.hs | 719 ++---------------- .../ObjectDiffusion/Inbound/V2/Registry.hs | 27 +- .../ObjectDiffusion/Inbound/V2/State.hs | 67 +- .../ObjectDiffusion/Inbound/V2/Types.hs | 31 - 6 files changed, 127 insertions(+), 737 deletions(-) diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2.hs index 2bca43e9de..335c85fe71 
100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2.hs @@ -195,7 +195,7 @@ objectDiffusionInbound objectIdsMap = Map.fromList objectIds' when (StrictSeq.length receivedIdsSeq > fromIntegral objectIdsToReq) $ throwIO ProtocolErrorObjectIdsNotRequested - onReceivedIds objectIdsToReq receivedIdsSeq objectIdsMap + onReceiveIds objectIdsToReq receivedIdsSeq objectIdsMap goIdle ) goReqObjectIds @@ -253,7 +253,7 @@ objectDiffusionInbound objectIdsMap = Map.fromList objectIds unless (StrictSeq.length receivedIdsSeq <= fromIntegral objectIdsToReq) $ throwIO ProtocolErrorObjectIdsNotRequested - onReceivedIds objectIdsToReq receivedIdsSeq objectIdsMap + onReceiveIds objectIdsToReq receivedIdsSeq objectIdsMap k CollectObjects objectIds objects -> do let requested = Map.keysSet objectIds @@ -262,7 +262,7 @@ objectDiffusionInbound unless (Map.keysSet received `Set.isSubsetOf` requested) $ throwIO ProtocolErrorObjectNotRequested - mbe <- onReceivedObjects objectIds received + mbe <- onReceiveObjects objectIds received traceWith tracer $ TraceObjectDiffusionCollected (getId `map` objects) case mbe of -- one of `object`s had a wrong size diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2.mermaid b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2.mermaid index 91d26e5700..71b9768535 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2.mermaid +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2.mermaid @@ -8,30 +8,30 @@ flowchart TD H(dgsObjectsInflightMultiplicities) I(dgsObjectsOwtPoolMultiplicities) - EA{requestIds} + EA{onRequestIds} EA-->|+count| A + B -->|-ids| EA 
+ C -->|-ids| EA - EB{handleReceiveIds} + EB{onReceiveIds} A -->|-count| EB EB -->|+ids| B IN1@{ shape: lin-cyl, label: "ids" } --o EB EB -->|+ids| C - EC{requestObjects} + EC{onRequestObjects} C -->|-ids| EC EC -->|+ids| D EC -->|+count| H - ED{handleReceiveObjects / submitToPool} + ED{onReceiveObjects / submitToPool} D -->|-ids| ED H -->|-count| ED IN2@{ shape: lin-cyl, label: "objects" } --o ED ED -->|+objects| F ED -->|+count| I - EE{makeDecisionPreAcknowledge} - B -->|-ids| EE - C -->|-ids| EE + EE{makeDecisions} EE -.->|readDecision : pdIdsToAck + pdIdsToReq + pdCanPipelineIdsReq/| EA EE -.->|readDecision : pdObjectsToReqIds| EC diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Decision.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Decision.hs index 84cbdf674b..248096933f 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Decision.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Decision.hs @@ -14,8 +14,6 @@ module Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.Decision -- * Internal API exposed for testing , makeDecisions - , filterActivePeers - , pickObjectsToDownload ) where import Control.Arrow ((>>>)) @@ -37,6 +35,10 @@ import Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.State import Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.Types import Ouroboros.Network.Protocol.ObjectDiffusion.Type import System.Random (random, StdGen) +import Data.Sequence.Strict (StrictSeq) + +strictSeqToSet :: Ord a => StrictSeq a -> Set a +strictSeqToSet = Set.fromList . Foldable.toList -- | Make download decisions. 
makeDecisions :: @@ -54,16 +56,17 @@ makeDecisions :: , Map peerAddr (PeerDecision objectId object) ) makeDecisions hasObject decisionPolicy globalState = - -- We do it in two steps, because pre-acknowledging will remove objects from dpsObjectsAvailableIds sets of each peer, - -- that the pickObjectsToReq function will use to decide which objects can be requested. - let (globalState', ackAndRequestIdsDecisions) = preAcknowledge hasObject decisionPolicy globalState - (globalState'', objectsToReqSets) = pickObjectsToReq hasObject decisionPolicy globalState' - in (globalState'', Map.intersectionWith (\decision objectsToReqIds -> decision{ pdObjectsToReqIds = objectsToReqIds }) ackAndRequestIdsDecisions objectsToReqSets) - --- | We pre-acknowledge the longest prefix of outstandingFifo of each peer that match the following criteria: + -- We do it in two steps, because computing the acknowledgment tell which objects from dpsObjectsAvailableIds sets of each peer won't actually be available anymore (as soon as we ack them), + -- so that the pickObjectsToReq function can take this into account. + let (ackAndRequestIdsDecisions, peerToIdsToAck) = computeAck hasObject decisionPolicy globalState + (globalState', peersToObjectsToReq) = pickObjectsToReq hasObject decisionPolicy globalState peerToIdsToAck + completeDecisions = Map.intersectionWith (\decision objectsToReqIds -> decision{ pdObjectsToReqIds = objectsToReqIds }) ackAndRequestIdsDecisions peersToObjectsToReq + in (globalState', completeDecisions) + +-- | The ids to ack are the longest prefix of outstandingFifo of each peer that match the following criteria: -- * either the object is owt pool for the peer who has downloaded it -- * or the object is already in pool -preAcknowledge :: +computeAck :: forall peerAddr objectId object. 
( Ord peerAddr , Ord objectId @@ -71,56 +74,44 @@ preAcknowledge :: (objectId -> Bool) -> DecisionPolicy -> DecisionGlobalState peerAddr objectId object -> - ( DecisionGlobalState peerAddr objectId object - , Map peerAddr (PeerDecision objectId object) + ( Map peerAddr (PeerDecision objectId object) + , Map peerAddr (Set objectId) ) -preAcknowledge poolHasObject decisionPolicy globalState@DecisionGlobalState{dgsPeerStates} = - -- We use `Map.mapAccumWithKey` to traverse the peer states and both update - -- the peer states and accumulate ack decisions made along the way. - let (decisions, dgsPeerStates') = - Map.mapAccumWithKey preAcknowledgeForPeer Map.empty dgsPeerStates - in ( globalState{dgsPeerStates = dgsPeerStates'} - , decisions +computeAck poolHasObject DecisionPolicy{dpMaxNumObjectIdsReq, dpMaxNumObjectsOutstanding} DecisionGlobalState{dgsPeerStates} = + let (decisions, peerToIdsToAck) = + Map.foldlWithKey' computeAckForPeer (Map.empty, Map.empty) dgsPeerStates + in ( decisions + , peerToIdsToAck ) where - preAcknowledgeForPeer :: + computeAckForPeer :: -- | Accumulator containing decisions already made for other peers -- It's a map in which we need to insert the new decision into - Map peerAddr (PeerDecision objectId object) -> + (Map peerAddr (PeerDecision objectId object), Map peerAddr (Set objectId)) -> peerAddr -> DecisionPeerState objectId object -> - (Map peerAddr (PeerDecision objectId object), DecisionPeerState objectId object) - preAcknowledgeForPeer decisionsAcc peerAddr peerState@DecisionPeerState{dpsOutstandingFifo, dpsObjectsAvailableIds, dpsObjectsOwtPool} = + (Map peerAddr (PeerDecision objectId object), Map peerAddr (Set objectId)) + computeAckForPeer (decisionsAcc, peerToIdsToAck) peerAddr DecisionPeerState{dpsOutstandingFifo, dpsObjectsOwtPool, dpsNumIdsInflight} = let - -- we isolate the longest prefix of outstandingFifo that matches our ack criteria (see above in preAcknowledge doc) + -- we isolate the longest prefix of outstandingFifo 
that matches our ack criteria (see above in computeAck doc) (idsToAck, dpsOutstandingFifo') = StrictSeq.spanl (\objectId -> poolHasObject objectId || objectId `Map.member` dpsObjectsOwtPool) dpsOutstandingFifo - -- we remove the acknowledged ids from dpsObjectsAvailableIds if they were present - -- we need to do that because objects that were advertised by this corresponding outbound peer - -- but never downloaded because we already have them in pool were consequently never removed - -- from dpsObjectsAvailableIds by onRequestObjects - dpsObjectsAvailableIds' = - Foldable.foldl' (\set objectId -> Set.delete objectId set) dpsObjectsAvailableIds idsToAck - - pdNumIdsToAck = fromIntegral $ StrictSeq.length idsToAck - -- should this be incremental or overwrite the previous value in the semigroup instance? - pdNumIdsToReq = - -- numOfUnacked = StrictSeq.length dpsOutstandingFifo - -- numOfAcked = StrictSeq.length acknowledgedObjectIds' - -- unackedAndRequested = fromIntegral numOfUnacked + dpsNumIdsInflight + futureFifoSizeOnOutboundPeer :: NumObjectIdsReq = + -- the new known fifo state after we ack the idsToAck + (fromIntegral $ StrictSeq.length dpsOutstandingFifo') + -- plus the number of ids that we have already requested but we didn't receive yet + -- that the outbound peer might consequently already have added to its fifo + + dpsNumIdsInflight - -- pdNumIdsToReq = - -- assert (unackedAndRequested <= dpMaxNumObjectsOutstanding) $ - -- assert (dpsNumIdsInflight <= dpMaxNumObjectIdsReq) $ - -- (dpMaxNumObjectsOutstanding - unackedAndRequested + fromIntegral numOfAcked) - -- `min` (dpMaxNumObjectIdsReq - dpsNumIdsInflight) - undefined -- TODO + pdNumIdsToReq = + (fromIntegral dpMaxNumObjectsOutstanding - futureFifoSizeOnOutboundPeer) + `min` dpMaxNumObjectIdsReq pdCanPipelineIdsRequests = not . 
StrictSeq.null $ dpsOutstandingFifo' @@ -131,12 +122,7 @@ preAcknowledge poolHasObject decisionPolicy globalState@DecisionGlobalState{dgsP , pdObjectsToReqIds = Set.empty -- we don't decide this here } - peerState' = peerState - { dpsOutstandingFifo = dpsOutstandingFifo' - , dpsObjectsAvailableIds = dpsObjectsAvailableIds' - } - - in (Map.insert peerAddr peerDecision decisionsAcc, peerState') + in (Map.insert peerAddr peerDecision decisionsAcc, Map.insert peerAddr (strictSeqToSet idsToAck) peerToIdsToAck) orderPeers :: Map peerAddr (DecisionPeerState objectId object) -> StdGen -> ([(peerAddr, DecisionPeerState objectId object)], StdGen) orderPeers = undefined @@ -161,24 +147,35 @@ pickObjectsToReq :: (objectId -> Bool) -> DecisionPolicy -> DecisionGlobalState peerAddr objectId object -> - (DecisionGlobalState peerAddr objectId object - , Map peerAddr (Set objectId)) -pickObjectsToReq poolHasObject DecisionPolicy{dpMaxNumObjectsInflightPerPeer, dpMaxNumObjectsInflightTotal,dpMaxObjectInflightMultiplicity} globalState@DecisionGlobalState{dgsRng, dgsPeerStates, dgsObjectsInflightMultiplicities, dgsObjectsOwtPoolMultiplicities} = - let (orderedPeers, dgsRng') = orderPeers dgsPeerStates dgsRng + -- | map from peer to the set of ids that will be acked for that peer on next requestIds + -- we should treat these ids as not available anymore for the purpose of picking objects to request + Map peerAddr (Set objectId) -> + -- | new global state (with just RNG updated), and objects to request from each peer + (DecisionGlobalState peerAddr objectId object, Map peerAddr (Set objectId)) +pickObjectsToReq poolHasObject DecisionPolicy{dpMaxNumObjectsInflightPerPeer, dpMaxNumObjectsInflightTotal,dpMaxObjectInflightMultiplicity} globalState@DecisionGlobalState{dgsRng, dgsPeerStates, dgsObjectsInflightMultiplicities, dgsObjectsOwtPoolMultiplicities} peerToIdsToAck = + (globalState{dgsRng = dgsRng'}, peersToObjectsToReq) + where + (orderedPeers, dgsRng') = orderPeers dgsPeerStates 
dgsRng -- We want to map each objectId to the sorted list of peers that can provide it -- For each peer we also indicate how many objects it has in flight at the moment -- We filter out here the objects that are already in pool objectsToSortedProviders :: Map objectId [(peerAddr, NumObjectsReq)] objectsToSortedProviders = - -- We iterate over each peer and the corresponding dpsObjectsAvailableIds + -- We iterate over each peer and the corresponding available ids + -- and turn the map "inside-out" Foldable.foldl' ( \accMap (peerAddr, DecisionPeerState{dpsObjectsAvailableIds, dpsObjectsInflightIds}) -> - -- For each peer, we iterate over dpsObjectsAvailableIds filtered from the objects that are already in pool - Foldable.foldl' - (\accMap' objectId -> Map.insertWith (++) objectId [(peerAddr, fromIntegral $ Set.size dpsObjectsInflightIds)] accMap') - accMap - (Set.filter (not . poolHasObject) dpsObjectsAvailableIds) + let -- ids that will be acked for this peer won't be available anymore, so we should not consider them in the decision logic + idsToAckForThisPeer = Map.findWithDefault (error "invariant violated: peer must be in peerToIdsToAck map") peerAddr peerToIdsToAck + -- we should also remove objects that are already in the pool + interestingAndAvailableObjectIds = Set.filter (not . 
poolHasObject) $ + dpsObjectsAvailableIds `Set.difference` idsToAckForThisPeer + in -- we iterate over interestingAndAvailableObjectIds and add the peer to the list of providers for each object it can provide + Foldable.foldl' + (\accMap' objectId -> Map.insertWith (++) objectId [(peerAddr, fromIntegral $ Set.size dpsObjectsInflightIds)] accMap') + accMap + interestingAndAvailableObjectIds ) Map.empty orderedPeers @@ -202,7 +199,7 @@ pickObjectsToReq poolHasObject DecisionPolicy{dpMaxNumObjectsInflightPerPeer, dp objectsExpectedSoonMultiplicities -- Now we compute the actual attribution of downloads for peers - St{peersToObjectsToReq} = + DownloadPickState{peersToObjectsToReq} = -- We iterate over each objectId and the corresponding (providers, expectedMultiplicity) Map.foldlWithKey' ( \st objectId (providers, expectedMultiplicity) -> @@ -217,7 +214,7 @@ pickObjectsToReq poolHasObject DecisionPolicy{dpMaxNumObjectsInflightPerPeer, dp st' providers ) - St{ + DownloadPickState{ totalNumObjectsToReq = 0 , objectMultiplicity = 0 , peersToObjectsToReq = Map.empty @@ -229,8 +226,8 @@ pickObjectsToReq poolHasObject DecisionPolicy{dpMaxNumObjectsInflightPerPeer, dp -- This function decides whether or not we should select a given peer as provider for the current objectId -- it takes into account if we are expecting to obtain the object from other sources (either inflight/owt pool already, or if the object will be requested from already selected peers in this given round) - howToFoldProviders :: objectId -> ObjectMultiplicity -> St peerAddr objectId -> (peerAddr, NumObjectsReq) -> St peerAddr objectId - howToFoldProviders objectId expectedMultiplicity st@St{totalNumObjectsToReq, objectMultiplicity, peersToObjectsToReq} (peerAddr, numObjectsInFlight) = + howToFoldProviders :: objectId -> ObjectMultiplicity -> DownloadPickState peerAddr objectId -> (peerAddr, NumObjectsReq) -> DownloadPickState peerAddr objectId + howToFoldProviders objectId expectedMultiplicity 
st@DownloadPickState{totalNumObjectsToReq, objectMultiplicity, peersToObjectsToReq} (peerAddr, numObjectsInFlight) = let -- see what has already been attributed to this peer objectsToReq = Map.findWithDefault Set.empty peerAddr peersToObjectsToReq @@ -245,7 +242,7 @@ pickObjectsToReq poolHasObject DecisionPolicy{dpMaxNumObjectsInflightPerPeer, dp in if shouldSelect then -- We increase both global count and per-object count, and we add the object to the peer's set - St + DownloadPickState { totalNumObjectsToReq = totalNumObjectsToReq + 1 , objectMultiplicity = objectMultiplicity + 1 , peersToObjectsToReq = Map.insert peerAddr (Set.insert objectId objectsToReq) peersToObjectsToReq @@ -253,593 +250,9 @@ pickObjectsToReq poolHasObject DecisionPolicy{dpMaxNumObjectsInflightPerPeer, dp -- Or we keep the state as is if we don't select this peer else st - in (globalState{dgsRng = dgsRng'}, peersToObjectsToReq) - -data St peerAddr objectId = St { totalNumObjectsToReq :: !NumObjectsReq, objectMultiplicity :: ObjectMultiplicity, peersToObjectsToReq :: Map peerAddr (Set objectId) } - -------------------------------------------------------------------------------- --- OLD STUFF ONLY HERE FOR REFERENCE -------------------------------------------------------------------------------- - --- | Internal state of `pickObjectsToDownload` computation. -data DecisionInternalState peerAddr objectId object - = DecisionInternalState - { disNumObjectsInflight :: !NumObjectsReq - -- ^ number of all `object`s in-flight. - , disObjectsInflightMultiplicities :: !(Map objectId ObjectMultiplicity) - -- ^ `objectId`s in-flight. - , disIdsToAckMultiplicities :: !(Map objectId ObjectMultiplicity) - -- ^ acknowledged `objectId` with multiplicities. It is used to update - -- `dgsObjectsPendingMultiplicities`. - , disObjectsOwtPoolIds :: Set objectId - -- ^ objects on their way to the objectpool. Used to prevent issueing new - -- fetch requests for them. 
- } - --- | Distribute `object`'s to download among available peers. Peers are considered --- in the given order. --- --- * pick objects from the set of available object's (in `objectId` order, note these sets --- might be different for different peers). --- * pick objects until the peers in-flight limit (we can go over the limit by one object) --- (`dpMaxNumObjectsInflightPerPeer` limit) --- * pick objects until the overall in-flight limit (we can go over the limit by one object) --- (`dpMaxNumObjectsInflightTotal` limit) --- * each object can be downloaded simultaneously from at most --- `dpMaxObjectInflightMultiplicity` peers. -pickObjectsToDownload :: - forall peerAddr objectId object. - ( Ord peerAddr - , Ord objectId - ) => - -- | decision decisionPolicy - DecisionPolicy -> - -- | shared state - DecisionGlobalState peerAddr objectId object -> - [(peerAddr, DecisionPeerState objectId object)] -> - ( DecisionGlobalState peerAddr objectId object - , [(peerAddr, PeerDecision objectId object)] - ) -pickObjectsToDownload - decisionPolicy@DecisionPolicy - { dpMaxNumObjectsInflightPerPeer - , dpMaxNumObjectsInflightTotal - , dpMaxObjectInflightMultiplicity +data DownloadPickState peerAddr objectId = + DownloadPickState + { totalNumObjectsToReq :: !NumObjectsReq, + objectMultiplicity :: ObjectMultiplicity, + peersToObjectsToReq :: Map peerAddr (Set objectId) } - sharedState@DecisionGlobalState - { dgsPeerStates - , dgsObjectsInflightMultiplicities - , dgsObjectsOwtPoolMultiplicities - } = - -- outer fold: fold `[(peerAddr, DecisionPeerState objectId object)]` - List.mapAccumR - accumFn - -- initial state - DecisionInternalState - { disObjectsInflightMultiplicities = dgsObjectsInflightMultiplicities - , disNumObjectsInflight = fromIntegral $ Map.foldl' (+) 0 dgsObjectsInflightMultiplicities - -- Thomas: not sure here if we must count disctinct objects in flight, or total number of objects in flight (considering multiplicities) - , disIdsToAckMultiplicities = Map.empty 
- , disObjectsOwtPoolIds = Map.keysSet dgsObjectsOwtPoolMultiplicities - } - >>> gn - where - accumFn :: - DecisionInternalState peerAddr objectId object -> - (peerAddr, DecisionPeerState objectId object) -> - ( DecisionInternalState peerAddr objectId object - , ( (peerAddr, DecisionPeerState objectId object) - , PeerDecision objectId object - ) - ) - accumFn - st@DecisionInternalState - { disObjectsInflightMultiplicities - , disNumObjectsInflight - , disIdsToAckMultiplicities - , disObjectsOwtPoolIds - } - ( peerAddr - , peerObjectState@DecisionPeerState - { dpsObjectsAvailableIds - , dpsObjectsInflightIds - } - ) = - let sizeInflightAll :: NumObjectsReq - sizeInflightOther :: NumObjectsReq - - sizeInflightAll = disNumObjectsInflight - sizeInflightOther = sizeInflightAll - fromIntegral (Set.size dpsObjectsInflightIds) - in if sizeInflightAll >= dpMaxNumObjectsInflightTotal - then - let ( numObjectIdsToAck - , numObjectIdsToReq - , pdObjectsToSubmitToPoolIds - , RefCountDiff{rcdIdsToAckMultiplicities} - , peerObjectState' - ) = acknowledgeObjectIds decisionPolicy sharedState peerObjectState - - disIdsToAckMultiplicities' = Map.unionWith (+) disIdsToAckMultiplicities rcdIdsToAckMultiplicities - disObjectsOwtPoolIds' = - disObjectsOwtPoolIds - <> Map.keysSet pdObjectsToSubmitToPoolIds - in if dpsNumIdsInflight peerObjectState' > 0 - then - -- we have objectIds to request - ( st - { disIdsToAckMultiplicities = disIdsToAckMultiplicities' - , disObjectsOwtPoolIds = disObjectsOwtPoolIds' - } - , - ( (peerAddr, peerObjectState') - , PeerDecision - { pdNumIdsToAck = numObjectIdsToAck - , pdNumIdsToReq = numObjectIdsToReq - , pdCanPipelineIdsRequests = - not - . StrictSeq.null - . 
dpsOutstandingFifo - $ peerObjectState' - , pdObjectsToReqIds = Set.empty - } - ) - ) - else - -- there are no `objectId`s to request, nor we can request `object`s due - -- to in-flight size limits - ( st - , - ( (peerAddr, peerObjectState') - , mempty - ) - ) - else - let dpsObjectsInflightIdsNum' :: NumObjectsReq - pdObjectsToReqIdsMap :: Set objectId - - (dpsObjectsInflightIdsNum', pdObjectsToReqIdsMap) = - -- inner fold: fold available `objectId`s - -- - -- Note: although `Map.foldrWithKey` could be used here, it - -- does not allow to short circuit the fold, unlike - -- `foldWithState`. - foldWithState - ( \(objectId, (_objectSize, inflightMultiplicity)) sizeInflight -> - if -- note that we pick `objectId`'s as long the `s` is - -- smaller or equal to `dpMaxNumObjectsInflightPerPeer`. - sizeInflight <= dpMaxNumObjectsInflightPerPeer - -- overall `object`'s in-flight must be smaller than - -- `dpMaxNumObjectsInflightTotal` - && sizeInflight + sizeInflightOther <= dpMaxNumObjectsInflightTotal - -- the object must not be downloaded from more - -- than `dpMaxObjectInflightMultiplicity` peers simultaneously - && inflightMultiplicity < dpMaxObjectInflightMultiplicity - -- TODO: we must validate that `objectSize` is smaller than - -- maximum objects size - then Just (sizeInflight + objectSize, (objectId, objectSize)) - else Nothing - ) - ( Map.assocs $ - -- merge `dpsObjectsAvailableIds` with `disObjectsInflightMultiplicities`, so we don't - -- need to lookup into `disObjectsInflightMultiplicities` on every `objectId` which - -- is in `dpsObjectsAvailableIds`. - Map.merge - (Map.mapMaybeMissing \_objectId -> Just . (,0)) - Map.dropMissing - (Map.zipWithMatched \_objectId -> (,)) - dpsObjectsAvailableIds - disObjectsInflightMultiplicities - -- remove `object`s which were already downloaded by some - -- other peer or are in-flight or unknown by this peer. 
- `Set.unions` ( Map.keysSet dgsObjectsPendingMultiplicities - <> dpsObjectsInflightIds - <> dpsObjectsRequestedButNotReceivedIds - <> disObjectsOwtPoolIds - ) - ) - dpsObjectsInflightIdsNum - -- pick from `objectId`'s which are available from that given - -- peer. Since we are folding a dictionary each `objectId` - -- will be selected only once from a given peer (at least - -- in each round). - - pdObjectsToReqIds = Map.keysSet pdObjectsToReqIdsMap - peerObjectState' = peerObjectState {dpsObjectsInflightIds = dpsObjectsInflightIds <> pdObjectsToReqIds} - - ( numObjectIdsToAck - , numObjectIdsToReq - , pdObjectsToSubmitToPoolIds - , RefCountDiff{rcdIdsToAckMultiplicities} - , peerObjectState'' - ) = acknowledgeObjectIds decisionPolicy sharedState peerObjectState' - - disIdsToAckMultiplicities' = Map.unionWith (+) disIdsToAckMultiplicities rcdIdsToAckMultiplicities - - stInflightDelta :: Map objectId Int - stInflightDelta = Map.fromSet (\_ -> 1) pdObjectsToReqIds - -- note: this is right since every `objectId` - -- could be picked at most once - - disObjectsInflightMultiplicities' :: Map objectId Int - disObjectsInflightMultiplicities' = Map.unionWith (+) stInflightDelta disObjectsInflightMultiplicities - - disObjectsOwtPoolIds' = - disObjectsOwtPoolIds - <> Set.fromList (map fst pdObjectsToSubmitToPoolIds) - in if dpsNumIdsInflight peerObjectState'' > 0 - then - -- we can request `objectId`s & `object`s - ( DecisionInternalState - { disObjectsInflightMultiplicities = disObjectsInflightMultiplicities' - , disNumObjectsInflight = undefined - , disIdsToAckMultiplicities = disIdsToAckMultiplicities' - , disObjectsOwtPoolIds = disObjectsOwtPoolIds' - } - , - ( (peerAddr, peerObjectState'') - , PeerDecision - { pdNumIdsToAck = numObjectIdsToAck - , pdCanPipelineIdsRequests = - not - . StrictSeq.null - . 
dpsOutstandingFifo - $ peerObjectState'' - , pdNumIdsToReq = numObjectIdsToReq - , pdObjectsToReqIds = pdObjectsToReqIdsMap - } - ) - ) - else - -- there are no `objectId`s to request, only `object`s. - ( st - { disObjectsInflightMultiplicities = disObjectsInflightMultiplicities' - , disObjectsOwtPoolIds = disObjectsOwtPoolIds' - } - , - ( (peerAddr, peerObjectState'') - , mempty{pdObjectsToReqIds = pdObjectsToReqIdsMap} - ) - ) - - gn :: - ( DecisionInternalState peerAddr objectId object - , [((peerAddr, DecisionPeerState objectId object), PeerDecision objectId object)] - ) -> - ( DecisionGlobalState peerAddr objectId object - , [(peerAddr, PeerDecision objectId object)] - ) - gn - ( DecisionInternalState - { disObjectsInflightMultiplicities - , disIdsToAckMultiplicities - } - , as - ) = - let dgsPeerStates' = - Map.fromList ((\(a, _) -> a) <$> as) - <> dgsPeerStates - - dgsObjectsPendingMultiplicities' = - Map.merge - (Map.mapMaybeMissing \_ x -> Just x) - (Map.mapMaybeMissing \_ _ -> assert False Nothing) - ( Map.zipWithMaybeMatched \_ x y -> - if x > y - then Just $! x - y - else Nothing - ) - dgsObjectsPendingMultiplicities - disIdsToAckMultiplicities - - dgsObjectsOwtPoolMultiplicities' = - List.foldl' updateInSubmissionToObjectPoolObjects dgsObjectsOwtPoolMultiplicities as - in ( sharedState - { dgsPeerStates = dgsPeerStates' - , dgsObjectsInflightMultiplicities = disObjectsInflightMultiplicities - , dgsObjectsOwtPoolMultiplicities = dgsObjectsOwtPoolMultiplicities' - } - , -- exclude empty results - mapMaybe - ( \((a, _), b) -> case b of - PeerDecision - { pdNumIdsToAck = 0 - , pdNumIdsToReq = 0 - , pdObjectsToReqIds - } - | null pdObjectsToReqIds - , Map.null pdObjectsToSubmitToPoolIds -> - Nothing - _ -> Just (a, b) - ) - as - ) - where - updateInSubmissionToObjectPoolObjects :: - forall a. 
- Map objectId Int -> - (a, PeerDecision objectId object) -> - Map objectId Int - updateInSubmissionToObjectPoolObjects m (_, PeerDecision{}) = - List.foldl' fn m (Map.toList pdObjectsToSubmitToPoolIds) - where - fn :: - Map objectId Int -> - (objectId, object) -> - Map objectId Int - fn x (objectId, _) = - Map.alter - ( \case - Nothing -> Just 1 - Just n -> Just $! succ n - ) - objectId - x - --- | Filter peers which can either download an `object` or acknowledge `objectId`s. -filterActivePeers :: - forall peerAddr objectId object. - Ord objectId => - HasCallStack => - DecisionPolicy -> - DecisionGlobalState peerAddr objectId object -> - Map peerAddr (DecisionPeerState objectId object) -filterActivePeers - decisionPolicy@DecisionPolicy - { dpMaxNumObjectsOutstanding - , dpMaxObjectInflightMultiplicity - } - globalState@DecisionGlobalState - { dgsPeerStates - , dgsObjectsInflightMultiplicities - , dgsObjectsOwtPoolMultiplicities - } - = Map.filter gn dgsPeerStates - where - unrequestable = - Map.keysSet (Map.filter (>= dpMaxObjectInflightMultiplicity) dgsObjectsInflightMultiplicities) - <> Map.keysSet dgsObjectsPendingMultiplicities - - fn :: DecisionPeerState objectId object -> Bool - fn - peerObjectState@DecisionPeerState - { dpsNumIdsInflight - } = - dpsNumIdsInflight == 0 - -- if a peer has objectIds in-flight, we cannot request more objectIds or objects. - && dpsNumIdsInflight + numOfUnacked <= dpMaxNumObjectsOutstanding - && pdNumIdsToReq > 0 - where - -- Split `dpsOutstandingFifo'` into the longest prefix of `objectId`s which - -- can be acknowledged and the unacknowledged `objectId`s. 
- (pdNumIdsToReq, _, unackedObjectIds) = splitAcknowledgedObjectIds decisionPolicy globalState peerObjectState - numOfUnacked = fromIntegral (StrictSeq.length unackedObjectIds) - - gn :: DecisionPeerState objectId object -> Bool - gn - peerObjectState@DecisionPeerState - { dpsOutstandingFifo - , dpsNumIdsInflight - , dpsObjectsInflightIds - , dpsObjectsAvailableIds - } = - ( dpsNumIdsInflight == 0 - && dpsNumIdsInflight + numOfUnacked <= dpMaxNumObjectsOutstanding - && pdNumIdsToReq > 0 - ) - || (not (Set.null downloadable)) - where - numOfUnacked = fromIntegral (StrictSeq.length dpsOutstandingFifo) - downloadable = - dpsObjectsAvailableIds - `Set.difference` dpsObjectsInflightIds - `Set.difference` dpsObjectsRequestedButNotReceivedIds - `Set.difference` unrequestable - `Set.difference` Map.keysSet dgsObjectsOwtPoolMultiplicities - - -- Split `dpsOutstandingFifo'` into the longest prefix of `objectId`s which - -- can be acknowledged and the unacknowledged `objectId`s. - (pdNumIdsToReq, _, _) = splitAcknowledgedObjectIds decisionPolicy globalState peerObjectState - --- --- Auxiliary functions --- - --- | A fold with state implemented as a `foldr` to take advantage of fold-build --- fusion optimisation. -foldWithState :: - forall s a b c. - Ord b => - (a -> s -> Maybe (s, (b, c))) -> - [a] -> - s -> - (s, Map b c) -{-# INLINE foldWithState #-} -foldWithState f = foldr cons nil - where - cons :: - a -> - (s -> (s, Map b c)) -> - (s -> (s, Map b c)) - cons a k = \ !s -> - case f a s of - Nothing -> nil s - Just (!s', (!b, !c)) -> - case Map.insert b c `second` k s' of - r@(!_s, !_bs) -> r - - nil :: s -> (s, Map b c) - nil = \ !s -> (s, Map.empty) - --- --- Pure public API --- - -acknowledgeObjectIds :: - forall peerAddr object objectId. 
- Ord objectId => - HasCallStack => - DecisionPolicy -> - DecisionGlobalState peerAddr objectId object -> - DecisionPeerState objectId object -> - -- | number of objectId to acknowledge, requests, objects which we can submit to the - -- objectpool, objectIds to acknowledge with multiplicities, updated DecisionPeerState. - ( NumObjectIdsAck - , NumObjectIdsReq - , Map objectId object - -- ^ objectsOwtPool - , RefCountDiff objectId - , DecisionPeerState objectId object - ) -{-# INLINE acknowledgeObjectIds #-} -acknowledgeObjectIds - decisionPolicy - globalState - ps@DecisionPeerState - { dpsObjectsAvailableIds - , dpsNumIdsInflight - , dpsObjectsOwtPool - } = - -- We can only acknowledge objectIds when we can request new ones, since - -- a `MsgRequestObjectIds` for 0 objectIds is a protocol error. - if pdNumIdsToReq > 0 - then - ( pdNumIdsToAck - , pdNumIdsToReq - , objectsOwtPool - , refCountDiff - , ps - { dpsOutstandingFifo = dpsOutstandingFifo' - , dpsObjectsAvailableIds = dpsObjectsAvailableIds' - , dpsNumIdsInflight = - dpsNumIdsInflight - + pdNumIdsToReq - , dpsObjectsOwtPool = dpsObjectsOwtPool' - } - ) - else - ( 0 - , 0 - , objectsOwtPool - , RefCountDiff Map.empty - , ps{dpsObjectsOwtPool = dpsObjectsOwtPool'} - ) - where - -- Split `dpsOutstandingFifo'` into the longest prefix of `objectId`s which - -- can be acknowledged and the unacknowledged `objectId`s. 
- (pdNumIdsToReq, acknowledgedObjectIds, dpsOutstandingFifo') = - splitAcknowledgedObjectIds decisionPolicy globalState ps - - objectsOwtPoolList = - [ (objectId, object) - | objectId <- toList toObjectPoolObjectIds - , objectId `Map.notMember` dgsObjectsPendingMultiplicities globalState - , object <- maybeToList $ objectId `Map.lookup` dpsObjectsPending - ] - (toObjectPoolObjectIds, _) = - StrictSeq.spanl (`Map.member` dpsObjectsPending) acknowledgedObjectIds - - objectsOwtPool = Map.fromList objectsOwtPoolList - - dpsObjectsOwtPool' = dpsObjectsOwtPool <> objectsOwtPool - - (dpsObjectsPending', ackedDownloadedObjects) = Map.partitionWithKey (\objectId _ -> objectId `Set.member` liveSet) dpsObjectsPending - -- lateObjects: objects which were downloaded by another peer before we - -- downloaded them; it relies on that `objectToObjectPool` filters out - -- `dgsObjectsPending`. - lateObjects = - Map.filterWithKey - (\objectId _ -> objectId `Map.notMember` objectsOwtPool) - ackedDownloadedObjects - - -- the set of live `objectIds` - liveSet = Set.fromList (toList dpsOutstandingFifo') - dpsObjectsAvailableIds' = dpsObjectsAvailableIds `Set.intersection` liveSet - - -- We remove all acknowledged `objectId`s which are not in - -- `dpsOutstandingFifo''`, but also return the unknown set before any - -- modifications (which is used to compute `dpsOutstandingFifo''` - -- above). - - refCountDiff = - RefCountDiff $ - foldr - (Map.alter fn) - Map.empty - acknowledgedObjectIds - where - fn :: Maybe Int -> Maybe Int - fn Nothing = Just 1 - fn (Just n) = Just $! n + 1 - - pdNumIdsToAck :: NumObjectIdsAck - pdNumIdsToAck = fromIntegral $ StrictSeq.length acknowledgedObjectIds - --- | Split unacknowledged objectIds into acknowledged and unacknowledged parts, also --- return number of objectIds which can be requested. 
-splitAcknowledgedObjectIds :: - Ord objectId => - HasCallStack => - DecisionPolicy -> - DecisionGlobalState peer objectId object -> - DecisionPeerState objectId object -> - -- | number of objectIds to request, acknowledged objectIds, unacknowledged objectIds - (NumObjectIdsReq, StrictSeq.StrictSeq objectId, StrictSeq.StrictSeq objectId) -splitAcknowledgedObjectIds - DecisionPolicy - { dpMaxNumObjectsOutstanding - , dpMaxNumObjectIdsReq - } - DecisionGlobalState - { - } - DecisionPeerState - { dpsOutstandingFifo - , dpsObjectsInflightIds - , dpsNumIdsInflight - } = - (pdNumIdsToReq, acknowledgedObjectIds', dpsOutstandingFifo') - where - (acknowledgedObjectIds', dpsOutstandingFifo') = - StrictSeq.spanl - ( \objectId -> - ( objectId `Map.member` dgsObjectsPendingMultiplicities - || objectId `Set.member` dpsObjectsRequestedButNotReceivedIds - || objectId `Map.member` dpsObjectsPending - ) - && objectId `Set.notMember` dpsObjectsInflightIds - ) - dpsOutstandingFifo - numOfUnacked = StrictSeq.length dpsOutstandingFifo - numOfAcked = StrictSeq.length acknowledgedObjectIds' - unackedAndRequested = fromIntegral numOfUnacked + dpsNumIdsInflight - - pdNumIdsToReq = - assert (unackedAndRequested <= dpMaxNumObjectsOutstanding) $ - assert (dpsNumIdsInflight <= dpMaxNumObjectIdsReq) $ - (dpMaxNumObjectsOutstanding - unackedAndRequested + fromIntegral numOfAcked) - `min` (dpMaxNumObjectIdsReq - dpsNumIdsInflight) - --- | `RefCountDiff` represents a map of `objectId` which can be acknowledged --- together with their multiplicities. -newtype RefCountDiff objectId = RefCountDiff - { rcdIdsToAckMultiplicities :: Map objectId Int - } - -updateRefCounts :: - Ord objectId => - Map objectId Int -> - RefCountDiff objectId -> - Map objectId Int -updateRefCounts dgsObjectsPendingMultiplicities (RefCountDiff diff) = - Map.merge - (Map.mapMaybeMissing \_ x -> Just x) - (Map.mapMaybeMissing \_ _ -> Nothing) - ( Map.zipWithMaybeMatched \_ x y -> - assert - (x >= y) - if x > y - then Just $! 
x - y - else Nothing - ) - dgsObjectsPendingMultiplicities - diff \ No newline at end of file diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Registry.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Registry.hs index 5a9e94d1df..54d0d3b93e 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Registry.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Registry.hs @@ -39,7 +39,7 @@ import Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.Types import Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.ObjectPool.API import Control.Monad (forever) import Data.Set (Set) -import Ouroboros.Network.Protocol.ObjectDiffusion.Type (NumObjectIdsReq) +import Ouroboros.Network.Protocol.ObjectDiffusion.Type (NumObjectIdsReq, NumObjectIdsAck) -- | Communication channels between `ObjectDiffusion` mini-protocol inbound side -- and decision logic. 
@@ -56,7 +56,7 @@ newPeerDecisionChannelsVar = newMVar (Map.empty) data PeerStateAPI m objectId object = PeerStateAPI { psaReadDecision :: m (PeerDecision objectId object) -- ^ a blocking action which reads `PeerDecision` - , psaOnRequestIds :: NumObjectIdsReq -> m () + , psaOnRequestIds :: NumObjectIdsAck -> NumObjectIdsReq -> m () , psaOnRequestObjects :: Set objectId -> m () , psaOnReceivedIds :: NumObjectIdsReq -> [objectId] -> m () -- ^ Error handling should have been done before calling this @@ -133,13 +133,13 @@ withPeer globalStateVar objectPoolWriter peerAddr - , psaOnReceivedIds = State.onReceivedIds + , psaOnReceivedIds = State.onReceiveIds objectDiffusionTracer decisionTracer globalStateVar objectPoolWriter peerAddr - , psaOnReceivedObjects = State.onReceivedObjects + , psaOnReceivedObjects = State.onReceiveObjects objectDiffusionTracer decisionTracer globalStateVar @@ -279,25 +279,10 @@ decisionLogicThread decisionTracer countersTracer ObjectPoolWriter{opwHasObject} decisions -- Send the decisions to the corresponding peers -- Note that decisions are incremental, so we merge the old one to the new one (using the semigroup instance) if there is an old one - traverse_ - (\(chan, newDecision) -> - modifyMVarWithDefault_ - chan newDecision (\oldDecision -> pure (oldDecision <> newDecision))) - peerToChannelAndDecision - + traverse_ (uncurry putMVar) peerToChannelAndDecision + traceWith countersTracer (makeObjectDiffusionCounters globalState') --- Variant of modifyMVar_ that puts a default value if the MVar is empty. 
-modifyMVarWithDefault_ :: (MonadMask m, MonadMVar m) => StrictMVar m a -> a -> (a -> m a) -> m () -modifyMVarWithDefault_ m d io = - mask $ \restore -> do - mbA <- tryTakeMVar m - case mbA of - Just a -> do - a' <- restore (io a) `onException` putMVar m a - putMVar m a' - Nothing -> putMVar m d - -- `5ms` delay _DECISION_LOOP_DELAY :: DiffTime _DECISION_LOOP_DELAY = 0.005 diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/State.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/State.hs index d985cb43b6..ca0f2fff30 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/State.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/State.hs @@ -10,8 +10,8 @@ module Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.State , DecisionPeerState (..) , onRequestIds , onRequestObjects - , onReceivedIds - , onReceivedObjects + , onReceiveIds + , onReceiveObjects ) where import Control.Concurrent.Class.MonadSTM.Strict @@ -28,7 +28,7 @@ import GHC.Stack (HasCallStack) import Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.Types import Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.ObjectPool.API (ObjectPoolWriter (..)) import Ouroboros.Consensus.Util.IOLike (MonadMask, MonadMVar, bracket_) -import Ouroboros.Network.Protocol.ObjectDiffusion.Type (NumObjectIdsReq) +import Ouroboros.Network.Protocol.ObjectDiffusion.Type (NumObjectIdsReq, NumObjectIdsAck) onRequestIds :: forall m peerAddr object objectId. 
@@ -38,29 +38,34 @@ onRequestIds :: DecisionGlobalStateVar m peerAddr objectId object -> ObjectPoolWriter objectId object m -> peerAddr -> + NumObjectIdsAck -> -- | number of requests to req NumObjectIdsReq -> m () -onRequestIds odTracer decisionTracer globalStateVar _objectPoolWriter peerAddr numIdsToReq = do +onRequestIds odTracer decisionTracer globalStateVar _objectPoolWriter peerAddr numIdsToAck numIdsToReq = do globalState' <- atomically $ do stateTVar globalStateVar ( \globalState -> - let globalState' = onRequestIdsImpl peerAddr numIdsToReq globalState + let globalState' = onRequestIdsImpl peerAddr numIdsToAck numIdsToReq globalState in (globalState', globalState') ) traceWith odTracer (TraceObjectDiffusionInboundRequestedIds (fromIntegral numIdsToReq)) traceWith decisionTracer (TraceDecisionLogicGlobalStateUpdated "onRequestIds" globalState') +-- Acknowledgment is done when a requestIds is made. +-- That's why we update the dpsOutstandingFifo and dpsObjectsAvailableIds here. onRequestIdsImpl :: forall peerAddr object objectId. 
(Ord objectId, Ord peerAddr, HasCallStack) => peerAddr -> + NumObjectIdsAck -> -- | number of requests to req NumObjectIdsReq -> DecisionGlobalState peerAddr objectId object -> DecisionGlobalState peerAddr objectId object onRequestIdsImpl peerAddr + numIdsToAck numIdsToReq globalState@DecisionGlobalState { dgsPeerStates @@ -71,7 +76,25 @@ onRequestIdsImpl where dgsPeerStates' = Map.adjust - (\ps@DecisionPeerState{dpsNumIdsInflight} -> ps{dpsNumIdsInflight = dpsNumIdsInflight + numIdsToReq}) + (\ps@DecisionPeerState{dpsNumIdsInflight, dpsOutstandingFifo, dpsObjectsAvailableIds} -> + -- we isolate the longest prefix of outstandingFifo that matches our ack criteria (see above in computeAck doc) + let -- We compute the ids to ack and new state of the FIFO based on the number of ids to ack given by the decision logic + (idsToAck, dpsOutstandingFifo') = + StrictSeq.splitAt + (fromIntegral numIdsToAck) + dpsOutstandingFifo + + -- We remove the acknowledged ids from dpsObjectsAvailableIds if they were present. + -- We need to do that because objects that were advertised by this corresponding outbound peer + -- but never downloaded because we already have them in pool were consequently never removed + -- from dpsObjectsAvailableIds by onRequestObjects + dpsObjectsAvailableIds' = + Foldable.foldl' (\set objectId -> Set.delete objectId set) dpsObjectsAvailableIds idsToAck + + in ps{dpsNumIdsInflight = dpsNumIdsInflight + numIdsToReq + , dpsOutstandingFifo = dpsOutstandingFifo' + , dpsObjectsAvailableIds = dpsObjectsAvailableIds'} + ) peerAddr dgsPeerStates @@ -129,10 +152,10 @@ onRequestObjectsImpl peerAddr dgsPeerStates --- | Wrapper around `onReceivedIdsImpl`. +-- | Wrapper around `onReceiveIdsImpl`. -- Obtain the `hasObject` function atomically from the STM context and -- updates and traces the global state TVar. -onReceivedIds :: +onReceiveIds :: forall m peerAddr object objectId. 
(MonadSTM m, Ord objectId, Ord peerAddr) => Tracer m (TraceObjectDiffusionInbound objectId object) -> @@ -147,17 +170,17 @@ onReceivedIds :: [objectId] -> -- | received `objectId`s m () -onReceivedIds odTracer decisionTracer globalStateVar objectPoolWriter peerAddr numIdsInitiallyRequested receivedIds = do +onReceiveIds odTracer decisionTracer globalStateVar objectPoolWriter peerAddr numIdsInitiallyRequested receivedIds = do globalState' <- atomically $ do hasObject <- opwHasObject objectPoolWriter stateTVar globalStateVar - ( \globalState -> let globalState' = onReceivedIdsImpl hasObject peerAddr numIdsInitiallyRequested receivedIds globalState + ( \globalState -> let globalState' = onReceiveIdsImpl hasObject peerAddr numIdsInitiallyRequested receivedIds globalState in (globalState', globalState') ) traceWith odTracer (TraceObjectDiffusionInboundReceivedIds (length receivedIds)) - traceWith decisionTracer (TraceDecisionLogicGlobalStateUpdated "onReceivedIds" globalState') + traceWith decisionTracer (TraceDecisionLogicGlobalStateUpdated "onReceiveIds" globalState') -onReceivedIdsImpl :: +onReceiveIdsImpl :: forall peerAddr object objectId. 
(Ord objectId, Ord peerAddr, HasCallStack) => -- | check if objectId is in the objectpool, ref @@ -171,7 +194,7 @@ onReceivedIdsImpl :: [objectId] -> DecisionGlobalState peerAddr objectId object -> DecisionGlobalState peerAddr objectId object -onReceivedIdsImpl +onReceiveIdsImpl hasObject peerAddr numIdsInitiallyRequested @@ -190,7 +213,7 @@ onReceivedIdsImpl , dpsNumIdsInflight } = findWithDefault - (error "ObjectDiffusion.onReceivedIdsImpl: the peer should appear in dgsPeerStates") + (error "ObjectDiffusion.onReceiveIdsImpl: the peer should appear in dgsPeerStates") peerAddr dgsPeerStates @@ -215,7 +238,7 @@ onReceivedIdsImpl dgsPeerStates' = Map.insert peerAddr peerState' dgsPeerStates --- | Wrapper around `onReceivedObjectsImpl` that updates and traces the +-- | Wrapper around `onReceiveObjectsImpl` that updates and traces the -- global state TVar. -- -- Error handling should be done by the client before using the API. @@ -223,7 +246,7 @@ onReceivedIdsImpl -- assert (objectsRequestedIds `Set.isSubsetOf` dpsObjectsInflightIds) -- -- IMPORTANT: We also assume that every object has been *validated* before being passed to this function. -onReceivedObjects :: +onReceiveObjects :: forall m peerAddr object objectId. 
( MonadSTM m , MonadMask m @@ -240,7 +263,7 @@ onReceivedObjects :: -- | received objects [object] -> m () -onReceivedObjects odTracer tracer globalStateVar objectPoolWriter poolSem peerAddr objectsReceived = do +onReceiveObjects odTracer tracer globalStateVar objectPoolWriter poolSem peerAddr objectsReceived = do let getId = opwObjectId objectPoolWriter let objectsReceivedMap = Map.fromList $ (\obj -> (getId obj, obj)) <$> objectsReceived @@ -249,13 +272,13 @@ onReceivedObjects odTracer tracer globalStateVar objectPoolWriter poolSem peerAd globalStateVar ( \globalState -> let globalState' = - onReceivedObjectsImpl + onReceiveObjectsImpl peerAddr objectsReceivedMap globalState in (globalState', globalState')) traceWith odTracer (TraceObjectDiffusionInboundReceivedObjects (length objectsReceived)) - traceWith tracer (TraceDecisionLogicGlobalStateUpdated "onReceivedObjects" globalState') + traceWith tracer (TraceDecisionLogicGlobalStateUpdated "onReceiveObjects" globalState') submitObjectsToPool odTracer tracer @@ -265,7 +288,7 @@ onReceivedObjects odTracer tracer globalStateVar objectPoolWriter poolSem peerAd peerAddr objectsReceivedMap -onReceivedObjectsImpl :: +onReceiveObjectsImpl :: forall peerAddr object objectId. 
( Ord peerAddr , Ord objectId @@ -275,7 +298,7 @@ onReceivedObjectsImpl :: Map objectId object -> DecisionGlobalState peerAddr objectId object -> DecisionGlobalState peerAddr objectId object -onReceivedObjectsImpl +onReceiveObjectsImpl peerAddr objectsReceived st@DecisionGlobalState @@ -296,7 +319,7 @@ onReceivedObjectsImpl , dpsObjectsOwtPool } = findWithDefault - (error "ObjectDiffusion.onReceivedObjectsImpl: the peer should appear in dgsPeerStates") + (error "ObjectDiffusion.onReceiveObjectsImpl: the peer should appear in dgsPeerStates") peerAddr dgsPeerStates diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Types.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Types.hs index 3b112b6e5f..f71d317afd 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Types.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Types.hs @@ -214,37 +214,6 @@ data PeerDecision objectId object = PeerDecision } deriving (Show, Eq) --- | A non-commutative semigroup instance. --- --- /note:/ this instance must be consistent with `pickObjectsToDownload` and how --- `DecisionPeerState` is updated. It is designed to work with `TMergeVar`s. 
-instance Ord objectId => Semigroup (PeerDecision objectId object) where - PeerDecision - { pdNumIdsToAck - , pdNumIdsToReq - , pdCanPipelineIdsRequests = _ignored - , pdObjectsToReqIds - } - <> PeerDecision - { pdNumIdsToAck = pdNumIdsToAck' - , pdNumIdsToReq = pdNumIdsToReq' - , pdCanPipelineIdsRequests = pdCanPipelineIdsRequests' - , pdObjectsToReqIds = pdObjectsToReqIds' - } = - PeerDecision - { pdNumIdsToAck = pdNumIdsToAck + pdNumIdsToAck' - , pdNumIdsToReq = pdNumIdsToReq + pdNumIdsToReq' - , pdCanPipelineIdsRequests = pdCanPipelineIdsRequests' - , pdObjectsToReqIds = pdObjectsToReqIds <> pdObjectsToReqIds' - } -instance Ord objectId => Monoid (PeerDecision objectId object) where - mempty = PeerDecision - { pdNumIdsToAck = 0 - , pdNumIdsToReq = 0 - , pdCanPipelineIdsRequests = False - , pdObjectsToReqIds = Set.empty - } - -- | ObjectLogic tracer. data TraceDecisionLogic peerAddr objectId object = TraceDecisionLogicGlobalStateUpdated String (DecisionGlobalState peerAddr objectId object) From 70feb95160e9aecda205aa0498cf8ba841b943a3 Mon Sep 17 00:00:00 2001 From: Thomas BAGREL Date: Tue, 14 Oct 2025 11:13:18 +0200 Subject: [PATCH 23/43] formatting --- .../ObjectDiffusion/Inbound/V2.hs | 45 +-- .../ObjectDiffusion/Inbound/V2/Decision.hs | 274 ++++++++++-------- .../ObjectDiffusion/Inbound/V2/Policy.hs | 8 +- .../ObjectDiffusion/Inbound/V2/Registry.hs | 143 ++++----- .../ObjectDiffusion/Inbound/V2/State.hs | 270 +++++++++-------- .../ObjectDiffusion/Inbound/V2/Types.hs | 57 ++-- 6 files changed, 432 insertions(+), 365 deletions(-) diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2.hs index 335c85fe71..85c1993aa6 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2.hs +++ 
b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2.hs @@ -1,11 +1,11 @@ {-# LANGUAGE BangPatterns #-} {-# LANGUAGE DataKinds #-} {-# LANGUAGE GADTs #-} +{-# LANGUAGE ImportQualifiedPost #-} {-# LANGUAGE KindSignatures #-} {-# LANGUAGE LambdaCase #-} {-# LANGUAGE NamedFieldPuns #-} {-# LANGUAGE ScopedTypeVariables #-} -{-# LANGUAGE ImportQualifiedPost #-} module Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2 ( -- * ObjectDiffusion Inbound client @@ -27,29 +27,28 @@ module Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2 , defaultDecisionPolicy ) where +import Control.Concurrent.Class.MonadSTM (atomically) import Control.Exception (assert) import Control.Monad (unless, when) import Control.Monad.Class.MonadThrow import Control.Monad.Class.MonadTimer.SI import Control.Tracer (Tracer, traceWith) -import qualified Data.List.NonEmpty as NonEmpty -import qualified Data.Map.Strict as Map -import qualified Data.Sequence.Strict as StrictSeq -import qualified Data.Set as Set +import Data.List.NonEmpty qualified as NonEmpty +import Data.Map.Strict qualified as Map +import Data.Sequence.Strict qualified as StrictSeq +import Data.Set qualified as Set import Network.TypedProtocol import Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.Policy import Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.Registry import Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.State qualified as State import Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.Types as V2 -import Ouroboros.Network.Protocol.ObjectDiffusion.Inbound import Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.ObjectPool.API -import Ouroboros.Network.ControlMessage (ControlMessageSTM, ControlMessage (..)) -import Control.Concurrent.Class.MonadSTM (atomically) - +import Ouroboros.Network.ControlMessage (ControlMessage (..), ControlMessageSTM) +import Ouroboros.Network.Protocol.ObjectDiffusion.Inbound -- | 
A object-submission inbound side (client). -- --- The goIdle' function blocks on receiving `PeerDecision` from the decision logic. +-- The goIdle' function blocks on receiving `PeerDecision` from the decision logic. objectDiffusionInbound :: forall objectId object ticketNo m. ( MonadDelay m @@ -78,7 +77,8 @@ objectDiffusionInbound NoObjectDiffusionInitDelay -> return () (goIdle Zero) where - goIdle :: forall (n :: N). + goIdle :: + forall (n :: N). Nat n -> m (InboundStIdle Z objectId object m ()) goIdle n = do @@ -90,7 +90,8 @@ objectDiffusionInbound pure $ terminateAfterDrain n -- Otherwise, we can continue the protocol normally. _continue -> goIdle' - goIdle' :: forall (n :: N). + goIdle' :: + forall (n :: N). Nat n -> m (InboundStIdle Z objectId object m ()) goIdle' n = do @@ -127,7 +128,7 @@ objectDiffusionInbound -- blocking call. traceWith tracer (TraceObjectDiffusionInboundCannotRequestMoreObjects (natToInt n)) goReqIdsBlocking decision - + -- We have pipelined some requests, so there are some replies in flight. n@(Succ _) -> if shouldRequestMoreObjects @@ -136,16 +137,16 @@ objectDiffusionInbound -- available, but there are objects to request too so we -- should *not* block waiting for replies. -- So we ask for new objects and objectIds in a pipelined way. - pure $ CollectPipelined - -- if no replies are available immediately, we continue with - (Just (goReqObjectsIdsPipelined n decision)) - -- if one reply is available, we go collect it. - -- We will continue to goIdle after; so in practice we will loop - -- until all immediately available replies have been collected - -- before requesting objects and ids in a pipelined fashion - (goCollect n decision) + pure $ + CollectPipelined + -- if no replies are available immediately, we continue with + (Just (goReqObjectsIdsPipelined n decision)) + -- if one reply is available, we go collect it. 
+ -- We will continue to goIdle after; so in practice we will loop + -- until all immediately available replies have been collected + -- before requesting objects and ids in a pipelined fashion + (goCollect n decision) else do undefined - if Set.null pdObjectsToReqIds then goReqObjectIds Zero undefined diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Decision.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Decision.hs index 248096933f..d80699178e 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Decision.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Decision.hs @@ -19,23 +19,23 @@ module Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.Decision import Control.Arrow ((>>>)) import Control.Exception (assert) import Data.Bifunctor (second) +import Data.Foldable qualified as Foldable import Data.Hashable import Data.List qualified as List import Data.Map.Merge.Strict qualified as Map import Data.Map.Strict (Map) import Data.Map.Strict qualified as Map import Data.Maybe (mapMaybe) +import Data.Sequence.Strict (StrictSeq) import Data.Sequence.Strict qualified as StrictSeq import Data.Set (Set) import Data.Set qualified as Set -import Data.Foldable qualified as Foldable import GHC.Stack (HasCallStack) import Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.Policy import Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.State import Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.Types import Ouroboros.Network.Protocol.ObjectDiffusion.Type -import System.Random (random, StdGen) -import Data.Sequence.Strict (StrictSeq) +import System.Random (StdGen, random) strictSeqToSet :: Ord a => StrictSeq a -> Set a strictSeqToSet = Set.fromList . 
Foldable.toList @@ -60,8 +60,12 @@ makeDecisions hasObject decisionPolicy globalState = -- so that the pickObjectsToReq function can take this into account. let (ackAndRequestIdsDecisions, peerToIdsToAck) = computeAck hasObject decisionPolicy globalState (globalState', peersToObjectsToReq) = pickObjectsToReq hasObject decisionPolicy globalState peerToIdsToAck - completeDecisions = Map.intersectionWith (\decision objectsToReqIds -> decision{ pdObjectsToReqIds = objectsToReqIds }) ackAndRequestIdsDecisions peersToObjectsToReq - in (globalState', completeDecisions) + completeDecisions = + Map.intersectionWith + (\decision objectsToReqIds -> decision{pdObjectsToReqIds = objectsToReqIds}) + ackAndRequestIdsDecisions + peersToObjectsToReq + in (globalState', completeDecisions) -- | The ids to ack are the longest prefix of outstandingFifo of each peer that match the following criteria: -- * either the object is owt pool for the peer who has downloaded it @@ -79,52 +83,57 @@ computeAck :: ) computeAck poolHasObject DecisionPolicy{dpMaxNumObjectIdsReq, dpMaxNumObjectsOutstanding} DecisionGlobalState{dgsPeerStates} = let (decisions, peerToIdsToAck) = - Map.foldlWithKey' computeAckForPeer (Map.empty, Map.empty) dgsPeerStates + Map.foldlWithKey' computeAckForPeer (Map.empty, Map.empty) dgsPeerStates in ( decisions , peerToIdsToAck ) - - where + where computeAckForPeer :: - -- | Accumulator containing decisions already made for other peers + -- \| Accumulator containing decisions already made for other peers -- It's a map in which we need to insert the new decision into (Map peerAddr (PeerDecision objectId object), Map peerAddr (Set objectId)) -> peerAddr -> DecisionPeerState objectId object -> (Map peerAddr (PeerDecision objectId object), Map peerAddr (Set objectId)) computeAckForPeer (decisionsAcc, peerToIdsToAck) peerAddr DecisionPeerState{dpsOutstandingFifo, dpsObjectsOwtPool, dpsNumIdsInflight} = - let - -- we isolate the longest prefix of outstandingFifo that matches our 
ack criteria (see above in computeAck doc) - (idsToAck, dpsOutstandingFifo') = - StrictSeq.spanl - (\objectId -> poolHasObject objectId || objectId `Map.member` dpsObjectsOwtPool) - dpsOutstandingFifo + let + -- we isolate the longest prefix of outstandingFifo that matches our ack criteria (see above in computeAck doc) + (idsToAck, dpsOutstandingFifo') = + StrictSeq.spanl + (\objectId -> poolHasObject objectId || objectId `Map.member` dpsObjectsOwtPool) + dpsOutstandingFifo - pdNumIdsToAck = fromIntegral $ StrictSeq.length idsToAck + pdNumIdsToAck = fromIntegral $ StrictSeq.length idsToAck - futureFifoSizeOnOutboundPeer :: NumObjectIdsReq = - -- the new known fifo state after we ack the idsToAck - (fromIntegral $ StrictSeq.length dpsOutstandingFifo') + futureFifoSizeOnOutboundPeer :: NumObjectIdsReq = + -- the new known fifo state after we ack the idsToAck + (fromIntegral $ StrictSeq.length dpsOutstandingFifo') -- plus the number of ids that we have already requested but we didn't receive yet -- that the outbound peer might consequently already have added to its fifo - + dpsNumIdsInflight + + dpsNumIdsInflight - pdNumIdsToReq = - (fromIntegral dpMaxNumObjectsOutstanding - futureFifoSizeOnOutboundPeer) + pdNumIdsToReq = + (fromIntegral dpMaxNumObjectsOutstanding - futureFifoSizeOnOutboundPeer) `min` dpMaxNumObjectIdsReq - - pdCanPipelineIdsRequests = not . StrictSeq.null $ dpsOutstandingFifo' - peerDecision = PeerDecision + pdCanPipelineIdsRequests = not . 
StrictSeq.null $ dpsOutstandingFifo' + + peerDecision = + PeerDecision { pdNumIdsToAck , pdNumIdsToReq , pdCanPipelineIdsRequests , pdObjectsToReqIds = Set.empty -- we don't decide this here } + in + ( Map.insert peerAddr peerDecision decisionsAcc + , Map.insert peerAddr (strictSeqToSet idsToAck) peerToIdsToAck + ) - in (Map.insert peerAddr peerDecision decisionsAcc, Map.insert peerAddr (strictSeqToSet idsToAck) peerToIdsToAck) - -orderPeers :: Map peerAddr (DecisionPeerState objectId object) -> StdGen -> ([(peerAddr, DecisionPeerState objectId object)], StdGen) +orderPeers :: + Map peerAddr (DecisionPeerState objectId object) -> + StdGen -> + ([(peerAddr, DecisionPeerState objectId object)], StdGen) orderPeers = undefined -- TODO: be careful about additive semigroup instance of PeerDecision @@ -152,107 +161,130 @@ pickObjectsToReq :: Map peerAddr (Set objectId) -> -- | new global state (with just RNG updated), and objects to request from each peer (DecisionGlobalState peerAddr objectId object, Map peerAddr (Set objectId)) -pickObjectsToReq poolHasObject DecisionPolicy{dpMaxNumObjectsInflightPerPeer, dpMaxNumObjectsInflightTotal,dpMaxObjectInflightMultiplicity} globalState@DecisionGlobalState{dgsRng, dgsPeerStates, dgsObjectsInflightMultiplicities, dgsObjectsOwtPoolMultiplicities} peerToIdsToAck = +pickObjectsToReq poolHasObject DecisionPolicy + { dpMaxNumObjectsInflightPerPeer + , dpMaxNumObjectsInflightTotal + , dpMaxObjectInflightMultiplicity + } globalState@DecisionGlobalState + { dgsRng + , dgsPeerStates + , dgsObjectsInflightMultiplicities + , dgsObjectsOwtPoolMultiplicities + } peerToIdsToAck = (globalState{dgsRng = dgsRng'}, peersToObjectsToReq) - where - (orderedPeers, dgsRng') = orderPeers dgsPeerStates dgsRng + where + (orderedPeers, dgsRng') = orderPeers dgsPeerStates dgsRng - -- We want to map each objectId to the sorted list of peers that can provide it - -- For each peer we also indicate how many objects it has in flight at the moment - -- We filter 
out here the objects that are already in pool - objectsToSortedProviders :: Map objectId [(peerAddr, NumObjectsReq)] - objectsToSortedProviders = - -- We iterate over each peer and the corresponding available ids - -- and turn the map "inside-out" - Foldable.foldl' - ( \accMap (peerAddr, DecisionPeerState{dpsObjectsAvailableIds, dpsObjectsInflightIds}) -> - let -- ids that will be acked for this peer won't be available anymore, so we should not consider them in the decision logic - idsToAckForThisPeer = Map.findWithDefault (error "invariant violated: peer must be in peerToIdsToAck map") peerAddr peerToIdsToAck - -- we should also remove objects that are already in the pool - interestingAndAvailableObjectIds = Set.filter (not . poolHasObject) $ - dpsObjectsAvailableIds `Set.difference` idsToAckForThisPeer - in -- we iterate over interestingAndAvailableObjectIds and add the peer to the list of providers for each object it can provide - Foldable.foldl' - (\accMap' objectId -> Map.insertWith (++) objectId [(peerAddr, fromIntegral $ Set.size dpsObjectsInflightIds)] accMap') - accMap - interestingAndAvailableObjectIds - ) - Map.empty - orderedPeers + -- We want to map each objectId to the sorted list of peers that can provide it + -- For each peer we also indicate how many objects it has in flight at the moment + -- We filter out here the objects that are already in pool + objectsToSortedProviders :: Map objectId [(peerAddr, NumObjectsReq)] + objectsToSortedProviders = + -- We iterate over each peer and the corresponding available ids + -- and turn the map "inside-out" + Foldable.foldl' + ( \accMap (peerAddr, DecisionPeerState{dpsObjectsAvailableIds, dpsObjectsInflightIds}) -> + let + -- ids that will be acked for this peer won't be available anymore, so we should not consider them in the decision logic + idsToAckForThisPeer = + Map.findWithDefault + (error "invariant violated: peer must be in peerToIdsToAck map") + peerAddr + peerToIdsToAck + -- we should also remove 
objects that are already in the pool + interestingAndAvailableObjectIds = + Set.filter (not . poolHasObject) $ + dpsObjectsAvailableIds `Set.difference` idsToAckForThisPeer + in + -- we iterate over interestingAndAvailableObjectIds and add the peer to the list of providers for each object it can provide + Foldable.foldl' + ( \accMap' objectId -> Map.insertWith (++) objectId [(peerAddr, fromIntegral $ Set.size dpsObjectsInflightIds)] accMap' + ) + accMap + interestingAndAvailableObjectIds + ) + Map.empty + orderedPeers - -- We also want to know for each objects how many peers have it in the inflight or owtPool, - -- meaning that we should receive them soon. - objectsExpectedSoonMultiplicities :: Map objectId ObjectMultiplicity - objectsExpectedSoonMultiplicities = Map.unionWith (+) dgsObjectsInflightMultiplicities dgsObjectsOwtPoolMultiplicities + -- We also want to know for each objects how many peers have it in the inflight or owtPool, + -- meaning that we should receive them soon. + objectsExpectedSoonMultiplicities :: Map objectId ObjectMultiplicity + objectsExpectedSoonMultiplicities = Map.unionWith (+) dgsObjectsInflightMultiplicities dgsObjectsOwtPoolMultiplicities - -- Now we join objectsToSortedProviders and objectsExpectedSoonMultiplicities maps on objectId for easy fold - objectsToProvidersAndExpectedMultiplicities :: Map objectId ([(peerAddr, NumObjectsReq)], ObjectMultiplicity) - objectsToProvidersAndExpectedMultiplicities = - Map.merge - -- if an objectId is missing from objectsExpectedSoonMultiplicities, then its expected multiplicity is 0 - (Map.mapMissing \_ providers -> (providers, 0)) - -- if an objectId is missing from objectsToSortedProviders, then we don't care about it - Map.dropMissing - -- Combine in a tuple the list of providers and the expected multiplicity - (Map.zipWithMatched \_ providers expectedMultiplicity -> (providers, expectedMultiplicity)) - objectsToSortedProviders - objectsExpectedSoonMultiplicities - - -- Now we compute the 
actual attribution of downloads for peers - DownloadPickState{peersToObjectsToReq} = - -- We iterate over each objectId and the corresponding (providers, expectedMultiplicity) - Map.foldlWithKey' - ( \st objectId (providers, expectedMultiplicity) -> - -- reset the objectMultiplicity counter for each new objectId - let st' = st{objectMultiplicity = 0} + -- Now we join objectsToSortedProviders and objectsExpectedSoonMultiplicities maps on objectId for easy fold + objectsToProvidersAndExpectedMultiplicities :: + Map objectId ([(peerAddr, NumObjectsReq)], ObjectMultiplicity) + objectsToProvidersAndExpectedMultiplicities = + Map.merge + -- if an objectId is missing from objectsExpectedSoonMultiplicities, then its expected multiplicity is 0 + (Map.mapMissing \_ providers -> (providers, 0)) + -- if an objectId is missing from objectsToSortedProviders, then we don't care about it + Map.dropMissing + -- Combine in a tuple the list of providers and the expected multiplicity + (Map.zipWithMatched \_ providers expectedMultiplicity -> (providers, expectedMultiplicity)) + objectsToSortedProviders + objectsExpectedSoonMultiplicities - -- We iterate over the list of providers, and pick them or not according to the current state + -- Now we compute the actual attribution of downloads for peers + DownloadPickState{peersToObjectsToReq} = + -- We iterate over each objectId and the corresponding (providers, expectedMultiplicity) + Map.foldlWithKey' + ( \st objectId (providers, expectedMultiplicity) -> + -- reset the objectMultiplicity counter for each new objectId + let st' = st{objectMultiplicity = 0} + in -- We iterate over the list of providers, and pick them or not according to the current state -- When a peer is selected as a provider for this objectId, we insert the objectId in the peer's set in peersToObjectsToReq (inside St) -- So the result of the filtering of providers is part of the final St state - in Foldable.foldl' - (howToFoldProviders objectId expectedMultiplicity) - 
st' - providers - ) - DownloadPickState{ - totalNumObjectsToReq = 0 - , objectMultiplicity = 0 - , peersToObjectsToReq = Map.empty - } - objectsToProvidersAndExpectedMultiplicities + Foldable.foldl' + (howToFoldProviders objectId expectedMultiplicity) + st' + providers + ) + DownloadPickState + { totalNumObjectsToReq = 0 + , objectMultiplicity = 0 + , peersToObjectsToReq = Map.empty + } + objectsToProvidersAndExpectedMultiplicities - totalNumObjectsInflight :: NumObjectsReq - totalNumObjectsInflight = fromIntegral $ Map.foldl' (+) 0 dgsObjectsInflightMultiplicities + totalNumObjectsInflight :: NumObjectsReq + totalNumObjectsInflight = fromIntegral $ Map.foldl' (+) 0 dgsObjectsInflightMultiplicities - -- This function decides whether or not we should select a given peer as provider for the current objectId - -- it takes into account if we are expecting to obtain the object from other sources (either inflight/owt pool already, or if the object will be requested from already selected peers in this given round) - howToFoldProviders :: objectId -> ObjectMultiplicity -> DownloadPickState peerAddr objectId -> (peerAddr, NumObjectsReq) -> DownloadPickState peerAddr objectId - howToFoldProviders objectId expectedMultiplicity st@DownloadPickState{totalNumObjectsToReq, objectMultiplicity, peersToObjectsToReq} (peerAddr, numObjectsInFlight) = - let -- see what has already been attributed to this peer - objectsToReq = Map.findWithDefault Set.empty peerAddr peersToObjectsToReq + -- This function decides whether or not we should select a given peer as provider for the current objectId + -- it takes into account if we are expecting to obtain the object from other sources (either inflight/owt pool already, or if the object will be requested from already selected peers in this given round) + howToFoldProviders :: + objectId -> + ObjectMultiplicity -> + DownloadPickState peerAddr objectId -> + (peerAddr, NumObjectsReq) -> + DownloadPickState peerAddr objectId + howToFoldProviders 
objectId expectedMultiplicity st@DownloadPickState{totalNumObjectsToReq, objectMultiplicity, peersToObjectsToReq} (peerAddr, numObjectsInFlight) = + let + -- see what has already been attributed to this peer + objectsToReq = Map.findWithDefault Set.empty peerAddr peersToObjectsToReq - shouldSelect = - -- We should not go over the multiplicity limit per object - objectMultiplicity + expectedMultiplicity < dpMaxObjectInflightMultiplicity - -- We should not go over the total number of objects inflight limit - && totalNumObjectsInflight + totalNumObjectsToReq < dpMaxNumObjectsInflightTotal - -- We should not go over the per-peer number of objects inflight limit - && numObjectsInFlight + (fromIntegral $ Set.size objectsToReq) < dpMaxNumObjectsInflightPerPeer - - in if shouldSelect - then - -- We increase both global count and per-object count, and we add the object to the peer's set - DownloadPickState - { totalNumObjectsToReq = totalNumObjectsToReq + 1 - , objectMultiplicity = objectMultiplicity + 1 - , peersToObjectsToReq = Map.insert peerAddr (Set.insert objectId objectsToReq) peersToObjectsToReq - } - -- Or we keep the state as is if we don't select this peer - else st + shouldSelect = + -- We should not go over the multiplicity limit per object + objectMultiplicity + expectedMultiplicity < dpMaxObjectInflightMultiplicity + -- We should not go over the total number of objects inflight limit + && totalNumObjectsInflight + totalNumObjectsToReq < dpMaxNumObjectsInflightTotal + -- We should not go over the per-peer number of objects inflight limit + && numObjectsInFlight + (fromIntegral $ Set.size objectsToReq) < dpMaxNumObjectsInflightPerPeer + in + if shouldSelect + then + -- We increase both global count and per-object count, and we add the object to the peer's set + DownloadPickState + { totalNumObjectsToReq = totalNumObjectsToReq + 1 + , objectMultiplicity = objectMultiplicity + 1 + , peersToObjectsToReq = Map.insert peerAddr (Set.insert objectId objectsToReq) 
peersToObjectsToReq + } + -- Or we keep the state as is if we don't select this peer + else st -data DownloadPickState peerAddr objectId = - DownloadPickState - { totalNumObjectsToReq :: !NumObjectsReq, - objectMultiplicity :: ObjectMultiplicity, - peersToObjectsToReq :: Map peerAddr (Set objectId) - } +data DownloadPickState peerAddr objectId + = DownloadPickState + { totalNumObjectsToReq :: !NumObjectsReq + , objectMultiplicity :: ObjectMultiplicity + , peersToObjectsToReq :: Map peerAddr (Set objectId) + } diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Policy.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Policy.hs index 89e89b2db7..ece542d301 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Policy.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Policy.hs @@ -3,8 +3,12 @@ module Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.Policy , defaultDecisionPolicy ) where -import Ouroboros.Network.Protocol.ObjectDiffusion.Type (NumObjectIdsReq (..), NumObjectsOutstanding, NumObjectsReq (..)) import Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.Types (ObjectMultiplicity) +import Ouroboros.Network.Protocol.ObjectDiffusion.Type + ( NumObjectIdsReq (..) + , NumObjectsOutstanding + , NumObjectsReq (..) + ) -- | Policy for making decisions data DecisionPolicy = DecisionPolicy @@ -12,7 +16,7 @@ data DecisionPolicy = DecisionPolicy -- ^ a maximal number of objectIds requested at once. , dpMaxNumObjectsOutstanding :: !NumObjectsOutstanding -- ^ maximal number of dpsOutstandingFifo. - , dpMaxNumObjectsInflightPerPeer :: !NumObjectsReq + , dpMaxNumObjectsInflightPerPeer :: !NumObjectsReq -- ^ a limit of objects in-flight from a single peer, plus or minus 1. 
, dpMaxNumObjectsInflightTotal :: !NumObjectsReq -- ^ a limit of object size in-flight from all peers, plus or minus 1 diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Registry.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Registry.hs index 54d0d3b93e..e26d8038ff 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Registry.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Registry.hs @@ -19,6 +19,7 @@ module Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.Registry import Control.Concurrent.Class.MonadMVar.Strict import Control.Concurrent.Class.MonadSTM.Strict +import Control.Monad (forever) import Control.Monad.Class.MonadFork import Control.Monad.Class.MonadThrow import Control.Monad.Class.MonadTime.SI @@ -30,6 +31,7 @@ import Data.Map.Strict (Map) import Data.Map.Strict qualified as Map import Data.Sequence.Strict (StrictSeq) import Data.Sequence.Strict qualified as StrictSeq +import Data.Set (Set) import Data.Set qualified as Set import Data.Void (Void) import Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.Decision @@ -37,9 +39,7 @@ import Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.Policy import Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.State qualified as State import Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.Types import Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.ObjectPool.API -import Control.Monad (forever) -import Data.Set (Set) -import Ouroboros.Network.Protocol.ObjectDiffusion.Type (NumObjectIdsReq, NumObjectIdsAck) +import Ouroboros.Network.Protocol.ObjectDiffusion.Type (NumObjectIdsAck, NumObjectIdsReq) -- | Communication channels between `ObjectDiffusion` mini-protocol inbound side -- and decision logic. 
@@ -59,10 +59,10 @@ data PeerStateAPI m objectId object = PeerStateAPI , psaOnRequestIds :: NumObjectIdsAck -> NumObjectIdsReq -> m () , psaOnRequestObjects :: Set objectId -> m () , psaOnReceivedIds :: NumObjectIdsReq -> [objectId] -> m () - -- ^ Error handling should have been done before calling this + -- ^ Error handling should have been done before calling this , psaOnReceivedObjects :: [object] -> m () - -- ^ Error handling should have been done before calling this - -- Also every object should have been validated! + -- ^ Error handling should have been done before calling this + -- Also every object should have been validated! } -- | A bracket function which registers / de-registers a new peer in @@ -100,73 +100,77 @@ withPeer peerAddr withAPI = bracket registerPeerAndCreateAPI unregisterPeer withAPI - where + where registerPeerAndCreateAPI :: m (PeerStateAPI m objectId object) registerPeerAndCreateAPI = do - -- create the API for this peer, obtaining a channel for it in the process - !inboundPeerAPI <- - modifyMVar - decisionChannelsVar - \peerToChannel -> do - -- We get a channel for this peer, and register it in peerToChannel. - (chan', peerToChannel') <- - case peerToChannel Map.!? 
peerAddr of - -- Checks if a channel already exists for this peer, in case we reuse it - Just chan -> return (chan, peerToChannel) - -- Otherwise create a new channel and register it - Nothing -> do - chan <- newEmptyMVar - return (chan, Map.insert peerAddr chan peerToChannel) - return - ( peerToChannel' - , PeerStateAPI - { psaReadDecision = takeMVar chan' - , psaOnRequestIds = State.onRequestIds - objectDiffusionTracer - decisionTracer - globalStateVar - objectPoolWriter - peerAddr - , psaOnRequestObjects = State.onRequestObjects - objectDiffusionTracer - decisionTracer - globalStateVar - objectPoolWriter - peerAddr - , psaOnReceivedIds = State.onReceiveIds - objectDiffusionTracer - decisionTracer - globalStateVar - objectPoolWriter - peerAddr - , psaOnReceivedObjects = State.onReceiveObjects - objectDiffusionTracer - decisionTracer - globalStateVar - objectPoolWriter - objectPoolSem - peerAddr - } - ) - -- register the peer in the global state now - atomically $ modifyTVar globalStateVar registerPeerGlobalState - -- initialization is complete for this peer, it can proceed and - -- interact through its given API - return inboundPeerAPI - where + -- create the API for this peer, obtaining a channel for it in the process + !inboundPeerAPI <- + modifyMVar + decisionChannelsVar + \peerToChannel -> do + -- We get a channel for this peer, and register it in peerToChannel. + (chan', peerToChannel') <- + case peerToChannel Map.!? 
peerAddr of + -- Checks if a channel already exists for this peer, in case we reuse it + Just chan -> return (chan, peerToChannel) + -- Otherwise create a new channel and register it + Nothing -> do + chan <- newEmptyMVar + return (chan, Map.insert peerAddr chan peerToChannel) + return + ( peerToChannel' + , PeerStateAPI + { psaReadDecision = takeMVar chan' + , psaOnRequestIds = + State.onRequestIds + objectDiffusionTracer + decisionTracer + globalStateVar + objectPoolWriter + peerAddr + , psaOnRequestObjects = + State.onRequestObjects + objectDiffusionTracer + decisionTracer + globalStateVar + objectPoolWriter + peerAddr + , psaOnReceivedIds = + State.onReceiveIds + objectDiffusionTracer + decisionTracer + globalStateVar + objectPoolWriter + peerAddr + , psaOnReceivedObjects = + State.onReceiveObjects + objectDiffusionTracer + decisionTracer + globalStateVar + objectPoolWriter + objectPoolSem + peerAddr + } + ) + -- register the peer in the global state now + atomically $ modifyTVar globalStateVar registerPeerGlobalState + -- initialization is complete for this peer, it can proceed and + -- interact through its given API + return inboundPeerAPI + where unregisterPeer :: PeerStateAPI m objectId object -> m () unregisterPeer _ = -- the handler is a short blocking operation, thus we need to use -- `uninterruptibleMask_` uninterruptibleMask_ do - -- unregister the peer from the global state - atomically $ modifyTVar globalStateVar unregisterPeerGlobalState - -- remove the channel of this peer from the global channel map - modifyMVar_ - decisionChannelsVar - \peerToChannel -> - return $ Map.delete peerAddr peerToChannel + -- unregister the peer from the global state + atomically $ modifyTVar globalStateVar unregisterPeerGlobalState + -- remove the channel of this peer from the global channel map + modifyMVar_ + decisionChannelsVar + \peerToChannel -> + return $ Map.delete peerAddr peerToChannel registerPeerGlobalState :: DecisionGlobalState peerAddr objectId object -> 
@@ -212,7 +216,12 @@ withPeer ) = Map.alterF ( \case - Nothing -> error ("ObjectDiffusion.withPeer: can't unregister peer " ++ show peerAddr ++ " because it isn't registered") + Nothing -> + error + ( "ObjectDiffusion.withPeer: can't unregister peer " + ++ show peerAddr + ++ " because it isn't registered" + ) Just a -> (a, Nothing) ) peerAddr @@ -280,7 +289,7 @@ decisionLogicThread decisionTracer countersTracer ObjectPoolWriter{opwHasObject} -- Send the decisions to the corresponding peers -- Note that decisions are incremental, so we merge the old one to the new one (using the semigroup instance) if there is an old one traverse_ (uncurry putMVar) peerToChannelAndDecision - + traceWith countersTracer (makeObjectDiffusionCounters globalState') -- `5ms` delay diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/State.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/State.hs index ca0f2fff30..ee10c944be 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/State.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/State.hs @@ -22,13 +22,13 @@ import Data.Foldable qualified as Foldable import Data.Map.Strict (Map, findWithDefault) import Data.Map.Strict qualified as Map import Data.Sequence.Strict qualified as StrictSeq -import Data.Set ((\\), Set) +import Data.Set (Set, (\\)) import Data.Set qualified as Set import GHC.Stack (HasCallStack) import Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.Types import Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.ObjectPool.API (ObjectPoolWriter (..)) -import Ouroboros.Consensus.Util.IOLike (MonadMask, MonadMVar, bracket_) -import Ouroboros.Network.Protocol.ObjectDiffusion.Type (NumObjectIdsReq, NumObjectIdsAck) +import Ouroboros.Consensus.Util.IOLike (MonadMVar, 
MonadMask, bracket_) +import Ouroboros.Network.Protocol.ObjectDiffusion.Type (NumObjectIdsAck, NumObjectIdsReq) onRequestIds :: forall m peerAddr object objectId. @@ -48,7 +48,8 @@ onRequestIds odTracer decisionTracer globalStateVar _objectPoolWriter peerAddr n globalStateVar ( \globalState -> let globalState' = onRequestIdsImpl peerAddr numIdsToAck numIdsToReq globalState - in (globalState', globalState') ) + in (globalState', globalState') + ) traceWith odTracer (TraceObjectDiffusionInboundRequestedIds (fromIntegral numIdsToReq)) traceWith decisionTracer (TraceDecisionLogicGlobalStateUpdated "onRequestIds" globalState') @@ -73,30 +74,33 @@ onRequestIdsImpl globalState { dgsPeerStates = dgsPeerStates' } - where - dgsPeerStates' = - Map.adjust - (\ps@DecisionPeerState{dpsNumIdsInflight, dpsOutstandingFifo, dpsObjectsAvailableIds} -> - -- we isolate the longest prefix of outstandingFifo that matches our ack criteria (see above in computeAck doc) - let -- We compute the ids to ack and new state of the FIFO based on the number of ids to ack given by the decision logic - (idsToAck, dpsOutstandingFifo') = - StrictSeq.splitAt - (fromIntegral numIdsToAck) - dpsOutstandingFifo + where + dgsPeerStates' = + Map.adjust + ( \ps@DecisionPeerState{dpsNumIdsInflight, dpsOutstandingFifo, dpsObjectsAvailableIds} -> + -- we isolate the longest prefix of outstandingFifo that matches our ack criteria (see above in computeAck doc) + let + -- We compute the ids to ack and new state of the FIFO based on the number of ids to ack given by the decision logic + (idsToAck, dpsOutstandingFifo') = + StrictSeq.splitAt + (fromIntegral numIdsToAck) + dpsOutstandingFifo - -- We remove the acknowledged ids from dpsObjectsAvailableIds if they were present. 
- -- We need to do that because objects that were advertised by this corresponding outbound peer - -- but never downloaded because we already have them in pool were consequently never removed - -- from dpsObjectsAvailableIds by onRequestObjects - dpsObjectsAvailableIds' = - Foldable.foldl' (\set objectId -> Set.delete objectId set) dpsObjectsAvailableIds idsToAck - - in ps{dpsNumIdsInflight = dpsNumIdsInflight + numIdsToReq - , dpsOutstandingFifo = dpsOutstandingFifo' - , dpsObjectsAvailableIds = dpsObjectsAvailableIds'} - ) - peerAddr - dgsPeerStates + -- We remove the acknowledged ids from dpsObjectsAvailableIds if they were present. + -- We need to do that because objects that were advertised by this corresponding outbound peer + -- but never downloaded because we already have them in pool were consequently never removed + -- from dpsObjectsAvailableIds by onRequestObjects + dpsObjectsAvailableIds' = + Foldable.foldl' (\set objectId -> Set.delete objectId set) dpsObjectsAvailableIds idsToAck + in + ps + { dpsNumIdsInflight = dpsNumIdsInflight + numIdsToReq + , dpsOutstandingFifo = dpsOutstandingFifo' + , dpsObjectsAvailableIds = dpsObjectsAvailableIds' + } + ) + peerAddr + dgsPeerStates onRequestObjects :: forall m peerAddr object objectId. 
@@ -115,7 +119,8 @@ onRequestObjects odTracer decisionTracer globalStateVar _objectPoolWriter peerAd globalStateVar ( \globalState -> let globalState' = onRequestObjectsImpl peerAddr objectIds globalState - in (globalState', globalState') ) + in (globalState', globalState') + ) traceWith odTracer (TraceObjectDiffusionInboundRequestedObjects (Set.size objectIds)) traceWith decisionTracer (TraceDecisionLogicGlobalStateUpdated "onRequestObjects" globalState') @@ -138,19 +143,22 @@ onRequestObjectsImpl { dgsObjectsInflightMultiplicities = dgsObjectsInflightMultiplicities' , dgsPeerStates = dgsPeerStates' } - where - dgsObjectsInflightMultiplicities' = - Foldable.foldl' - increaseCount - dgsObjectsInflightMultiplicities - objectIds - dgsPeerStates' = - Map.adjust - (\ps@DecisionPeerState{dpsObjectsAvailableIds, dpsObjectsInflightIds} -> - ps{dpsObjectsAvailableIds = dpsObjectsAvailableIds \\ objectIds, - dpsObjectsInflightIds = dpsObjectsInflightIds `Set.union` objectIds}) - peerAddr - dgsPeerStates + where + dgsObjectsInflightMultiplicities' = + Foldable.foldl' + increaseCount + dgsObjectsInflightMultiplicities + objectIds + dgsPeerStates' = + Map.adjust + ( \ps@DecisionPeerState{dpsObjectsAvailableIds, dpsObjectsInflightIds} -> + ps + { dpsObjectsAvailableIds = dpsObjectsAvailableIds \\ objectIds + , dpsObjectsInflightIds = dpsObjectsInflightIds `Set.union` objectIds + } + ) + peerAddr + dgsPeerStates -- | Wrapper around `onReceiveIdsImpl`. 
-- Obtain the `hasObject` function atomically from the STM context and @@ -175,8 +183,10 @@ onReceiveIds odTracer decisionTracer globalStateVar objectPoolWriter peerAddr nu hasObject <- opwHasObject objectPoolWriter stateTVar globalStateVar - ( \globalState -> let globalState' = onReceiveIdsImpl hasObject peerAddr numIdsInitiallyRequested receivedIds globalState - in (globalState', globalState') ) + ( \globalState -> + let globalState' = onReceiveIdsImpl hasObject peerAddr numIdsInitiallyRequested receivedIds globalState + in (globalState', globalState') + ) traceWith odTracer (TraceObjectDiffusionInboundReceivedIds (length receivedIds)) traceWith decisionTracer (TraceDecisionLogicGlobalStateUpdated "onReceiveIds" globalState') @@ -205,38 +215,38 @@ onReceiveIdsImpl globalState { dgsPeerStates = dgsPeerStates' } - where - peerState@DecisionPeerState - { dpsOutstandingFifo - , dpsObjectsInflightIds - , dpsObjectsAvailableIds - , dpsNumIdsInflight - } = + where + peerState@DecisionPeerState + { dpsOutstandingFifo + , dpsObjectsInflightIds + , dpsObjectsAvailableIds + , dpsNumIdsInflight + } = findWithDefault (error "ObjectDiffusion.onReceiveIdsImpl: the peer should appear in dgsPeerStates") peerAddr dgsPeerStates - - -- Actually we don't need to filter out availableIds, because - -- makeDecisions is the only reader of dpsObjectsAvailableIds - -- and will filter it when needed with the actualized state of the object - -- pool. - dpsObjectsAvailableIds' = - dpsObjectsAvailableIds `Set.union` Set.fromList receivedIds - -- Add received objectIds to `dpsOutstandingFifo`. - dpsOutstandingFifo' = dpsOutstandingFifo <> StrictSeq.fromList receivedIds + -- Actually we don't need to filter out availableIds, because + -- makeDecisions is the only reader of dpsObjectsAvailableIds + -- and will filter it when needed with the actualized state of the object + -- pool. 
+ dpsObjectsAvailableIds' = + dpsObjectsAvailableIds `Set.union` Set.fromList receivedIds + + -- Add received objectIds to `dpsOutstandingFifo`. + dpsOutstandingFifo' = dpsOutstandingFifo <> StrictSeq.fromList receivedIds + + peerState' = + assert + (dpsNumIdsInflight >= numIdsInitiallyRequested) + peerState + { dpsObjectsAvailableIds = dpsObjectsAvailableIds' + , dpsOutstandingFifo = dpsOutstandingFifo' + , dpsNumIdsInflight = dpsNumIdsInflight - numIdsInitiallyRequested + } - peerState' = - assert - (dpsNumIdsInflight >= numIdsInitiallyRequested) - peerState - { dpsObjectsAvailableIds = dpsObjectsAvailableIds' - , dpsOutstandingFifo = dpsOutstandingFifo' - , dpsNumIdsInflight = dpsNumIdsInflight - numIdsInitiallyRequested - } - - dgsPeerStates' = Map.insert peerAddr peerState' dgsPeerStates + dgsPeerStates' = Map.insert peerAddr peerState' dgsPeerStates -- | Wrapper around `onReceiveObjectsImpl` that updates and traces the -- global state TVar. @@ -271,12 +281,13 @@ onReceiveObjects odTracer tracer globalStateVar objectPoolWriter poolSem peerAdd stateTVar globalStateVar ( \globalState -> - let globalState' = - onReceiveObjectsImpl - peerAddr - objectsReceivedMap - globalState - in (globalState', globalState')) + let globalState' = + onReceiveObjectsImpl + peerAddr + objectsReceivedMap + globalState + in (globalState', globalState') + ) traceWith odTracer (TraceObjectDiffusionInboundReceivedObjects (length objectsReceived)) traceWith tracer (TraceDecisionLogicGlobalStateUpdated "onReceiveObjects" globalState') submitObjectsToPool @@ -311,43 +322,43 @@ onReceiveObjectsImpl , dgsObjectsOwtPoolMultiplicities = dgsObjectsOwtPoolMultiplicities' , dgsPeerStates = dgsPeerStates' } - where - objectsReceivedIds = Map.keysSet objectsReceived + where + objectsReceivedIds = Map.keysSet objectsReceived - peerState@DecisionPeerState - { dpsObjectsInflightIds - , dpsObjectsOwtPool - } = + peerState@DecisionPeerState + { dpsObjectsInflightIds + , dpsObjectsOwtPool + } = 
findWithDefault (error "ObjectDiffusion.onReceiveObjectsImpl: the peer should appear in dgsPeerStates") peerAddr dgsPeerStates - -- subtract requested from in-flight - dpsObjectsInflightIds' = - dpsObjectsInflightIds \\ objectsReceivedIds + -- subtract requested from in-flight + dpsObjectsInflightIds' = + dpsObjectsInflightIds \\ objectsReceivedIds - dgsObjectsInflightMultiplicities' = - Foldable.foldl' - decreaseCount - dgsObjectsInflightMultiplicities - objectsReceivedIds - - dpsObjectsOwtPool' = dpsObjectsOwtPool <> objectsReceived + dgsObjectsInflightMultiplicities' = + Foldable.foldl' + decreaseCount + dgsObjectsInflightMultiplicities + objectsReceivedIds - dgsObjectsOwtPoolMultiplicities' = - Foldable.foldl' - increaseCount - dgsObjectsOwtPoolMultiplicities - objectsReceivedIds + dpsObjectsOwtPool' = dpsObjectsOwtPool <> objectsReceived - peerState' = - peerState - { dpsObjectsInflightIds = dpsObjectsInflightIds' - , dpsObjectsOwtPool = dpsObjectsOwtPool' - } + dgsObjectsOwtPoolMultiplicities' = + Foldable.foldl' + increaseCount + dgsObjectsOwtPoolMultiplicities + objectsReceivedIds - dgsPeerStates' = Map.insert peerAddr peerState' dgsPeerStates + peerState' = + peerState + { dpsObjectsInflightIds = dpsObjectsInflightIds' + , dpsObjectsOwtPool = dpsObjectsOwtPool' + } + + dgsPeerStates' = Map.insert peerAddr peerState' dgsPeerStates -- | Should be called by `acknowledgeIds` submitObjectsToPool :: @@ -374,29 +385,33 @@ submitObjectsToPool (ObjectPoolSem poolSem) peerAddr objects = do - let getId = opwObjectId objectPoolWriter + let getId = opwObjectId objectPoolWriter - bracket_ - (atomically $ waitTSem poolSem) - (atomically $ signalTSem poolSem) - $ do + bracket_ + (atomically $ waitTSem poolSem) + (atomically $ signalTSem poolSem) + $ do + -- When the lock over the object pool is obtained + opwAddObjects objectPoolWriter (Map.elems objects) + traceWith odTracer $ + TraceObjectDiffusionInboundAddedObjects $ + length objects - -- When the lock over the object 
pool is obtained - opwAddObjects objectPoolWriter (Map.elems objects) - traceWith odTracer $ - TraceObjectDiffusionInboundAddedObjects $ length objects - - -- Move objects from `owtPool` to `inPool` state - globalState' <- atomically $ stateTVar globalStateVar $ \globalState -> - let globalState' = - Foldable.foldl' - (\st object -> updateStateWhenObjectAddedToPool (getId object) st) - globalState - objects - in (globalState', globalState') - traceWith decisionTracer (TraceDecisionLogicGlobalStateUpdated "submitObjectsToPool.updateStateWhenObjectAddedToPool" globalState') - - where + -- Move objects from `owtPool` to `inPool` state + globalState' <- atomically $ stateTVar globalStateVar $ \globalState -> + let globalState' = + Foldable.foldl' + (\st object -> updateStateWhenObjectAddedToPool (getId object) st) + globalState + objects + in (globalState', globalState') + traceWith + decisionTracer + ( TraceDecisionLogicGlobalStateUpdated + "submitObjectsToPool.updateStateWhenObjectAddedToPool" + globalState' + ) + where updateStateWhenObjectAddedToPool :: objectId -> DecisionGlobalState peerAddr objectId object -> @@ -411,11 +426,12 @@ submitObjectsToPool { dgsObjectsOwtPoolMultiplicities = dgsObjectsOwtPoolMultiplicities' , dgsPeerStates = dgsPeerStates' } - where + where dgsObjectsOwtPoolMultiplicities' = decreaseCount dgsObjectsOwtPoolMultiplicities objectId dgsPeerStates' = Map.adjust - (\ps@DecisionPeerState{dpsObjectsOwtPool} -> ps{dpsObjectsOwtPool = Map.delete objectId dpsObjectsOwtPool}) - peerAddr - dgsPeerStates + ( \ps@DecisionPeerState{dpsObjectsOwtPool} -> ps{dpsObjectsOwtPool = Map.delete objectId dpsObjectsOwtPool} + ) + peerAddr + dgsPeerStates diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Types.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Types.hs index f71d317afd..d0bb04ff6e 100644 --- 
a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Types.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Types.hs @@ -1,13 +1,13 @@ {-# LANGUAGE DeriveGeneric #-} +{-# LANGUAGE DerivingVia #-} {-# LANGUAGE ExistentialQuantification #-} {-# LANGUAGE FlexibleContexts #-} {-# LANGUAGE GADTs #-} +{-# LANGUAGE GeneralizedNewtypeDeriving #-} {-# LANGUAGE ImportQualifiedPost #-} {-# LANGUAGE LambdaCase #-} {-# LANGUAGE NamedFieldPuns #-} {-# LANGUAGE ScopedTypeVariables #-} -{-# LANGUAGE GeneralizedNewtypeDeriving #-} -{-# LANGUAGE DerivingVia #-} module Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.Types ( -- * DecisionPeerState @@ -48,26 +48,26 @@ module Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.Types , newObjectPoolSem ) where -import Control.Concurrent.Class.MonadSTM.Strict (MonadSTM, atomically, StrictTVar, newTVarIO) +import Control.Concurrent.Class.MonadSTM.Strict (MonadSTM, StrictTVar, atomically, newTVarIO) import Control.Concurrent.Class.MonadSTM.TSem (TSem, newTSem) +import Control.DeepSeq (NFData) import Control.Exception (Exception (..)) import Control.Monad.Class.MonadTime.SI +import Data.Map.Merge.Strict qualified as Map import Data.Map.Strict (Map) import Data.Map.Strict qualified as Map -import Data.Map.Merge.Strict qualified as Map import Data.Monoid (Sum (..)) import Data.Sequence.Strict (StrictSeq) +import Data.Sequence.Strict qualified as StrictSeq import Data.Set (Set) import Data.Set qualified as Set +import Data.Word (Word64) import GHC.Generics (Generic) import NoThunks.Class (NoThunks (..)) -import Ouroboros.Network.Protocol.ObjectDiffusion.Type -import System.Random (StdGen) -import Data.Word (Word64) import Ouroboros.Network.ControlMessage (ControlMessage) -import Control.DeepSeq (NFData) +import Ouroboros.Network.Protocol.ObjectDiffusion.Type import Quiet (Quiet (..)) -import qualified 
Data.Sequence.Strict as StrictSeq +import System.Random (StdGen) -- | Semaphore to guard access to the ObjectPool newtype ObjectPoolSem m = ObjectPoolSem (TSem m) @@ -158,17 +158,22 @@ instance NoThunks (DecisionGlobalState peerAddr objectId object) -- | Merge dpsObjectsAvailableIds from all peers of the global state. -dgsObjectsAvailableMultiplicities :: Ord objectId => DecisionGlobalState peerAddr objectId object -> Map objectId ObjectMultiplicity +dgsObjectsAvailableMultiplicities :: + Ord objectId => DecisionGlobalState peerAddr objectId object -> Map objectId ObjectMultiplicity dgsObjectsAvailableMultiplicities DecisionGlobalState{dgsPeerStates} = Map.unionsWith (+) - ( Map.fromSet (const 1) . dpsObjectsAvailableIds <$> Map.elems dgsPeerStates) - -nonZeroCountMapDiff :: (Ord k) => Map k ObjectMultiplicity -> Map k ObjectMultiplicity -> Map k ObjectMultiplicity -nonZeroCountMapDiff = Map.merge - Map.preserveMissing - Map.dropMissing - (Map.zipWithMaybeMatched (\_ count1 count2 -> let c = count1 - count2 in if c > 0 then Just c else Nothing)) + (Map.fromSet (const 1) . 
dpsObjectsAvailableIds <$> Map.elems dgsPeerStates) + +nonZeroCountMapDiff :: + Ord k => Map k ObjectMultiplicity -> Map k ObjectMultiplicity -> Map k ObjectMultiplicity +nonZeroCountMapDiff = + Map.merge + Map.preserveMissing + Map.dropMissing + ( Map.zipWithMaybeMatched + (\_ count1 count2 -> let c = count1 - count2 in if c > 0 then Just c else Nothing) + ) type DecisionGlobalStateVar m peerAddr objectId object = StrictTVar m (DecisionGlobalState peerAddr objectId object) @@ -267,9 +272,9 @@ newtype NumObjectsProcessed } deriving (Eq, Ord, NFData, Generic) deriving newtype (Num, Enum, Real, Integral, Bounded, NoThunks) - deriving (Semigroup) via (Sum Word64) - deriving (Monoid) via (Sum Word64) - deriving (Show) via (Quiet NumObjectsProcessed) + deriving Semigroup via (Sum Word64) + deriving Monoid via (Sum Word64) + deriving Show via (Quiet NumObjectsProcessed) newtype ObjectMultiplicity = ObjectMultiplicity @@ -277,16 +282,16 @@ newtype ObjectMultiplicity } deriving (Eq, Ord, NFData, Generic) deriving newtype (Num, Enum, Real, Integral, Bounded, NoThunks) - deriving (Semigroup) via (Sum Word64) - deriving (Monoid) via (Sum Word64) - deriving (Show) via (Quiet ObjectMultiplicity) + deriving Semigroup via (Sum Word64) + deriving Monoid via (Sum Word64) + deriving Show via (Quiet ObjectMultiplicity) increaseCount :: Ord k => Map k ObjectMultiplicity -> k -> Map k ObjectMultiplicity increaseCount mmap k = Map.alter - (\case - Nothing -> Just $! 1 - Just cnt -> Just $! succ cnt + ( \case + Nothing -> Just $! 1 + Just cnt -> Just $! 
succ cnt ) k mmap From b7ef54cfa11f6fc07b14e60b71fbb26966b96e52 Mon Sep 17 00:00:00 2001 From: Thomas BAGREL Date: Tue, 14 Oct 2025 11:23:30 +0200 Subject: [PATCH 24/43] Further polishing --- .../ObjectDiffusion/Inbound/V2/Decision.hs | 24 ++++++------------- .../ObjectDiffusion/Inbound/V2/Policy.hs | 8 +++---- .../ObjectDiffusion/Inbound/V2/Types.hs | 15 +++++++----- 3 files changed, 20 insertions(+), 27 deletions(-) diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Decision.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Decision.hs index d80699178e..9fc919f14a 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Decision.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Decision.hs @@ -134,17 +134,14 @@ orderPeers :: Map peerAddr (DecisionPeerState objectId object) -> StdGen -> ([(peerAddr, DecisionPeerState objectId object)], StdGen) -orderPeers = undefined +orderPeers = undefined -- TODO --- TODO: be careful about additive semigroup instance of PeerDecision --- e.g. what if an object is first available and picked to download, but the download request isn't emitted yet --- then the object is received from another peer, so we can ack it from our peer on the next makeDecision call --- So later when the download request actually takes place, we don't need the object anymore, and it will no --- longer be part of dpsObjectsAvailableIds of the peer! But also no longer in the FIFO --- So if the requestIds doing the ack has been made before the requestObject, then the server --- won't be able to serve the object. 
- --- pdNumIdsToAck should probably be additive, because we can't recompute/recover how many ids were pre-acked before (as they have been removed from the FIFO and from dpsObjectsAvailableIds) +data DownloadPickState peerAddr objectId + = DownloadPickState + { totalNumObjectsToReq :: !NumObjectsReq + , objectMultiplicity :: ObjectMultiplicity + , peersToObjectsToReq :: Map peerAddr (Set objectId) + } -- | This function could just be pure if it hadn't be for the rng used to order peers pickObjectsToReq :: @@ -281,10 +278,3 @@ pickObjectsToReq poolHasObject DecisionPolicy } -- Or we keep the state as is if we don't select this peer else st - -data DownloadPickState peerAddr objectId - = DownloadPickState - { totalNumObjectsToReq :: !NumObjectsReq - , objectMultiplicity :: ObjectMultiplicity - , peersToObjectsToReq :: Map peerAddr (Set objectId) - } diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Policy.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Policy.hs index ece542d301..b840b0a0f2 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Policy.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Policy.hs @@ -15,11 +15,11 @@ data DecisionPolicy = DecisionPolicy { dpMaxNumObjectIdsReq :: !NumObjectIdsReq -- ^ a maximal number of objectIds requested at once. , dpMaxNumObjectsOutstanding :: !NumObjectsOutstanding - -- ^ maximal number of dpsOutstandingFifo. + -- ^ maximal number of objects in the outstanding FIFO. , dpMaxNumObjectsInflightPerPeer :: !NumObjectsReq - -- ^ a limit of objects in-flight from a single peer, plus or minus 1. + -- ^ a limit of objects in-flight from a single peer. 
, dpMaxNumObjectsInflightTotal :: !NumObjectsReq - -- ^ a limit of object size in-flight from all peers, plus or minus 1 + -- ^ a limit of objects in-flight from all peers for this node. , dpMaxObjectInflightMultiplicity :: !ObjectMultiplicity -- ^ from how many peers download the `objectId` simultaneously } @@ -29,7 +29,7 @@ defaultDecisionPolicy :: DecisionPolicy defaultDecisionPolicy = DecisionPolicy { dpMaxNumObjectIdsReq = 3 - , dpMaxNumObjectsOutstanding = 10 -- must be the same as objectDiffusionMaxUnacked + , dpMaxNumObjectsOutstanding = 10 -- must be the same as the outbound peer's value , dpMaxNumObjectsInflightPerPeer = 6 , dpMaxNumObjectsInflightTotal = 20 , dpMaxObjectInflightMultiplicity = 2 diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Types.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Types.hs index d0bb04ff6e..ed81cff952 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Types.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Types.hs @@ -198,14 +198,17 @@ newDecisionGlobalStateVar rng = -- | Decision made by the decision logic. Each peer will receive a 'Decision'. -- -- /note:/ it is rather non-standard to represent a choice between requesting --- `objectId`s and `object`'s as a product rather than a sum type. The client will +-- `objectId`s and `object`'s as a product rather than a sum type. The client will -- need to download `object`s first and then send a request for more objectIds (and --- acknowledge some `objectId`s). Due to pipelining each client will request --- decision from the decision logic quite often (every two pipelined requests), --- but with this design a decision once taken will make the peer non-active +-- acknowledge some `objectId`s). 
Due to pipelining each client will request +-- decision from the decision logic quite often (every two pipelined requests). +-- +-- TODO: in the previous design, we prefiltered active peers before calling +-- `makeDecision`, so that a decision once taken would make the peer non-active -- (e.g. it won't be returned by `filterActivePeers`) for longer, and thus the --- expensive `makeDecision` computation will not need to take that peer into --- account. +-- expensive `makeDecision` computation would not need to take that peer into +-- account. This is no longer the case, but we could reintroduce this optimization +-- if needed. data PeerDecision objectId object = PeerDecision { pdNumIdsToAck :: !NumObjectIdsAck -- ^ objectId's to acknowledge From 629b1b262696b14be16f1c8f116d50e9451de6af Mon Sep 17 00:00:00 2001 From: Thomas BAGREL Date: Tue, 14 Oct 2025 12:53:38 +0200 Subject: [PATCH 25/43] Remove useless function --- .../MiniProtocol/ObjectDiffusion/Inbound/V2/Types.hs | 11 ----------- 1 file changed, 11 deletions(-) diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Types.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Types.hs index ed81cff952..0f95ef8f41 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Types.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Types.hs @@ -41,7 +41,6 @@ module Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.Types -- * Helpers for ObjectMultiplicity maps , increaseCount , decreaseCount - , nonZeroCountMapDiff -- * Object pool semaphore , ObjectPoolSem (..) @@ -165,16 +164,6 @@ dgsObjectsAvailableMultiplicities DecisionGlobalState{dgsPeerStates} = (+) (Map.fromSet (const 1) . 
dpsObjectsAvailableIds <$> Map.elems dgsPeerStates) -nonZeroCountMapDiff :: - Ord k => Map k ObjectMultiplicity -> Map k ObjectMultiplicity -> Map k ObjectMultiplicity -nonZeroCountMapDiff = - Map.merge - Map.preserveMissing - Map.dropMissing - ( Map.zipWithMaybeMatched - (\_ count1 count2 -> let c = count1 - count2 in if c > 0 then Just c else Nothing) - ) - type DecisionGlobalStateVar m peerAddr objectId object = StrictTVar m (DecisionGlobalState peerAddr objectId object) From 3ae6dc151077626acc246ca9d73952c9ec566ada Mon Sep 17 00:00:00 2001 From: Nicolas BACQUEY Date: Tue, 14 Oct 2025 19:09:31 +0200 Subject: [PATCH 26/43] Remove `StdGen` from global state --- .../ObjectDiffusion/Inbound/V2/Decision.hs | 69 +++++++++---------- .../ObjectDiffusion/Inbound/V2/Registry.hs | 14 ++-- .../ObjectDiffusion/Inbound/V2/Types.hs | 11 +-- 3 files changed, 39 insertions(+), 55 deletions(-) diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Decision.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Decision.hs index 9fc919f14a..0a72a396e8 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Decision.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Decision.hs @@ -1,11 +1,8 @@ -{-# LANGUAGE BangPatterns #-} {-# LANGUAGE BlockArguments #-} {-# LANGUAGE DerivingVia #-} {-# LANGUAGE ImportQualifiedPost #-} -{-# LANGUAGE LambdaCase #-} {-# LANGUAGE NamedFieldPuns #-} {-# LANGUAGE ScopedTypeVariables #-} -{-# LANGUAGE TupleSections #-} {-# OPTIONS_GHC -Wno-unrecognised-pragmas #-} module Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.Decision @@ -16,26 +13,20 @@ module Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.Decision , makeDecisions ) where -import Control.Arrow ((>>>)) -import Control.Exception 
(assert) -import Data.Bifunctor (second) import Data.Foldable qualified as Foldable import Data.Hashable -import Data.List qualified as List import Data.Map.Merge.Strict qualified as Map import Data.Map.Strict (Map) import Data.Map.Strict qualified as Map -import Data.Maybe (mapMaybe) import Data.Sequence.Strict (StrictSeq) import Data.Sequence.Strict qualified as StrictSeq import Data.Set (Set) import Data.Set qualified as Set -import GHC.Stack (HasCallStack) import Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.Policy import Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.State import Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.Types import Ouroboros.Network.Protocol.ObjectDiffusion.Type -import System.Random (StdGen, random) +import System.Random (StdGen) strictSeqToSet :: Ord a => StrictSeq a -> Set a strictSeqToSet = Set.fromList . Foldable.toList @@ -47,25 +38,23 @@ makeDecisions :: , Ord objectId , Hashable peerAddr ) => + StdGen -> (objectId -> Bool) -> -- | decision decisionPolicy DecisionPolicy -> -- | decision context DecisionGlobalState peerAddr objectId object -> - ( DecisionGlobalState peerAddr objectId object - , Map peerAddr (PeerDecision objectId object) - ) -makeDecisions hasObject decisionPolicy globalState = + Map peerAddr (PeerDecision objectId object) +makeDecisions rng hasObject decisionPolicy globalState = -- We do it in two steps, because computing the acknowledgment tell which objects from dpsObjectsAvailableIds sets of each peer won't actually be available anymore (as soon as we ack them), -- so that the pickObjectsToReq function can take this into account. 
let (ackAndRequestIdsDecisions, peerToIdsToAck) = computeAck hasObject decisionPolicy globalState - (globalState', peersToObjectsToReq) = pickObjectsToReq hasObject decisionPolicy globalState peerToIdsToAck - completeDecisions = - Map.intersectionWith - (\decision objectsToReqIds -> decision{pdObjectsToReqIds = objectsToReqIds}) - ackAndRequestIdsDecisions - peersToObjectsToReq - in (globalState', completeDecisions) + peersToObjectsToReq = pickObjectsToReq rng hasObject decisionPolicy globalState peerToIdsToAck + in + Map.intersectionWith + (\decision objectsToReqIds -> decision{pdObjectsToReqIds = objectsToReqIds}) + ackAndRequestIdsDecisions + peersToObjectsToReq -- | The ids to ack are the longest prefix of outstandingFifo of each peer that match the following criteria: -- * either the object is owt pool for the peer who has downloaded it @@ -131,10 +120,11 @@ computeAck poolHasObject DecisionPolicy{dpMaxNumObjectIdsReq, dpMaxNumObjectsOut ) orderPeers :: - Map peerAddr (DecisionPeerState objectId object) -> + Hashable peerAddr => StdGen -> - ([(peerAddr, DecisionPeerState objectId object)], StdGen) -orderPeers = undefined -- TODO + Map peerAddr (DecisionPeerState objectId object) -> + [(peerAddr, DecisionPeerState objectId object)] +orderPeers _rng = undefined -- TODO data DownloadPickState peerAddr objectId = DownloadPickState @@ -150,6 +140,7 @@ pickObjectsToReq :: , Ord objectId , Hashable peerAddr ) => + StdGen -> (objectId -> Bool) -> DecisionPolicy -> DecisionGlobalState peerAddr objectId object -> @@ -157,20 +148,24 @@ pickObjectsToReq :: -- we should treat these ids as not available anymore for the purpose of picking objects to request Map peerAddr (Set objectId) -> -- | new global state (with just RNG updated), and objects to request from each peer - (DecisionGlobalState peerAddr objectId object, Map peerAddr (Set objectId)) -pickObjectsToReq poolHasObject DecisionPolicy - { dpMaxNumObjectsInflightPerPeer - , dpMaxNumObjectsInflightTotal - , 
dpMaxObjectInflightMultiplicity - } globalState@DecisionGlobalState - { dgsRng - , dgsPeerStates - , dgsObjectsInflightMultiplicities - , dgsObjectsOwtPoolMultiplicities - } peerToIdsToAck = - (globalState{dgsRng = dgsRng'}, peersToObjectsToReq) + Map peerAddr (Set objectId) +pickObjectsToReq + rng + poolHasObject + DecisionPolicy + { dpMaxNumObjectsInflightPerPeer + , dpMaxNumObjectsInflightTotal + , dpMaxObjectInflightMultiplicity + } + DecisionGlobalState + { dgsPeerStates + , dgsObjectsInflightMultiplicities + , dgsObjectsOwtPoolMultiplicities + } + peerToIdsToAck = + peersToObjectsToReq where - (orderedPeers, dgsRng') = orderPeers dgsPeerStates dgsRng + orderedPeers = orderPeers rng dgsPeerStates -- We want to map each objectId to the sorted list of peers that can provide it -- For each peer we also indicate how many objects it has in flight at the moment diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Registry.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Registry.hs index e26d8038ff..ec3569bd91 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Registry.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Registry.hs @@ -40,6 +40,7 @@ import Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.State qualifi import Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.Types import Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.ObjectPool.API import Ouroboros.Network.Protocol.ObjectDiffusion.Type (NumObjectIdsAck, NumObjectIdsReq) +import System.Random (initStdGen) -- | Communication channels between `ObjectDiffusion` mini-protocol inbound side -- and decision logic. 
@@ -268,14 +269,11 @@ decisionLogicThread decisionTracer countersTracer ObjectPoolWriter{opwHasObject} -- if there are too many inbound connections. threadDelay _DECISION_LOOP_DELAY - -- Make decisions and update the global state var accordingly - (globalState', decisions) <- atomically $ do + globalState <- atomically $ readTVar globalStateVar + decisions <- atomically $ do + rng <- initStdGen hasObject <- opwHasObject - stateTVar - globalStateVar - \globalState -> - let (globalState', decisions) = makeDecisions hasObject decisionPolicy globalState - in ((globalState', decisions), globalState') + pure $ makeDecisions rng hasObject decisionPolicy globalState traceWith decisionTracer (TraceDecisionLogicGlobalStateUpdated "decisionLogicThread" globalState') traceWith decisionTracer (TraceDecisionLogicDecisionsMade decisions) @@ -290,7 +288,7 @@ decisionLogicThread decisionTracer countersTracer ObjectPoolWriter{opwHasObject} -- Note that decisions are incremental, so we merge the old one to the new one (using the semigroup instance) if there is an old one traverse_ (uncurry putMVar) peerToChannelAndDecision - traceWith countersTracer (makeObjectDiffusionCounters globalState') + traceWith countersTracer (makeObjectDiffusionCounters globalState) -- `5ms` delay _DECISION_LOOP_DELAY :: DiffTime diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Types.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Types.hs index 0f95ef8f41..bc778a5b79 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Types.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Types.hs @@ -52,21 +52,17 @@ import Control.Concurrent.Class.MonadSTM.TSem (TSem, newTSem) import Control.DeepSeq (NFData) import Control.Exception (Exception (..)) import 
Control.Monad.Class.MonadTime.SI -import Data.Map.Merge.Strict qualified as Map import Data.Map.Strict (Map) import Data.Map.Strict qualified as Map import Data.Monoid (Sum (..)) import Data.Sequence.Strict (StrictSeq) -import Data.Sequence.Strict qualified as StrictSeq import Data.Set (Set) -import Data.Set qualified as Set import Data.Word (Word64) import GHC.Generics (Generic) import NoThunks.Class (NoThunks (..)) import Ouroboros.Network.ControlMessage (ControlMessage) import Ouroboros.Network.Protocol.ObjectDiffusion.Type import Quiet (Quiet (..)) -import System.Random (StdGen) -- | Semaphore to guard access to the ObjectPool newtype ObjectPoolSem m = ObjectPoolSem (TSem m) @@ -143,8 +139,6 @@ data DecisionGlobalState peerAddr objectId object = DecisionGlobalState -- -- * We subtract from the counter when a given object is added to the -- objectpool - , dgsRng :: !StdGen - -- ^ Rng used to randomly order peers } deriving (Eq, Show, Generic) @@ -152,7 +146,6 @@ instance ( NoThunks peerAddr , NoThunks object , NoThunks objectId - , NoThunks StdGen ) => NoThunks (DecisionGlobalState peerAddr objectId object) @@ -169,15 +162,13 @@ type DecisionGlobalStateVar m peerAddr objectId object = newDecisionGlobalStateVar :: MonadSTM m => - StdGen -> m (DecisionGlobalStateVar m peerAddr objectId object) -newDecisionGlobalStateVar rng = +newDecisionGlobalStateVar = newTVarIO DecisionGlobalState { dgsPeerStates = Map.empty , dgsObjectsInflightMultiplicities = Map.empty , dgsObjectsOwtPoolMultiplicities = Map.empty - , dgsRng = rng } -- From 513291ccf10008b87044a0cb88fcd1d438efe7fa Mon Sep 17 00:00:00 2001 From: Thomas BAGREL Date: Wed, 15 Oct 2025 09:53:41 +0200 Subject: [PATCH 27/43] Make decision only for peers that haven't read their decision yet --- .../ObjectDiffusion/Inbound/V2.mermaid | 7 -- .../ObjectDiffusion/Inbound/V2/Decision.hs | 83 +++++++++++++++---- .../ObjectDiffusion/Inbound/V2/Registry.hs | 80 +++++++----------- .../ObjectDiffusion/Inbound/V2/State.hs | 38 
++------- .../ObjectDiffusion/Inbound/V2/Types.hs | 66 +++++---------- 5 files changed, 124 insertions(+), 150 deletions(-) diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2.mermaid b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2.mermaid index 71b9768535..9f6cff0006 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2.mermaid +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2.mermaid @@ -5,9 +5,6 @@ flowchart TD D(dpsObjectsInflightIds) F(dpsObjectsOwtPool) - H(dgsObjectsInflightMultiplicities) - I(dgsObjectsOwtPoolMultiplicities) - EA{onRequestIds} EA-->|+count| A B -->|-ids| EA @@ -22,14 +19,11 @@ flowchart TD EC{onRequestObjects} C -->|-ids| EC EC -->|+ids| D - EC -->|+count| H ED{onReceiveObjects / submitToPool} D -->|-ids| ED - H -->|-count| ED IN2@{ shape: lin-cyl, label: "objects" } --o ED ED -->|+objects| F - ED -->|+count| I EE{makeDecisions} EE -.->|readDecision : pdIdsToAck + pdIdsToReq + pdCanPipelineIdsReq/| EA @@ -37,4 +31,3 @@ flowchart TD EG{Added to pool} F -->|-objects| EG - I -->|-count| EG diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Decision.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Decision.hs index 0a72a396e8..964f62f2fb 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Decision.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Decision.hs @@ -44,12 +44,18 @@ makeDecisions :: DecisionPolicy -> -- | decision context DecisionGlobalState peerAddr objectId object -> + -- | Previous decisions + Map peerAddr (PeerDecision objectId object) -> 
+ -- | New decisions Map peerAddr (PeerDecision objectId object) -makeDecisions rng hasObject decisionPolicy globalState = - -- We do it in two steps, because computing the acknowledgment tell which objects from dpsObjectsAvailableIds sets of each peer won't actually be available anymore (as soon as we ack them), - -- so that the pickObjectsToReq function can take this into account. - let (ackAndRequestIdsDecisions, peerToIdsToAck) = computeAck hasObject decisionPolicy globalState - peersToObjectsToReq = pickObjectsToReq rng hasObject decisionPolicy globalState peerToIdsToAck +makeDecisions rng hasObject decisionPolicy globalState prevDecisions = + let -- A subset of peers are currently executing a decision. We shouldn't update the decision for them + frozenPeersToDecisions = Map.filter pdExecutingDecision prevDecisions + + -- We do it in two steps, because computing the acknowledgment tell which objects from dpsObjectsAvailableIds sets of each peer won't actually be available anymore (as soon as we ack them), + -- so that the pickObjectsToReq function can take this into account. 
+ (ackAndRequestIdsDecisions, peerToIdsToAck) = computeAck hasObject decisionPolicy globalState frozenPeersToDecisions + peersToObjectsToReq = pickObjectsToReq rng hasObject decisionPolicy globalState frozenPeersToDecisions peerToIdsToAck in Map.intersectionWith (\decision objectsToReqIds -> decision{pdObjectsToReqIds = objectsToReqIds}) @@ -67,12 +73,16 @@ computeAck :: (objectId -> Bool) -> DecisionPolicy -> DecisionGlobalState peerAddr objectId object -> + -- | Frozen peers and their previous decisions + Map peerAddr (PeerDecision objectId object) -> ( Map peerAddr (PeerDecision objectId object) , Map peerAddr (Set objectId) ) -computeAck poolHasObject DecisionPolicy{dpMaxNumObjectIdsReq, dpMaxNumObjectsOutstanding} DecisionGlobalState{dgsPeerStates} = - let (decisions, peerToIdsToAck) = - Map.foldlWithKey' computeAckForPeer (Map.empty, Map.empty) dgsPeerStates +computeAck poolHasObject DecisionPolicy{dpMaxNumObjectIdsReq, dpMaxNumObjectsOutstanding} DecisionGlobalState{dgsPeerStates} frozenPeersToDecisions = + let -- We shouldn't create a new decision for peers that are currently executing a decision + filteredPeerStates = Map.withoutKeys dgsPeerStates (Map.keysSet frozenPeersToDecisions) + (decisions, peerToIdsToAck) = + Map.foldlWithKey' computeAckForPeer (Map.empty, Map.empty) filteredPeerStates in ( decisions , peerToIdsToAck ) @@ -113,6 +123,7 @@ computeAck poolHasObject DecisionPolicy{dpMaxNumObjectIdsReq, dpMaxNumObjectsOut , pdNumIdsToReq , pdCanPipelineIdsRequests , pdObjectsToReqIds = Set.empty -- we don't decide this here + , pdExecutingDecision = False } in ( Map.insert peerAddr peerDecision decisionsAcc @@ -144,6 +155,8 @@ pickObjectsToReq :: (objectId -> Bool) -> DecisionPolicy -> DecisionGlobalState peerAddr objectId object -> + -- | Frozen peers and their previous decisions + Map peerAddr (PeerDecision objectId object) -> -- | map from peer to the set of ids that will be acked for that peer on next requestIds -- we should treat these ids as not 
available anymore for the purpose of picking objects to request Map peerAddr (Set objectId) -> @@ -159,13 +172,13 @@ pickObjectsToReq } DecisionGlobalState { dgsPeerStates - , dgsObjectsInflightMultiplicities - , dgsObjectsOwtPoolMultiplicities } + frozenPeersToDecisions peerToIdsToAck = peersToObjectsToReq where - orderedPeers = orderPeers rng dgsPeerStates + -- We order the peers that are not currently executing a decision + orderedPeers = orderPeers rng (dgsPeerStates `Map.withoutKeys` Map.keysSet frozenPeersToDecisions) -- We want to map each objectId to the sorted list of peers that can provide it -- For each peer we also indicate how many objects it has in flight at the moment @@ -198,10 +211,53 @@ pickObjectsToReq Map.empty orderedPeers + frozenPeerStatesWithDecisions = Map.intersectionWith (,) dgsPeerStates frozenPeersToDecisions + + availablePeerStates = Map.withoutKeys dgsPeerStates (Map.keysSet frozenPeersToDecisions) + + -- For frozen peers, we should consider that the objects in pdObjectsToReqIds will be requested soon, so we should consider them as inflight for the purpose of picking objects to request for other peers + objectsInFlightMultiplicitiesOfFrozenPeer = Map.foldl' + ( \accMap (DecisionPeerState{dpsObjectsInflightIds}, PeerDecision{pdObjectsToReqIds}) -> + Foldable.foldl' + ( \accMap' objectId -> Map.insertWith (+) objectId 1 accMap' + ) + accMap + (Set.union dpsObjectsInflightIds pdObjectsToReqIds) + ) + Map.empty + frozenPeerStatesWithDecisions + -- Finally, we add to the previous map the objects that are currently inflight from peers for which we will make a decision in this round + objectsInFlightMultiplicities = Map.foldl' + ( \accMap (DecisionPeerState{dpsObjectsInflightIds}) -> + Foldable.foldl' + ( \accMap' objectId -> Map.insertWith (+) objectId 1 accMap' + ) + accMap + dpsObjectsInflightIds + ) + objectsInFlightMultiplicitiesOfFrozenPeer + availablePeerStates + + totalNumObjectsInflight :: NumObjectsReq + totalNumObjectsInflight = 
fromIntegral $ Map.foldl' (+) 0 objectsInFlightMultiplicities + + objectsOwtPoolMultiplicities = Map.foldl' + ( \accMap (DecisionPeerState{dpsObjectsOwtPool}) -> + Foldable.foldl' + ( \accMap' objectId -> Map.insertWith (+) objectId 1 accMap' + ) + accMap + (Map.keys dpsObjectsOwtPool) + ) + Map.empty + dgsPeerStates + -- We also want to know for each objects how many peers have it in the inflight or owtPool, -- meaning that we should receive them soon. + -- We should also add here the objects that are in the pdObjectsToReqIds of each peer decision for frozen peers, + -- if these ids are not already in dpsObjectsInflight or dpsObjectsOwtPool of this peer objectsExpectedSoonMultiplicities :: Map objectId ObjectMultiplicity - objectsExpectedSoonMultiplicities = Map.unionWith (+) dgsObjectsInflightMultiplicities dgsObjectsOwtPoolMultiplicities + objectsExpectedSoonMultiplicities = Map.unionWith (+) objectsInFlightMultiplicities objectsOwtPoolMultiplicities -- Now we join objectsToSortedProviders and objectsExpectedSoonMultiplicities maps on objectId for easy fold objectsToProvidersAndExpectedMultiplicities :: @@ -239,9 +295,6 @@ pickObjectsToReq } objectsToProvidersAndExpectedMultiplicities - totalNumObjectsInflight :: NumObjectsReq - totalNumObjectsInflight = fromIntegral $ Map.foldl' (+) 0 dgsObjectsInflightMultiplicities - -- This function decides whether or not we should select a given peer as provider for the current objectId -- it takes into account if we are expecting to obtain the object from other sources (either inflight/owt pool already, or if the object will be requested from already selected peers in this given round) howToFoldProviders :: diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Registry.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Registry.hs index ec3569bd91..4fe0a92a0e 100644 --- 
a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Registry.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Registry.hs @@ -41,6 +41,7 @@ import Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.Types import Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.ObjectPool.API import Ouroboros.Network.Protocol.ObjectDiffusion.Type (NumObjectIdsAck, NumObjectIdsReq) import System.Random (initStdGen) +import Control.Monad.IO.Class (MonadIO) -- | Communication channels between `ObjectDiffusion` mini-protocol inbound side -- and decision logic. @@ -57,6 +58,9 @@ newPeerDecisionChannelsVar = newMVar (Map.empty) data PeerStateAPI m objectId object = PeerStateAPI { psaReadDecision :: m (PeerDecision objectId object) -- ^ a blocking action which reads `PeerDecision` + , psaOnDecisionExecuted :: m () + -- ^ to be called by the peer when it has fully executed the decision. + -- Marks the peer as available for the `makeDecision` logic , psaOnRequestIds :: NumObjectIdsAck -> NumObjectIdsReq -> m () , psaOnRequestObjects :: Set objectId -> m () , psaOnReceivedIds :: NumObjectIdsReq -> [objectId] -> m () @@ -121,7 +125,15 @@ withPeer return ( peerToChannel' , PeerStateAPI - { psaReadDecision = takeMVar chan' + { psaReadDecision = do + decision <- takeMVar chan' + let decision' = decision{pdExecutingDecision = True} + putMVar chan' decision' + return decision' + , psaOnDecisionExecuted = do + decision <- takeMVar chan' + let decision' = decision{pdExecutingDecision = False} + putMVar chan' decision' , psaOnRequestIds = State.onRequestIds objectDiffusionTracer @@ -199,50 +211,10 @@ withPeer unregisterPeerGlobalState st@DecisionGlobalState { dgsPeerStates - , dgsObjectsInflightMultiplicities - , dgsObjectsOwtPoolMultiplicities } = st - { dgsPeerStates = dgsPeerStates' - , dgsObjectsInflightMultiplicities = dgsObjectsInflightMultiplicities' - , 
dgsObjectsOwtPoolMultiplicities = dgsObjectsOwtPoolMultiplicities' + { dgsPeerStates = Map.delete peerAddr dgsPeerStates } - where - -- First extract the DPS of the specified peer from the DGS - ( DecisionPeerState - { dpsObjectsInflightIds - , dpsObjectsOwtPool - } - , dgsPeerStates' - ) = - Map.alterF - ( \case - Nothing -> - error - ( "ObjectDiffusion.withPeer: can't unregister peer " - ++ show peerAddr - ++ " because it isn't registered" - ) - Just a -> (a, Nothing) - ) - peerAddr - dgsPeerStates - - -- Update dgsInflightMultiplicities map by decreasing the count - -- of objects that were in-flight for this peer. - dgsObjectsInflightMultiplicities' = - Foldable.foldl' - decreaseCount - dgsObjectsInflightMultiplicities - dpsObjectsInflightIds - - -- Finally, we need to update dgsObjectsOwtPoolMultiplicities by decreasing the count of - -- each objectId which is part of the dpsObjectsOwtPool of this peer. - dgsObjectsOwtPoolMultiplicities' = - Foldable.foldl' - decreaseCount - dgsObjectsOwtPoolMultiplicities - (Map.keysSet dpsObjectsOwtPool) decisionLogicThread :: forall m peerAddr objectId object. @@ -251,6 +223,7 @@ decisionLogicThread :: , MonadSTM m , MonadFork m , MonadMask m + , MonadIO m , Ord peerAddr , Ord objectId , Hashable peerAddr @@ -269,23 +242,28 @@ decisionLogicThread decisionTracer countersTracer ObjectPoolWriter{opwHasObject} -- if there are too many inbound connections. threadDelay _DECISION_LOOP_DELAY - globalState <- atomically $ readTVar globalStateVar - decisions <- atomically $ do - rng <- initStdGen + rng <- initStdGen + + -- TODO: can we make this whole block atomic? 
+ -- because makeDecisions should be atomic with respect to reading the global state and + -- reading the previous decisions + decisionsChannels <- readMVar decisionChannelsVar + prevDecisions <- traverse takeMVar decisionsChannels + (newDecisions, globalState) <- atomically $ do + globalState <- readTVar globalStateVar hasObject <- opwHasObject - pure $ makeDecisions rng hasObject decisionPolicy globalState + pure $ (makeDecisions rng hasObject decisionPolicy globalState prevDecisions, globalState) - traceWith decisionTracer (TraceDecisionLogicGlobalStateUpdated "decisionLogicThread" globalState') - traceWith decisionTracer (TraceDecisionLogicDecisionsMade decisions) + traceWith decisionTracer (TraceDecisionLogicDecisionsMade newDecisions) peerToChannel <- readMVar decisionChannelsVar -- Pair decision channel with the corresponding decision let peerToChannelAndDecision = Map.intersectionWith (,) peerToChannel - decisions - -- Send the decisions to the corresponding peers - -- Note that decisions are incremental, so we merge the old one to the new one (using the semigroup instance) if there is an old one + newDecisions + -- Send the newDecisions to the corresponding peers + -- Note that newDecisions are incremental, so we merge the old one to the new one (using the semigroup instance) if there is an old one traverse_ (uncurry putMVar) peerToChannelAndDecision traceWith countersTracer (makeObjectDiffusionCounters globalState) diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/State.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/State.hs index ee10c944be..0c78e41962 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/State.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/State.hs @@ -137,18 +137,11 @@ 
onRequestObjectsImpl objectIds globalState@DecisionGlobalState { dgsPeerStates - , dgsObjectsInflightMultiplicities } = globalState - { dgsObjectsInflightMultiplicities = dgsObjectsInflightMultiplicities' - , dgsPeerStates = dgsPeerStates' + { dgsPeerStates = dgsPeerStates' } where - dgsObjectsInflightMultiplicities' = - Foldable.foldl' - increaseCount - dgsObjectsInflightMultiplicities - objectIds dgsPeerStates' = Map.adjust ( \ps@DecisionPeerState{dpsObjectsAvailableIds, dpsObjectsInflightIds} -> @@ -313,14 +306,9 @@ onReceiveObjectsImpl peerAddr objectsReceived st@DecisionGlobalState - { dgsPeerStates - , dgsObjectsInflightMultiplicities - , dgsObjectsOwtPoolMultiplicities - } = + { dgsPeerStates } = st - { dgsObjectsInflightMultiplicities = dgsObjectsInflightMultiplicities' - , dgsObjectsOwtPoolMultiplicities = dgsObjectsOwtPoolMultiplicities' - , dgsPeerStates = dgsPeerStates' + { dgsPeerStates = dgsPeerStates' } where objectsReceivedIds = Map.keysSet objectsReceived @@ -338,20 +326,8 @@ onReceiveObjectsImpl dpsObjectsInflightIds' = dpsObjectsInflightIds \\ objectsReceivedIds - dgsObjectsInflightMultiplicities' = - Foldable.foldl' - decreaseCount - dgsObjectsInflightMultiplicities - objectsReceivedIds - dpsObjectsOwtPool' = dpsObjectsOwtPool <> objectsReceived - dgsObjectsOwtPoolMultiplicities' = - Foldable.foldl' - increaseCount - dgsObjectsOwtPoolMultiplicities - objectsReceivedIds - peerState' = peerState { dpsObjectsInflightIds = dpsObjectsInflightIds' @@ -419,16 +395,12 @@ submitObjectsToPool updateStateWhenObjectAddedToPool objectId st@DecisionGlobalState - { dgsObjectsOwtPoolMultiplicities - , dgsPeerStates + { dgsPeerStates } = st - { dgsObjectsOwtPoolMultiplicities = dgsObjectsOwtPoolMultiplicities' - , dgsPeerStates = dgsPeerStates' + { dgsPeerStates = dgsPeerStates' } where - dgsObjectsOwtPoolMultiplicities' = decreaseCount dgsObjectsOwtPoolMultiplicities objectId - dgsPeerStates' = Map.adjust ( \ps@DecisionPeerState{dpsObjectsOwtPool} -> 
ps{dpsObjectsOwtPool = Map.delete objectId dpsObjectsOwtPool} diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Types.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Types.hs index bc778a5b79..66562e265b 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Types.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Types.hs @@ -16,6 +16,8 @@ module Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.Types -- * DecisionGlobalState , DecisionGlobalState (..) , dgsObjectsAvailableMultiplicities + , dgsObjectsInflightMultiplicities + , dgsObjectsOwtPoolMultiplicities , DecisionGlobalStateVar , newDecisionGlobalStateVar @@ -38,10 +40,6 @@ module Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.Types , TraceObjectDiffusionInbound (..) , ObjectDiffusionInboundError (..) - -- * Helpers for ObjectMultiplicity maps - , increaseCount - , decreaseCount - -- * Object pool semaphore , ObjectPoolSem (..) , newObjectPoolSem @@ -125,20 +123,6 @@ data DecisionGlobalState peerAddr objectId object = DecisionGlobalState -- /Invariant:/ for peerAddr's which are registered using `withPeer`, -- there's always an entry in this map even if the set of `objectId`s is -- empty. - , dgsObjectsInflightMultiplicities :: !(Map objectId ObjectMultiplicity) - -- ^ Map from ids of objects which are in-flight (have already been - -- requested) to their multiplicities (from how many peers it is - -- currently in-flight) - -- - -- This can intersect with some `dpsObjectsAvailableIds`. - -- The value for any key must be always non-zero (strictly positive). 
- , dgsObjectsOwtPoolMultiplicities :: !(Map objectId ObjectMultiplicity) - -- ^ Map from ids of objects which have already been downloaded, validated, - -- and are on their way to the objectpool (waiting for the lock) - -- to their multiplicities - -- - -- * We subtract from the counter when a given object is added to the - -- objectpool } deriving (Eq, Show, Generic) @@ -157,6 +141,20 @@ dgsObjectsAvailableMultiplicities DecisionGlobalState{dgsPeerStates} = (+) (Map.fromSet (const 1) . dpsObjectsAvailableIds <$> Map.elems dgsPeerStates) +dgsObjectsInflightMultiplicities :: + Ord objectId => DecisionGlobalState peerAddr objectId object -> Map objectId ObjectMultiplicity +dgsObjectsInflightMultiplicities DecisionGlobalState{dgsPeerStates} = + Map.unionsWith + (+) + (Map.fromSet (const 1) . dpsObjectsInflightIds <$> Map.elems dgsPeerStates) + +dgsObjectsOwtPoolMultiplicities :: + Ord objectId => DecisionGlobalState peerAddr objectId object -> Map objectId ObjectMultiplicity +dgsObjectsOwtPoolMultiplicities DecisionGlobalState{dgsPeerStates} = + Map.unionsWith + (+) + (Map.fromSet (const 1) . Map.keysSet . dpsObjectsOwtPool <$> Map.elems dgsPeerStates) + type DecisionGlobalStateVar m peerAddr objectId object = StrictTVar m (DecisionGlobalState peerAddr objectId object) @@ -167,8 +165,6 @@ newDecisionGlobalStateVar = newTVarIO DecisionGlobalState { dgsPeerStates = Map.empty - , dgsObjectsInflightMultiplicities = Map.empty - , dgsObjectsOwtPoolMultiplicities = Map.empty } -- @@ -199,6 +195,8 @@ data PeerDecision objectId object = PeerDecision -- if we have non-acknowledged `objectId`s. , pdObjectsToReqIds :: !(Set objectId) -- ^ objectId's to download. 
+ , pdExecutingDecision :: !Bool + -- ^ Whether the peer is actually executing the said decision } deriving (Show, Eq) @@ -228,15 +226,12 @@ makeObjectDiffusionCounters :: DecisionGlobalState peerAddr objectId object -> ObjectDiffusionCounters makeObjectDiffusionCounters - dgs@DecisionGlobalState - { dgsObjectsInflightMultiplicities - , dgsObjectsOwtPoolMultiplicities - } = + dgs = ObjectDiffusionCounters { odcNumDistinctObjectsAvailable = Map.size $ dgsObjectsAvailableMultiplicities dgs - , odcNumDistinctObjectsInflight = Map.size dgsObjectsInflightMultiplicities - , odcNumTotalObjectsInflight = fromIntegral $ mconcat (Map.elems dgsObjectsInflightMultiplicities) - , odcNumDistinctObjectsOwtPool = Map.size dgsObjectsOwtPoolMultiplicities + , odcNumDistinctObjectsInflight = Map.size $ dgsObjectsInflightMultiplicities dgs + , odcNumTotalObjectsInflight = fromIntegral . mconcat . Map.elems $ dgsObjectsInflightMultiplicities dgs + , odcNumDistinctObjectsOwtPool = Map.size $ dgsObjectsOwtPoolMultiplicities dgs } data ObjectDiffusionInitDelay @@ -269,23 +264,6 @@ newtype ObjectMultiplicity deriving Monoid via (Sum Word64) deriving Show via (Quiet ObjectMultiplicity) -increaseCount :: Ord k => Map k ObjectMultiplicity -> k -> Map k ObjectMultiplicity -increaseCount mmap k = - Map.alter - ( \case - Nothing -> Just $! 1 - Just cnt -> Just $! succ cnt - ) - k - mmap - -decreaseCount :: Ord k => Map k ObjectMultiplicity -> k -> Map k ObjectMultiplicity -decreaseCount mmap k = - Map.update - (\n -> if n > 1 then Just $! 
pred n else Nothing) - k - mmap - data TraceObjectDiffusionInbound objectId object = TraceObjectDiffusionInboundRequestedIds Int | TraceObjectDiffusionInboundRequestedObjects Int From 8c4094a7eda05e0e54ee25ae69431b6e35952260 Mon Sep 17 00:00:00 2001 From: Thomas BAGREL Date: Wed, 15 Oct 2025 12:49:48 +0200 Subject: [PATCH 28/43] WIP V2.hs Co-authored-by: nbacquey --- .../ObjectDiffusion/Inbound/V2.hs | 161 ++++++++++-------- .../ObjectDiffusion/Inbound/V2/Decision.hs | 5 + .../ObjectDiffusion/Inbound/V2/Registry.hs | 4 +- 3 files changed, 98 insertions(+), 72 deletions(-) diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2.hs index 85c1993aa6..09a3fd3700 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2.hs @@ -6,6 +6,7 @@ {-# LANGUAGE LambdaCase #-} {-# LANGUAGE NamedFieldPuns #-} {-# LANGUAGE ScopedTypeVariables #-} +{-# LANGUAGE BlockArguments #-} module Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2 ( -- * ObjectDiffusion Inbound client @@ -45,8 +46,10 @@ import Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.Types as V2 import Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.ObjectPool.API import Ouroboros.Network.ControlMessage (ControlMessage (..), ControlMessageSTM) import Ouroboros.Network.Protocol.ObjectDiffusion.Inbound +import Network.TypedProtocol.Peer +import Ouroboros.Network.Protocol.ObjectDiffusion.Type --- | A object-submission inbound side (client). +-- | A object-diffusion inbound side (client). -- -- The goIdle' function blocks on receiving `PeerDecision` from the decision logic. 
objectDiffusionInbound :: @@ -66,21 +69,29 @@ objectDiffusionInbound controlMessageSTM PeerStateAPI { psaReadDecision + , psaOnDecisionExecuted , psaOnRequestIds , psaOnRequestObjects , psaOnReceivedIds , psaOnReceivedObjects } = ObjectDiffusionInboundPipelined $ do + -- TODO: delete initDelay case initDelay of ObjectDiffusionInitDelay delay -> threadDelay delay NoObjectDiffusionInitDelay -> return () (goIdle Zero) where - goIdle :: - forall (n :: N). - Nat n -> - m (InboundStIdle Z objectId object m ()) + + terminateAfterDrain :: + Nat n -> InboundStIdle n objectId object m () + terminateAfterDrain = \case + Zero -> SendMsgDone (pure ()) + Succ n -> CollectPipelined Nothing $ \_ignoredMsg -> pure $ terminateAfterDrain n + + -- Wrapper around goIdle' that handles termination on reception of + -- Terminate control message. + goIdle :: forall (n :: N). Nat n -> m (InboundStIdle n objectId object m ()) goIdle n = do ctrlMsg <- atomically controlMessageSTM traceWith tracer $ TraceObjectDiffusionInboundReceivedControlMessage ctrlMsg @@ -89,11 +100,9 @@ objectDiffusionInbound Terminate -> pure $ terminateAfterDrain n -- Otherwise, we can continue the protocol normally. - _continue -> goIdle' - goIdle' :: - forall (n :: N). - Nat n -> - m (InboundStIdle Z objectId object m ()) + _continue -> goIdle' n + + goIdle' :: forall (n :: N). Nat n -> m (InboundStIdle n objectId object m ()) goIdle' n = do -- Block on next decision. 
decision@PeerDecision @@ -105,75 +114,87 @@ objectDiffusionInbound psaReadDecision traceWith tracer (TraceObjectDiffusionInboundReceivedDecision decision) - when (not StrictSeq.null pdObjectsToSubmitToPoolIds) $ do - psaSubmitObjectsToPool pdObjectsToSubmitToPoolIds - - let shouldRequestMoreObjects = not $ Set.null pdObjectsToReqIds - + -- We need to make sure we don't go again into `goIdle` until `psaOnDecisionExecuted` has been called case n of - -- We didn't pipeline any requests, so there are no replies in flight - -- (nothing to collect) - Zero -> - if shouldRequestMoreObjects - then do - -- There are no replies in flight, but we do know some more objects - -- we can ask for, so lets ask for them and more objectIds in a - -- pipelined way. - traceWith tracer (TraceObjectDiffusionInboundCanRequestMoreObjects (natToInt n)) - goReqObjectsAndIdsPipelined Zero decision - else do - -- There's no replies in flight, and we have no more objects we can - -- ask for so the only remaining thing to do is to ask for more - -- objectIds. Since this is the only thing to do now, we make this a - -- blocking call. - traceWith tracer (TraceObjectDiffusionInboundCannotRequestMoreObjects (natToInt n)) - goReqIdsBlocking decision - - -- We have pipelined some requests, so there are some replies in flight. - n@(Succ _) -> - if shouldRequestMoreObjects - then do - -- We have replies in flight and we should eagerly collect them if - -- available, but there are objects to request too so we - -- should *not* block waiting for replies. - -- So we ask for new objects and objectIds in a pipelined way. - pure $ - CollectPipelined - -- if no replies are available immediately, we continue with - (Just (goReqObjectsIdsPipelined n decision)) - -- if one reply is available, we go collect it. 
- -- We will continue to goIdle after; so in practice we will loop - -- until all immediately available replies have been collected - -- before requesting objects and ids in a pipelined fashion - (goCollect n decision) - else do undefined + Zero -> goReqObjectsAndIds Zero decision + n@Succ{} -> pure $ + CollectPipelined + (Just (goReqObjectsAndIds n decision)) + (\collectResult -> undefined) -- loopUntilAllCollected; goReqObjectsAndIds n) + goReqObjectsAndIds :: + Nat n -> + PeerDecision objectId object -> + m (InboundStIdle n objectId object m ()) + goReqObjectsAndIds n object@PeerDecision{pdObjectsToReqIds} = if Set.null pdObjectsToReqIds - then goReqObjectIds Zero undefined - else goReqObjects undefined + then + goReqIds n object + else do + psaOnRequestObjects pdObjectsToReqIds + pure $ SendMsgRequestObjectsPipelined + (Set.toList pdObjectsToReqIds) + (goReqIds (Succ n) object) - -- Pipelined request of objects - goReqObjects :: + goReqIds :: + forall (n :: N). + Nat n -> + PeerDecision objectId object -> + m (InboundStIdle n objectId object m ()) + goReqIds n pd@PeerDecision{pdCanPipelineIdsRequests} = + if pdCanPipelineIdsRequests + then goReqIdsPipelined n pd + else case n of + Zero -> goReqIdsBlocking pd + Succ{} -> error "Impossible to have pipelined requests when we have no known unacknowledged objectIds" + + goReqIdsBlocking :: PeerDecision objectId object -> m (InboundStIdle Z objectId object m ()) - goReqObjects object@PeerDecision{pdObjectsToReqIds = pdObjectsToReqIds} = - pure $ - SendMsgRequestObjectsPipelined - (Set.toList pdObjectsToReqIds) - (goReqObjectIds (Succ Zero) object) + goReqIdsBlocking PeerDecision{pdNumIdsToAck, pdNumIdsToReq} = + if pdNumIdsToReq == 0 + then do + psaOnDecisionExecuted + goIdle Zero + else do + psaOnRequestIds pdNumIdsToAck pdNumIdsToReq + psaOnDecisionExecuted + pure $ SendMsgRequestObjectIdsBlocking + pdNumIdsToAck + pdNumIdsToReq + ( \objectIds -> do + psaOnReceivedIds pdNumIdsToReq (NonEmpty.toList objectIds) + 
goIdle Zero + ) - goReqObjectIds :: + goReqIdsPipelined :: forall (n :: N). Nat n -> PeerDecision objectId object -> m (InboundStIdle n objectId object m ()) - goReqObjectIds - n - PeerDecision{pdNumIdsToReq = 0} = - case n of - Zero -> goIdle - Succ _ -> handleReplies n - goReqObjectIds + goReqIdsPipelined n PeerDecision{pdNumIdsToAck, pdNumIdsToReq} = + if pdNumIdsToReq == 0 + then do + psaOnDecisionExecuted + goIdle n + else do + psaOnRequestIds pdNumIdsToAck pdNumIdsToReq + psaOnDecisionExecuted + pure $ SendMsgRequestObjectIdsPipelined + pdNumIdsToAck + pdNumIdsToReq + (goIdle (Succ n)) + + goCollectIds :: Nat n -> NumObjectIdsReq -> [objectId] -> m (InboundStIdle n objectId object m ()) + goCollectIds n numIdsRequested ids = do + psaOnReceivedIds numIdsRequested ids + undefined + + --------------------------------------------------------------------------- + -- OLD STUFF FOR REFERENCE BELOW + --------------------------------------------------------------------------- + + goReqIds' -- if there are no unacknowledged objectIds, the protocol requires sending -- a blocking `MsgRequestObjectIds` request. 
This is important, as otherwise -- the client side wouldn't have a chance to terminate the @@ -199,7 +220,7 @@ objectDiffusionInbound onReceiveIds objectIdsToReq receivedIdsSeq objectIdsMap goIdle ) - goReqObjectIds + goReqIds' n@Zero PeerDecision { pdNumIdsToAck = objectIdsToAck @@ -211,7 +232,7 @@ objectDiffusionInbound objectIdsToAck objectIdsToReq (handleReplies (Succ n)) - goReqObjectIds + goReqIds' n@Succ{} PeerDecision { pdNumIdsToAck = objectIdsToAck diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Decision.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Decision.hs index 964f62f2fb..48d055ec9a 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Decision.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Decision.hs @@ -273,6 +273,11 @@ pickObjectsToReq objectsToSortedProviders objectsExpectedSoonMultiplicities + -- NOW HERE TAKE PLACE THE ACTUAL DECISION LOGIC AND ATTRIBUTION OF OBJECTS TO PEERS + + -- The current decision logic is greedy on objects, so it will try to request as many copies of the same object as possible, + -- meaning we will have optimal coverage of the first objects, but might not request some other objects at all if they are (only) provided by peers that are already saturated. 
+ -- Now we compute the actual attribution of downloads for peers DownloadPickState{peersToObjectsToReq} = -- We iterate over each objectId and the corresponding (providers, expectedMultiplicity) diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Registry.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Registry.hs index 4fe0a92a0e..612e20891a 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Registry.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Registry.hs @@ -125,12 +125,12 @@ withPeer return ( peerToChannel' , PeerStateAPI - { psaReadDecision = do + { psaReadDecision = do -- TODO: make atomic decision <- takeMVar chan' let decision' = decision{pdExecutingDecision = True} putMVar chan' decision' return decision' - , psaOnDecisionExecuted = do + , psaOnDecisionExecuted = do -- TODO: make atomic decision <- takeMVar chan' let decision' = decision{pdExecutingDecision = False} putMVar chan' decision' From 2b3cdd5d44032a7d61d377156886219447ece955 Mon Sep 17 00:00:00 2001 From: Thomas BAGREL Date: Wed, 15 Oct 2025 13:59:50 +0200 Subject: [PATCH 29/43] First version that builds! Youpiii! 
--- cabal.project | 4 +- .../Ouroboros/Consensus/Node/GSM/PeerState.hs | 2 +- .../Ouroboros/Consensus/NodeKernel.hs | 2 +- .../ObjectDiffusion/Inbound/V1.hs | 29 +-- .../ObjectDiffusion/Inbound/V2.hs | 246 +++++------------- .../ObjectDiffusion/Inbound/V2/Decision.hs | 4 - .../ObjectDiffusion/Inbound/V2/Registry.hs | 18 +- .../ObjectDiffusion/Inbound/V2/State.hs | 21 +- .../ObjectDiffusion/Inbound/V2/Types.hs | 15 -- .../MiniProtocol/ObjectDiffusion/Smoke.hs | 2 +- 10 files changed, 92 insertions(+), 251 deletions(-) diff --git a/cabal.project b/cabal.project index 17c612c568..06c514ccbb 100644 --- a/cabal.project +++ b/cabal.project @@ -59,8 +59,8 @@ allow-newer: source-repository-package type: git location: https://github.com/IntersectMBO/ouroboros-network - tag: peras-staging/pr-5202 - --sha256: sha256-nTbjunQaqt6/syzSKw24Lne50083dI2SZFirG2/1T9U= + tag: 04de687146c07c17a6566577040781f66a9e8a11 + --sha256: sha256-97KSu1vlohcGyBhGQdGuEPErPaK01Y5RPepfEtcclMM= subdir: ouroboros-network ouroboros-network-protocols diff --git a/ouroboros-consensus-diffusion/src/ouroboros-consensus-diffusion/Ouroboros/Consensus/Node/GSM/PeerState.hs b/ouroboros-consensus-diffusion/src/ouroboros-consensus-diffusion/Ouroboros/Consensus/Node/GSM/PeerState.hs index defc3abe33..ce092dad15 100644 --- a/ouroboros-consensus-diffusion/src/ouroboros-consensus-diffusion/Ouroboros/Consensus/Node/GSM/PeerState.hs +++ b/ouroboros-consensus-diffusion/src/ouroboros-consensus-diffusion/Ouroboros/Consensus/Node/GSM/PeerState.hs @@ -17,7 +17,7 @@ import Ouroboros.Consensus.MiniProtocol.ChainSync.Client.State , ChainSyncClientHandleCollection (..) , ChainSyncState (..) ) -import Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.State +import Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V1.State ( ObjectDiffusionInboundHandle (..) , ObjectDiffusionInboundHandleCollection (..) , ObjectDiffusionInboundState (..) 
diff --git a/ouroboros-consensus-diffusion/src/ouroboros-consensus-diffusion/Ouroboros/Consensus/NodeKernel.hs b/ouroboros-consensus-diffusion/src/ouroboros-consensus-diffusion/Ouroboros/Consensus/NodeKernel.hs index cff137e5a6..251107e713 100644 --- a/ouroboros-consensus-diffusion/src/ouroboros-consensus-diffusion/Ouroboros/Consensus/NodeKernel.hs +++ b/ouroboros-consensus-diffusion/src/ouroboros-consensus-diffusion/Ouroboros/Consensus/NodeKernel.hs @@ -82,7 +82,7 @@ import Ouroboros.Consensus.MiniProtocol.ChainSync.Client.HistoricityCheck import Ouroboros.Consensus.MiniProtocol.ChainSync.Client.InFutureCheck ( SomeHeaderInFutureCheck ) -import Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.State +import Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V1.State ( ObjectDiffusionInboundHandleCollection (..) , newObjectDiffusionInboundHandleCollection ) diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V1.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V1.hs index 51d65c469e..a9e65d27e2 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V1.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V1.hs @@ -38,7 +38,7 @@ import Data.Word (Word64) import GHC.Generics (Generic) import Network.TypedProtocol.Core (N (Z), Nat (..), natToInt) import NoThunks.Class (NoThunks (..), unsafeNoThunks) -import Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.State +import Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V1.State ( ObjectDiffusionInboundStateView (..) 
) import Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.ObjectPool.API @@ -146,7 +146,7 @@ objectDiffusionInbound _version controlMessageSTM state = - ObjectDiffusionInboundPipelined $ do + ObjectDiffusionInboundPipelined $ pure $ continueWithStateM (go Zero) initialInboundSt where canRequestMoreObjects :: InboundSt k object -> Bool @@ -320,7 +320,7 @@ objectDiffusionInbound -- request. let st' = st{numIdsInFlight = numIdsInFlight st - numIdsRequested} poolHasObject <- atomically $ opwHasObject - continueWithStateM + pure $ continueWithStateM (go n) (preAcknowledge st' poolHasObject collectedIds) CollectObjects requestedIds collectedObjects -> do @@ -368,7 +368,7 @@ objectDiffusionInbound traceWith tracer $ TraceObjectDiffusionProcessed (NumObjectsProcessed (fromIntegral $ length objectsToAck)) - continueWithStateM + pure $ continueWithStateM (go n) st { pendingObjects = pendingObjects'' @@ -392,14 +392,14 @@ objectDiffusionInbound $ SendMsgRequestObjectIdsBlocking (numToAckOnNextReq st) numIdsToRequest - ( \neCollectedIds -> do + ( \neCollectedIds -> WithEffect $ do -- We just got some new object id's, so we are no longer idling -- -- NOTE this change of state should be made explicit: -- https://github.com/tweag/cardano-peras/issues/144 Idling.idlingStop (odisvIdling state) traceWith tracer TraceObjectInboundStoppedIdling - collectAndContinueWithState + pure $ collectAndContinueWithState (goCollect Zero) st { numToAckOnNextReq = 0 @@ -433,10 +433,9 @@ objectDiffusionInbound let numIdsToRequest = numIdsToReq st if numIdsToRequest <= 0 - then continueWithStateM (go n) st + then pure $ continueWithStateM (go n) st else - pure $ - SendMsgRequestObjectIdsPipelined + pure $ SendMsgRequestObjectIdsPipelined (numToAckOnNextReq st) numIdsToRequest ( continueWithStateM @@ -454,8 +453,8 @@ objectDiffusionInbound terminateAfterDrain :: Nat n -> InboundStIdle n objectId object m () terminateAfterDrain = \case - Zero -> SendMsgDone (pure ()) - Succ n -> CollectPipelined 
Nothing $ \_ignoredMsg -> pure $ terminateAfterDrain n + Zero -> SendMsgDone () + Succ n -> CollectPipelined Nothing $ \_ignoredMsg -> terminateAfterDrain n ------------------------------------------------------------------------------- -- Utilities to deal with stateful continuations (copied from TX-submission) @@ -487,9 +486,9 @@ continueWithStateM :: NoThunks s => StatefulM s n objectId object m -> s -> - m (InboundStIdle n objectId object m ()) + InboundStIdle n objectId object m () continueWithStateM (StatefulM f) !st = - checkInvariant (show <$> unsafeNoThunks st) (f st) + checkInvariant (show <$> unsafeNoThunks st) (WithEffect $! f st) {-# NOINLINE continueWithStateM #-} -- | A variant of 'continueWithState' to be more easily utilized with @@ -499,7 +498,7 @@ collectAndContinueWithState :: StatefulCollect s n objectId object m -> s -> Collect objectId object -> - m (InboundStIdle n objectId object m ()) + InboundStIdle n objectId object m () collectAndContinueWithState (StatefulCollect f) !st c = - checkInvariant (show <$> unsafeNoThunks st) (f st c) + checkInvariant (show <$> unsafeNoThunks st) (WithEffect $! f st c) {-# NOINLINE collectAndContinueWithState #-} diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2.hs index 09a3fd3700..56fdb1fe34 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2.hs @@ -20,79 +20,58 @@ module Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2 , module V2 , PeerDecisionChannelsVar , newPeerDecisionChannelsVar - , ObjectPoolSem - , newObjectPoolSem - , DecisionGlobalStateVar - , newDecisionGlobalStateVar , DecisionPolicy (..) 
, defaultDecisionPolicy ) where -import Control.Concurrent.Class.MonadSTM (atomically) -import Control.Exception (assert) -import Control.Monad (unless, when) +import Control.Concurrent.Class.MonadSTM (atomically, MonadSTM) import Control.Monad.Class.MonadThrow -import Control.Monad.Class.MonadTimer.SI import Control.Tracer (Tracer, traceWith) import Data.List.NonEmpty qualified as NonEmpty -import Data.Map.Strict qualified as Map -import Data.Sequence.Strict qualified as StrictSeq import Data.Set qualified as Set import Network.TypedProtocol import Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.Policy import Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.Registry -import Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.State qualified as State import Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.Types as V2 -import Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.ObjectPool.API import Ouroboros.Network.ControlMessage (ControlMessage (..), ControlMessageSTM) import Ouroboros.Network.Protocol.ObjectDiffusion.Inbound -import Network.TypedProtocol.Peer -import Ouroboros.Network.Protocol.ObjectDiffusion.Type + +-- TODO: Add checks and validation -- | A object-diffusion inbound side (client). -- --- The goIdle' function blocks on receiving `PeerDecision` from the decision logic. +-- The steps are as follow +-- 1. Block on next decision from the decision logic +-- 2. Handle any available reply (`goCollect`) +-- 3. Request new objects if possible (`goReqObjects`) +-- 4. Request new ids (also responsible for ack) (`goReqIds`) +-- 5. signal psaOnDecisionExecuted (as part of `goReqIds{Blocking,NonBlocking}`) +-- And loop again +-- We need to make sure we don't go again into `goIdle` until `psaOnDecisionExecuted` has been called objectDiffusionInbound :: - forall objectId object ticketNo m. 
- ( MonadDelay m - , MonadThrow m - , Ord objectId - ) => - Tracer m (TraceObjectDiffusionInbound objectId object) -> - ObjectDiffusionInitDelay -> + forall objectId object m. + ( MonadThrow m + , MonadSTM m + ) =>Tracer m (TraceObjectDiffusionInbound objectId object) -> ControlMessageSTM m -> PeerStateAPI m objectId object -> ObjectDiffusionInboundPipelined objectId object m () objectDiffusionInbound tracer - initDelay controlMessageSTM PeerStateAPI { psaReadDecision , psaOnDecisionExecuted , psaOnRequestIds , psaOnRequestObjects - , psaOnReceivedIds - , psaOnReceivedObjects + , psaOnReceiveIds + , psaOnReceiveObjects } = - ObjectDiffusionInboundPipelined $ do - -- TODO: delete initDelay - case initDelay of - ObjectDiffusionInitDelay delay -> threadDelay delay - NoObjectDiffusionInitDelay -> return () - (goIdle Zero) + ObjectDiffusionInboundPipelined $ pure $ goIdle Zero where - terminateAfterDrain :: - Nat n -> InboundStIdle n objectId object m () - terminateAfterDrain = \case - Zero -> SendMsgDone (pure ()) - Succ n -> CollectPipelined Nothing $ \_ignoredMsg -> pure $ terminateAfterDrain n - - -- Wrapper around goIdle' that handles termination on reception of - -- Terminate control message. - goIdle :: forall (n :: N). Nat n -> m (InboundStIdle n objectId object m ()) - goIdle n = do + goIdle :: forall (n :: N). Nat n -> InboundStIdle n objectId object m () + goIdle n = WithEffect $ do ctrlMsg <- atomically controlMessageSTM traceWith tracer $ TraceObjectDiffusionInboundReceivedControlMessage ctrlMsg case ctrlMsg of @@ -100,37 +79,43 @@ objectDiffusionInbound Terminate -> pure $ terminateAfterDrain n -- Otherwise, we can continue the protocol normally. - _continue -> goIdle' n + _continue -> do + -- Block on next decision. + decision <- psaReadDecision + traceWith tracer (TraceObjectDiffusionInboundReceivedDecision decision) + pure $ goCollect n decision - goIdle' :: forall (n :: N). 
Nat n -> m (InboundStIdle n objectId object m ()) - goIdle' n = do - -- Block on next decision. - decision@PeerDecision - { pdNumIdsToAck - , pdNumIdsToReq - , pdObjectsToReqIds - , pdCanPipelineIdsRequests - } <- - psaReadDecision - traceWith tracer (TraceObjectDiffusionInboundReceivedDecision decision) - - -- We need to make sure we don't go again into `goIdle` until `psaOnDecisionExecuted` has been called - case n of - Zero -> goReqObjectsAndIds Zero decision - n@Succ{} -> pure $ - CollectPipelined - (Just (goReqObjectsAndIds n decision)) - (\collectResult -> undefined) -- loopUntilAllCollected; goReqObjectsAndIds n) - - goReqObjectsAndIds :: + terminateAfterDrain :: + Nat n -> InboundStIdle n objectId object m () + terminateAfterDrain = \case + Zero -> SendMsgDone () + Succ n -> CollectPipelined Nothing $ \_ignoredMsg -> terminateAfterDrain n + + goCollect :: Nat n -> PeerDecision objectId object -> InboundStIdle n objectId object m () + goCollect Zero decision = + goReqObjects Zero decision + goCollect (Succ n) decision = + CollectPipelined + (Just $ goReqObjects (Succ n) decision) + (\case + CollectObjectIds numIdsRequested ids -> WithEffect $ do + -- TODO: Add checks and validation + psaOnReceiveIds numIdsRequested ids + pure $ goCollect n decision + CollectObjects _objectIds objects -> WithEffect $ do + -- TODO: Add checks and validation + psaOnReceiveObjects objects + pure $ goCollect n decision) + + goReqObjects :: Nat n -> PeerDecision objectId object -> - m (InboundStIdle n objectId object m ()) - goReqObjectsAndIds n object@PeerDecision{pdObjectsToReqIds} = + InboundStIdle n objectId object m () + goReqObjects n object@PeerDecision{pdObjectsToReqIds} = if Set.null pdObjectsToReqIds then goReqIds n object - else do + else WithEffect $ do psaOnRequestObjects pdObjectsToReqIds pure $ SendMsgRequestObjectsPipelined (Set.toList pdObjectsToReqIds) @@ -140,7 +125,7 @@ objectDiffusionInbound forall (n :: N). 
Nat n -> PeerDecision objectId object -> - m (InboundStIdle n objectId object m ()) + InboundStIdle n objectId object m () goReqIds n pd@PeerDecision{pdCanPipelineIdsRequests} = if pdCanPipelineIdsRequests then goReqIdsPipelined n pd @@ -150,33 +135,34 @@ objectDiffusionInbound goReqIdsBlocking :: PeerDecision objectId object -> - m (InboundStIdle Z objectId object m ()) - goReqIdsBlocking PeerDecision{pdNumIdsToAck, pdNumIdsToReq} = + InboundStIdle Z objectId object m () + goReqIdsBlocking PeerDecision{pdNumIdsToAck, pdNumIdsToReq} = WithEffect $ do if pdNumIdsToReq == 0 then do psaOnDecisionExecuted - goIdle Zero + pure $ goIdle Zero else do psaOnRequestIds pdNumIdsToAck pdNumIdsToReq psaOnDecisionExecuted pure $ SendMsgRequestObjectIdsBlocking pdNumIdsToAck pdNumIdsToReq - ( \objectIds -> do - psaOnReceivedIds pdNumIdsToReq (NonEmpty.toList objectIds) - goIdle Zero + ( \objectIds -> WithEffect $ do + -- TODO: Add checks and validation + psaOnReceiveIds pdNumIdsToReq (NonEmpty.toList objectIds) + pure $ goIdle Zero ) goReqIdsPipelined :: forall (n :: N). 
Nat n -> PeerDecision objectId object -> - m (InboundStIdle n objectId object m ()) - goReqIdsPipelined n PeerDecision{pdNumIdsToAck, pdNumIdsToReq} = + InboundStIdle n objectId object m () + goReqIdsPipelined n PeerDecision{pdNumIdsToAck, pdNumIdsToReq} = WithEffect $ do if pdNumIdsToReq == 0 then do psaOnDecisionExecuted - goIdle n + pure $ goIdle n else do psaOnRequestIds pdNumIdsToAck pdNumIdsToReq psaOnDecisionExecuted @@ -184,111 +170,3 @@ objectDiffusionInbound pdNumIdsToAck pdNumIdsToReq (goIdle (Succ n)) - - goCollectIds :: Nat n -> NumObjectIdsReq -> [objectId] -> m (InboundStIdle n objectId object m ()) - goCollectIds n numIdsRequested ids = do - psaOnReceivedIds numIdsRequested ids - undefined - - --------------------------------------------------------------------------- - -- OLD STUFF FOR REFERENCE BELOW - --------------------------------------------------------------------------- - - goReqIds' - -- if there are no unacknowledged objectIds, the protocol requires sending - -- a blocking `MsgRequestObjectIds` request. This is important, as otherwise - -- the client side wouldn't have a chance to terminate the - -- mini-protocol. 
- Zero - PeerDecision - { pdNumIdsToAck = objectIdsToAck - , pdCanPipelineIdsRequests = False - , pdNumIdsToReq = objectIdsToReq - } = - pure $ - SendMsgRequestObjectIdsBlocking - objectIdsToAck - objectIdsToReq - -- Our result if the client terminates the protocol - -- (traceWith tracer TraceObjectDiffusionInboundTerminated) - ( \objectIds -> do - let objectIds' = NonEmpty.toList objectIds - receivedIdsSeq = StrictSeq.fromList $ fst <$> objectIds' - objectIdsMap = Map.fromList objectIds' - when (StrictSeq.length receivedIdsSeq > fromIntegral objectIdsToReq) $ - throwIO ProtocolErrorObjectIdsNotRequested - onReceiveIds objectIdsToReq receivedIdsSeq objectIdsMap - goIdle - ) - goReqIds' - n@Zero - PeerDecision - { pdNumIdsToAck = objectIdsToAck - , pdCanPipelineIdsRequests = True - , pdNumIdsToReq = objectIdsToReq - } = - pure $ - SendMsgRequestObjectIdsPipelined - objectIdsToAck - objectIdsToReq - (handleReplies (Succ n)) - goReqIds' - n@Succ{} - PeerDecision - { pdNumIdsToAck = objectIdsToAck - , pdCanPipelineIdsRequests - , pdNumIdsToReq = objectIdsToReq - } = - -- it is impossible that we have had `object`'s to request (Succ{} - is an - -- evidence for that), but no unacknowledged `objectId`s. - assert pdCanPipelineIdsRequests $ - pure $ - SendMsgRequestObjectIdsPipelined - objectIdsToAck - objectIdsToReq - (handleReplies (Succ n)) - - handleReplies :: - forall (n :: N). - Nat (S n) -> - m (InboundStIdle (S n) objectId object m ()) - handleReplies (Succ n'@Succ{}) = - pure $ - CollectPipelined - Nothing - (handleReply (handleReplies n')) - handleReplies (Succ Zero) = - pure $ - CollectPipelined - Nothing - (handleReply goIdle) - - handleReply :: - forall (n :: N). 
- m (InboundStIdle n objectId object m ()) -> - -- continuation - Collect objectId object -> - m (InboundStIdle n objectId object m ()) - handleReply k = \case - CollectObjectIds objectIdsToReq objectIds -> do - let receivedIdsSeq = StrictSeq.fromList $ fst <$> objectIds - objectIdsMap = Map.fromList objectIds - unless (StrictSeq.length receivedIdsSeq <= fromIntegral objectIdsToReq) $ - throwIO ProtocolErrorObjectIdsNotRequested - onReceiveIds objectIdsToReq receivedIdsSeq objectIdsMap - k - CollectObjects objectIds objects -> do - let requested = Map.keysSet objectIds - received = Map.fromList undefined - - unless (Map.keysSet received `Set.isSubsetOf` requested) $ - throwIO ProtocolErrorObjectNotRequested - - mbe <- onReceiveObjects objectIds received - traceWith tracer $ TraceObjectDiffusionCollected (getId `map` objects) - case mbe of - -- one of `object`s had a wrong size - Just e -> - traceWith tracer (TraceObjectInboundError e) - >> throwIO e - Nothing -> k diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Decision.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Decision.hs index 48d055ec9a..a7a5119f40 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Decision.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Decision.hs @@ -14,7 +14,6 @@ module Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.Decision ) where import Data.Foldable qualified as Foldable -import Data.Hashable import Data.Map.Merge.Strict qualified as Map import Data.Map.Strict (Map) import Data.Map.Strict qualified as Map @@ -36,7 +35,6 @@ makeDecisions :: forall peerAddr objectId object. 
( Ord peerAddr , Ord objectId - , Hashable peerAddr ) => StdGen -> (objectId -> Bool) -> @@ -131,7 +129,6 @@ computeAck poolHasObject DecisionPolicy{dpMaxNumObjectIdsReq, dpMaxNumObjectsOut ) orderPeers :: - Hashable peerAddr => StdGen -> Map peerAddr (DecisionPeerState objectId object) -> [(peerAddr, DecisionPeerState objectId object)] @@ -149,7 +146,6 @@ pickObjectsToReq :: forall peerAddr objectId object. ( Ord peerAddr , Ord objectId - , Hashable peerAddr ) => StdGen -> (objectId -> Bool) -> diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Registry.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Registry.hs index 612e20891a..a09327cc80 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Registry.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Registry.hs @@ -25,11 +25,9 @@ import Control.Monad.Class.MonadThrow import Control.Monad.Class.MonadTime.SI import Control.Monad.Class.MonadTimer.SI import Control.Tracer (Tracer, traceWith) -import Data.Foldable as Foldable (foldl', traverse_) -import Data.Hashable +import Data.Foldable as Foldable (traverse_) import Data.Map.Strict (Map) import Data.Map.Strict qualified as Map -import Data.Sequence.Strict (StrictSeq) import Data.Sequence.Strict qualified as StrictSeq import Data.Set (Set) import Data.Set qualified as Set @@ -63,9 +61,9 @@ data PeerStateAPI m objectId object = PeerStateAPI -- Marks the peer as available for the `makeDecision` logic , psaOnRequestIds :: NumObjectIdsAck -> NumObjectIdsReq -> m () , psaOnRequestObjects :: Set objectId -> m () - , psaOnReceivedIds :: NumObjectIdsReq -> [objectId] -> m () + , psaOnReceiveIds :: NumObjectIdsReq -> [objectId] -> m () -- ^ Error handling should have been done before calling this - , psaOnReceivedObjects :: 
[object] -> m () + , psaOnReceiveObjects :: [object] -> m () -- ^ Error handling should have been done before calling this -- Also every object should have been validated! } @@ -80,7 +78,6 @@ withPeer :: , MonadSTM m , Ord objectId , Ord peerAddr - , Show peerAddr ) => Tracer m (TraceDecisionLogic peerAddr objectId object) -> Tracer m (TraceObjectDiffusionInbound objectId object) -> @@ -139,23 +136,20 @@ withPeer objectDiffusionTracer decisionTracer globalStateVar - objectPoolWriter peerAddr , psaOnRequestObjects = State.onRequestObjects objectDiffusionTracer decisionTracer globalStateVar - objectPoolWriter peerAddr - , psaOnReceivedIds = + , psaOnReceiveIds = State.onReceiveIds objectDiffusionTracer decisionTracer globalStateVar - objectPoolWriter peerAddr - , psaOnReceivedObjects = + , psaOnReceiveObjects = State.onReceiveObjects objectDiffusionTracer decisionTracer @@ -222,11 +216,9 @@ decisionLogicThread :: , MonadMVar m , MonadSTM m , MonadFork m - , MonadMask m , MonadIO m , Ord peerAddr , Ord objectId - , Hashable peerAddr ) => Tracer m (TraceDecisionLogic peerAddr objectId object) -> Tracer m ObjectDiffusionCounters -> diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/State.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/State.hs index 0c78e41962..1d00f0e496 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/State.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/State.hs @@ -36,13 +36,12 @@ onRequestIds :: Tracer m (TraceObjectDiffusionInbound objectId object) -> Tracer m (TraceDecisionLogic peerAddr objectId object) -> DecisionGlobalStateVar m peerAddr objectId object -> - ObjectPoolWriter objectId object m -> peerAddr -> NumObjectIdsAck -> -- | number of requests to req NumObjectIdsReq -> m () 
-onRequestIds odTracer decisionTracer globalStateVar _objectPoolWriter peerAddr numIdsToAck numIdsToReq = do +onRequestIds odTracer decisionTracer globalStateVar peerAddr numIdsToAck numIdsToReq = do globalState' <- atomically $ do stateTVar globalStateVar @@ -57,7 +56,7 @@ onRequestIds odTracer decisionTracer globalStateVar _objectPoolWriter peerAddr n -- That's why we update the dpsOutstandingFifo and dpsObjectsAvailableIds here. onRequestIdsImpl :: forall peerAddr object objectId. - (Ord objectId, Ord peerAddr, HasCallStack) => + (Ord objectId, Ord peerAddr) => peerAddr -> NumObjectIdsAck -> -- | number of requests to req @@ -108,12 +107,11 @@ onRequestObjects :: Tracer m (TraceObjectDiffusionInbound objectId object) -> Tracer m (TraceDecisionLogic peerAddr objectId object) -> DecisionGlobalStateVar m peerAddr objectId object -> - ObjectPoolWriter objectId object m -> peerAddr -> -- | objets to request, by id Set objectId -> m () -onRequestObjects odTracer decisionTracer globalStateVar _objectPoolWriter peerAddr objectIds = do +onRequestObjects odTracer decisionTracer globalStateVar peerAddr objectIds = do globalState' <- atomically $ do stateTVar globalStateVar @@ -126,7 +124,7 @@ onRequestObjects odTracer decisionTracer globalStateVar _objectPoolWriter peerAd onRequestObjectsImpl :: forall peerAddr object objectId. 
- (Ord objectId, Ord peerAddr, HasCallStack) => + (Ord objectId, Ord peerAddr) => peerAddr -> -- | objets to request, by id Set objectId -> @@ -162,7 +160,6 @@ onReceiveIds :: Tracer m (TraceObjectDiffusionInbound objectId object) -> Tracer m (TraceDecisionLogic peerAddr objectId object) -> DecisionGlobalStateVar m peerAddr objectId object -> - ObjectPoolWriter objectId object m -> peerAddr -> -- | number of requests to subtract from -- `dpsNumIdsInflight` @@ -171,13 +168,12 @@ onReceiveIds :: [objectId] -> -- | received `objectId`s m () -onReceiveIds odTracer decisionTracer globalStateVar objectPoolWriter peerAddr numIdsInitiallyRequested receivedIds = do +onReceiveIds odTracer decisionTracer globalStateVar peerAddr numIdsInitiallyRequested receivedIds = do globalState' <- atomically $ do - hasObject <- opwHasObject objectPoolWriter stateTVar globalStateVar ( \globalState -> - let globalState' = onReceiveIdsImpl hasObject peerAddr numIdsInitiallyRequested receivedIds globalState + let globalState' = onReceiveIdsImpl peerAddr numIdsInitiallyRequested receivedIds globalState in (globalState', globalState') ) traceWith odTracer (TraceObjectDiffusionInboundReceivedIds (length receivedIds)) @@ -186,9 +182,6 @@ onReceiveIds odTracer decisionTracer globalStateVar objectPoolWriter peerAddr nu onReceiveIdsImpl :: forall peerAddr object objectId. 
(Ord objectId, Ord peerAddr, HasCallStack) => - -- | check if objectId is in the objectpool, ref - -- 'opwHasObject' - (objectId -> Bool) -> peerAddr -> -- | number of requests to subtract from -- `dpsNumIdsInflight` @@ -198,7 +191,6 @@ onReceiveIdsImpl :: DecisionGlobalState peerAddr objectId object -> DecisionGlobalState peerAddr objectId object onReceiveIdsImpl - hasObject peerAddr numIdsInitiallyRequested receivedIds @@ -211,7 +203,6 @@ onReceiveIdsImpl where peerState@DecisionPeerState { dpsOutstandingFifo - , dpsObjectsInflightIds , dpsObjectsAvailableIds , dpsNumIdsInflight } = diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Types.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Types.hs index 66562e265b..bcb4d27d6d 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Types.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Types.hs @@ -31,10 +31,6 @@ module Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.Types , ObjectDiffusionCounters (..) , makeObjectDiffusionCounters - -- * Init delay - , ObjectDiffusionInitDelay (..) - , defaultObjectDiffusionInitDelay - -- * Copied from V1 , NumObjectsProcessed (..) , TraceObjectDiffusionInbound (..) 
@@ -49,7 +45,6 @@ import Control.Concurrent.Class.MonadSTM.Strict (MonadSTM, StrictTVar, atomicall import Control.Concurrent.Class.MonadSTM.TSem (TSem, newTSem) import Control.DeepSeq (NFData) import Control.Exception (Exception (..)) -import Control.Monad.Class.MonadTime.SI import Data.Map.Strict (Map) import Data.Map.Strict qualified as Map import Data.Monoid (Sum (..)) @@ -234,14 +229,6 @@ makeObjectDiffusionCounters , odcNumDistinctObjectsOwtPool = Map.size $ dgsObjectsOwtPoolMultiplicities dgs } -data ObjectDiffusionInitDelay - = ObjectDiffusionInitDelay DiffTime - | NoObjectDiffusionInitDelay - deriving (Eq, Show) - -defaultObjectDiffusionInitDelay :: ObjectDiffusionInitDelay -defaultObjectDiffusionInitDelay = ObjectDiffusionInitDelay 60 - -- Copied from V1: newtype NumObjectsProcessed @@ -273,8 +260,6 @@ data TraceObjectDiffusionInbound objectId object | -- | Received a 'ControlMessage' from the outbound peer governor, and about -- to act on it. TraceObjectDiffusionInboundReceivedControlMessage ControlMessage - | TraceObjectDiffusionInboundCanRequestMoreObjects Int - | TraceObjectDiffusionInboundCannotRequestMoreObjects Int | TraceObjectDiffusionInboundReceivedDecision (PeerDecision objectId object) deriving (Eq, Show) diff --git a/ouroboros-consensus/test/consensus-test/Test/Consensus/MiniProtocol/ObjectDiffusion/Smoke.hs b/ouroboros-consensus/test/consensus-test/Test/Consensus/MiniProtocol/ObjectDiffusion/Smoke.hs index ab9d34aaee..3553b8cc68 100644 --- a/ouroboros-consensus/test/consensus-test/Test/Consensus/MiniProtocol/ObjectDiffusion/Smoke.hs +++ b/ouroboros-consensus/test/consensus-test/Test/Consensus/MiniProtocol/ObjectDiffusion/Smoke.hs @@ -31,7 +31,7 @@ import NoThunks.Class (NoThunks) import Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V1 ( objectDiffusionInbound ) -import Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.State +import Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V1.State ( 
ObjectDiffusionInboundStateView (..) ) import Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.ObjectPool.API From 8bd600762f2d76aa6aba3c19ccd33aecf2cfdb2c Mon Sep 17 00:00:00 2001 From: Thomas BAGREL Date: Wed, 15 Oct 2025 14:00:18 +0200 Subject: [PATCH 30/43] Fix formatting --- .../ObjectDiffusion/Inbound/V1.hs | 49 +-- .../ObjectDiffusion/Inbound/V2.hs | 70 ++-- .../ObjectDiffusion/Inbound/V2/Decision.hs | 321 +++++++++--------- .../ObjectDiffusion/Inbound/V2/Registry.hs | 8 +- .../ObjectDiffusion/Inbound/V2/State.hs | 3 +- .../ObjectDiffusion/Inbound/V2/Types.hs | 3 +- .../MiniProtocol/ObjectDiffusion/PerasCert.hs | 2 +- 7 files changed, 236 insertions(+), 220 deletions(-) diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V1.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V1.hs index a9e65d27e2..cec38d41ae 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V1.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V1.hs @@ -146,8 +146,9 @@ objectDiffusionInbound _version controlMessageSTM state = - ObjectDiffusionInboundPipelined $ pure $ - continueWithStateM (go Zero) initialInboundSt + ObjectDiffusionInboundPipelined $ + pure $ + continueWithStateM (go Zero) initialInboundSt where canRequestMoreObjects :: InboundSt k object -> Bool canRequestMoreObjects st = @@ -320,9 +321,10 @@ objectDiffusionInbound -- request. 
let st' = st{numIdsInFlight = numIdsInFlight st - numIdsRequested} poolHasObject <- atomically $ opwHasObject - pure $ continueWithStateM - (go n) - (preAcknowledge st' poolHasObject collectedIds) + pure $ + continueWithStateM + (go n) + (preAcknowledge st' poolHasObject collectedIds) CollectObjects requestedIds collectedObjects -> do let requestedIdsSet = Set.fromList requestedIds obtainedIdsSet = Set.fromList (opwObjectId <$> collectedObjects) @@ -368,15 +370,16 @@ objectDiffusionInbound traceWith tracer $ TraceObjectDiffusionProcessed (NumObjectsProcessed (fromIntegral $ length objectsToAck)) - pure $ continueWithStateM - (go n) - st - { pendingObjects = pendingObjects'' - , outstandingFifo = outstandingFifo' - , numToAckOnNextReq = - numToAckOnNextReq st - + fromIntegral (Seq.length objectIdsToAck) - } + pure $ + continueWithStateM + (go n) + st + { pendingObjects = pendingObjects'' + , outstandingFifo = outstandingFifo' + , numToAckOnNextReq = + numToAckOnNextReq st + + fromIntegral (Seq.length objectIdsToAck) + } goReqObjectIdsBlocking :: Stateful (InboundSt objectId object) 'Z objectId object m goReqObjectIdsBlocking = Stateful $ \st -> do @@ -399,13 +402,14 @@ objectDiffusionInbound -- https://github.com/tweag/cardano-peras/issues/144 Idling.idlingStop (odisvIdling state) traceWith tracer TraceObjectInboundStoppedIdling - pure $ collectAndContinueWithState - (goCollect Zero) - st - { numToAckOnNextReq = 0 - , numIdsInFlight = numIdsToRequest - } - (CollectObjectIds numIdsToRequest (NonEmpty.toList neCollectedIds)) + pure $ + collectAndContinueWithState + (goCollect Zero) + st + { numToAckOnNextReq = 0 + , numIdsInFlight = numIdsToRequest + } + (CollectObjectIds numIdsToRequest (NonEmpty.toList neCollectedIds)) ) goReqObjectsAndObjectIdsPipelined :: @@ -435,7 +439,8 @@ objectDiffusionInbound if numIdsToRequest <= 0 then pure $ continueWithStateM (go n) st else - pure $ SendMsgRequestObjectIdsPipelined + pure $ + SendMsgRequestObjectIdsPipelined 
(numToAckOnNextReq st) numIdsToRequest ( continueWithStateM diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2.hs index 56fdb1fe34..8c7bf9d5ab 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2.hs @@ -1,4 +1,5 @@ {-# LANGUAGE BangPatterns #-} +{-# LANGUAGE BlockArguments #-} {-# LANGUAGE DataKinds #-} {-# LANGUAGE GADTs #-} {-# LANGUAGE ImportQualifiedPost #-} @@ -6,7 +7,6 @@ {-# LANGUAGE LambdaCase #-} {-# LANGUAGE NamedFieldPuns #-} {-# LANGUAGE ScopedTypeVariables #-} -{-# LANGUAGE BlockArguments #-} module Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2 ( -- * ObjectDiffusion Inbound client @@ -24,7 +24,7 @@ module Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2 , defaultDecisionPolicy ) where -import Control.Concurrent.Class.MonadSTM (atomically, MonadSTM) +import Control.Concurrent.Class.MonadSTM (MonadSTM, atomically) import Control.Monad.Class.MonadThrow import Control.Tracer (Tracer, traceWith) import Data.List.NonEmpty qualified as NonEmpty @@ -52,7 +52,8 @@ objectDiffusionInbound :: forall objectId object m. ( MonadThrow m , MonadSTM m - ) =>Tracer m (TraceObjectDiffusionInbound objectId object) -> + ) => + Tracer m (TraceObjectDiffusionInbound objectId object) -> ControlMessageSTM m -> PeerStateAPI m objectId object -> ObjectDiffusionInboundPipelined objectId object m () @@ -69,7 +70,6 @@ objectDiffusionInbound } = ObjectDiffusionInboundPipelined $ pure $ goIdle Zero where - goIdle :: forall (n :: N). 
Nat n -> InboundStIdle n objectId object m () goIdle n = WithEffect $ do ctrlMsg <- atomically controlMessageSTM @@ -80,10 +80,10 @@ objectDiffusionInbound pure $ terminateAfterDrain n -- Otherwise, we can continue the protocol normally. _continue -> do - -- Block on next decision. - decision <- psaReadDecision - traceWith tracer (TraceObjectDiffusionInboundReceivedDecision decision) - pure $ goCollect n decision + -- Block on next decision. + decision <- psaReadDecision + traceWith tracer (TraceObjectDiffusionInboundReceivedDecision decision) + pure $ goCollect n decision terminateAfterDrain :: Nat n -> InboundStIdle n objectId object m () @@ -97,15 +97,16 @@ objectDiffusionInbound goCollect (Succ n) decision = CollectPipelined (Just $ goReqObjects (Succ n) decision) - (\case - CollectObjectIds numIdsRequested ids -> WithEffect $ do - -- TODO: Add checks and validation - psaOnReceiveIds numIdsRequested ids - pure $ goCollect n decision - CollectObjects _objectIds objects -> WithEffect $ do - -- TODO: Add checks and validation - psaOnReceiveObjects objects - pure $ goCollect n decision) + ( \case + CollectObjectIds numIdsRequested ids -> WithEffect $ do + -- TODO: Add checks and validation + psaOnReceiveIds numIdsRequested ids + pure $ goCollect n decision + CollectObjects _objectIds objects -> WithEffect $ do + -- TODO: Add checks and validation + psaOnReceiveObjects objects + pure $ goCollect n decision + ) goReqObjects :: Nat n -> @@ -113,13 +114,14 @@ objectDiffusionInbound InboundStIdle n objectId object m () goReqObjects n object@PeerDecision{pdObjectsToReqIds} = if Set.null pdObjectsToReqIds - then - goReqIds n object - else WithEffect $ do - psaOnRequestObjects pdObjectsToReqIds - pure $ SendMsgRequestObjectsPipelined - (Set.toList pdObjectsToReqIds) - (goReqIds (Succ n) object) + then + goReqIds n object + else WithEffect $ do + psaOnRequestObjects pdObjectsToReqIds + pure $ + SendMsgRequestObjectsPipelined + (Set.toList pdObjectsToReqIds) + (goReqIds 
(Succ n) object) goReqIds :: forall (n :: N). @@ -144,14 +146,15 @@ objectDiffusionInbound else do psaOnRequestIds pdNumIdsToAck pdNumIdsToReq psaOnDecisionExecuted - pure $ SendMsgRequestObjectIdsBlocking - pdNumIdsToAck - pdNumIdsToReq - ( \objectIds -> WithEffect $ do + pure $ + SendMsgRequestObjectIdsBlocking + pdNumIdsToAck + pdNumIdsToReq + ( \objectIds -> WithEffect $ do -- TODO: Add checks and validation psaOnReceiveIds pdNumIdsToReq (NonEmpty.toList objectIds) pure $ goIdle Zero - ) + ) goReqIdsPipelined :: forall (n :: N). @@ -166,7 +169,8 @@ objectDiffusionInbound else do psaOnRequestIds pdNumIdsToAck pdNumIdsToReq psaOnDecisionExecuted - pure $ SendMsgRequestObjectIdsPipelined - pdNumIdsToAck - pdNumIdsToReq - (goIdle (Succ n)) + pure $ + SendMsgRequestObjectIdsPipelined + pdNumIdsToAck + pdNumIdsToReq + (goIdle (Succ n)) diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Decision.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Decision.hs index a7a5119f40..dc5ccc2ec5 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Decision.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Decision.hs @@ -47,14 +47,15 @@ makeDecisions :: -- | New decisions Map peerAddr (PeerDecision objectId object) makeDecisions rng hasObject decisionPolicy globalState prevDecisions = - let -- A subset of peers are currently executing a decision. We shouldn't update the decision for them - frozenPeersToDecisions = Map.filter pdExecutingDecision prevDecisions + let + -- A subset of peers are currently executing a decision. 
We shouldn't update the decision for them + frozenPeersToDecisions = Map.filter pdExecutingDecision prevDecisions - -- We do it in two steps, because computing the acknowledgment tell which objects from dpsObjectsAvailableIds sets of each peer won't actually be available anymore (as soon as we ack them), - -- so that the pickObjectsToReq function can take this into account. - (ackAndRequestIdsDecisions, peerToIdsToAck) = computeAck hasObject decisionPolicy globalState frozenPeersToDecisions - peersToObjectsToReq = pickObjectsToReq rng hasObject decisionPolicy globalState frozenPeersToDecisions peerToIdsToAck - in + -- We do it in two steps, because computing the acknowledgment tell which objects from dpsObjectsAvailableIds sets of each peer won't actually be available anymore (as soon as we ack them), + -- so that the pickObjectsToReq function can take this into account. + (ackAndRequestIdsDecisions, peerToIdsToAck) = computeAck hasObject decisionPolicy globalState frozenPeersToDecisions + peersToObjectsToReq = pickObjectsToReq rng hasObject decisionPolicy globalState frozenPeersToDecisions peerToIdsToAck + in Map.intersectionWith (\decision objectsToReqIds -> decision{pdObjectsToReqIds = objectsToReqIds}) ackAndRequestIdsDecisions @@ -77,13 +78,15 @@ computeAck :: , Map peerAddr (Set objectId) ) computeAck poolHasObject DecisionPolicy{dpMaxNumObjectIdsReq, dpMaxNumObjectsOutstanding} DecisionGlobalState{dgsPeerStates} frozenPeersToDecisions = - let -- We shouldn't create a new decision for peers that are currently executing a decision - filteredPeerStates = Map.withoutKeys dgsPeerStates (Map.keysSet frozenPeersToDecisions) - (decisions, peerToIdsToAck) = - Map.foldlWithKey' computeAckForPeer (Map.empty, Map.empty) filteredPeerStates - in ( decisions - , peerToIdsToAck - ) + let + -- We shouldn't create a new decision for peers that are currently executing a decision + filteredPeerStates = Map.withoutKeys dgsPeerStates (Map.keysSet frozenPeersToDecisions) + 
(decisions, peerToIdsToAck) = + Map.foldlWithKey' computeAckForPeer (Map.empty, Map.empty) filteredPeerStates + in + ( decisions + , peerToIdsToAck + ) where computeAckForPeer :: -- \| Accumulator containing decisions already made for other peers @@ -132,7 +135,7 @@ orderPeers :: StdGen -> Map peerAddr (DecisionPeerState objectId object) -> [(peerAddr, DecisionPeerState objectId object)] -orderPeers _rng = undefined -- TODO +orderPeers _rng = undefined -- TODO data DownloadPickState peerAddr objectId = DownloadPickState @@ -171,159 +174,159 @@ pickObjectsToReq } frozenPeersToDecisions peerToIdsToAck = - peersToObjectsToReq - where - -- We order the peers that are not currently executing a decision - orderedPeers = orderPeers rng (dgsPeerStates `Map.withoutKeys` Map.keysSet frozenPeersToDecisions) + peersToObjectsToReq + where + -- We order the peers that are not currently executing a decision + orderedPeers = orderPeers rng (dgsPeerStates `Map.withoutKeys` Map.keysSet frozenPeersToDecisions) - -- We want to map each objectId to the sorted list of peers that can provide it - -- For each peer we also indicate how many objects it has in flight at the moment - -- We filter out here the objects that are already in pool - objectsToSortedProviders :: Map objectId [(peerAddr, NumObjectsReq)] - objectsToSortedProviders = - -- We iterate over each peer and the corresponding available ids - -- and turn the map "inside-out" - Foldable.foldl' - ( \accMap (peerAddr, DecisionPeerState{dpsObjectsAvailableIds, dpsObjectsInflightIds}) -> - let - -- ids that will be acked for this peer won't be available anymore, so we should not consider them in the decision logic - idsToAckForThisPeer = - Map.findWithDefault - (error "invariant violated: peer must be in peerToIdsToAck map") - peerAddr - peerToIdsToAck - -- we should also remove objects that are already in the pool - interestingAndAvailableObjectIds = - Set.filter (not . 
poolHasObject) $ - dpsObjectsAvailableIds `Set.difference` idsToAckForThisPeer - in - -- we iterate over interestingAndAvailableObjectIds and add the peer to the list of providers for each object it can provide - Foldable.foldl' - ( \accMap' objectId -> Map.insertWith (++) objectId [(peerAddr, fromIntegral $ Set.size dpsObjectsInflightIds)] accMap' - ) - accMap - interestingAndAvailableObjectIds - ) - Map.empty - orderedPeers + -- We want to map each objectId to the sorted list of peers that can provide it + -- For each peer we also indicate how many objects it has in flight at the moment + -- We filter out here the objects that are already in pool + objectsToSortedProviders :: Map objectId [(peerAddr, NumObjectsReq)] + objectsToSortedProviders = + -- We iterate over each peer and the corresponding available ids + -- and turn the map "inside-out" + Foldable.foldl' + ( \accMap (peerAddr, DecisionPeerState{dpsObjectsAvailableIds, dpsObjectsInflightIds}) -> + let + -- ids that will be acked for this peer won't be available anymore, so we should not consider them in the decision logic + idsToAckForThisPeer = + Map.findWithDefault + (error "invariant violated: peer must be in peerToIdsToAck map") + peerAddr + peerToIdsToAck + -- we should also remove objects that are already in the pool + interestingAndAvailableObjectIds = + Set.filter (not . 
poolHasObject) $ + dpsObjectsAvailableIds `Set.difference` idsToAckForThisPeer + in + -- we iterate over interestingAndAvailableObjectIds and add the peer to the list of providers for each object it can provide + Foldable.foldl' + ( \accMap' objectId -> Map.insertWith (++) objectId [(peerAddr, fromIntegral $ Set.size dpsObjectsInflightIds)] accMap' + ) + accMap + interestingAndAvailableObjectIds + ) + Map.empty + orderedPeers - frozenPeerStatesWithDecisions = Map.intersectionWith (,) dgsPeerStates frozenPeersToDecisions + frozenPeerStatesWithDecisions = Map.intersectionWith (,) dgsPeerStates frozenPeersToDecisions - availablePeerStates = Map.withoutKeys dgsPeerStates (Map.keysSet frozenPeersToDecisions) + availablePeerStates = Map.withoutKeys dgsPeerStates (Map.keysSet frozenPeersToDecisions) - -- For frozen peers, we should consider that the objects in pdObjectsToReqIds will be requested soon, so we should consider them as inflight for the purpose of picking objects to request for other peers - objectsInFlightMultiplicitiesOfFrozenPeer = Map.foldl' - ( \accMap (DecisionPeerState{dpsObjectsInflightIds}, PeerDecision{pdObjectsToReqIds}) -> - Foldable.foldl' - ( \accMap' objectId -> Map.insertWith (+) objectId 1 accMap' - ) - accMap - (Set.union dpsObjectsInflightIds pdObjectsToReqIds) - ) - Map.empty - frozenPeerStatesWithDecisions - -- Finally, we add to the previous map the objects that are currently inflight from peers for which we will make a decision in this round - objectsInFlightMultiplicities = Map.foldl' - ( \accMap (DecisionPeerState{dpsObjectsInflightIds}) -> - Foldable.foldl' - ( \accMap' objectId -> Map.insertWith (+) objectId 1 accMap' - ) - accMap - dpsObjectsInflightIds - ) - objectsInFlightMultiplicitiesOfFrozenPeer - availablePeerStates - - totalNumObjectsInflight :: NumObjectsReq - totalNumObjectsInflight = fromIntegral $ Map.foldl' (+) 0 objectsInFlightMultiplicities + -- For frozen peers, we should consider that the objects in pdObjectsToReqIds 
will be requested soon, so we should consider them as inflight for the purpose of picking objects to request for other peers + objectsInFlightMultiplicitiesOfFrozenPeer = + Map.foldl' + ( \accMap (DecisionPeerState{dpsObjectsInflightIds}, PeerDecision{pdObjectsToReqIds}) -> + Foldable.foldl' + (\accMap' objectId -> Map.insertWith (+) objectId 1 accMap') + accMap + (Set.union dpsObjectsInflightIds pdObjectsToReqIds) + ) + Map.empty + frozenPeerStatesWithDecisions + -- Finally, we add to the previous map the objects that are currently inflight from peers for which we will make a decision in this round + objectsInFlightMultiplicities = + Map.foldl' + ( \accMap (DecisionPeerState{dpsObjectsInflightIds}) -> + Foldable.foldl' + (\accMap' objectId -> Map.insertWith (+) objectId 1 accMap') + accMap + dpsObjectsInflightIds + ) + objectsInFlightMultiplicitiesOfFrozenPeer + availablePeerStates - objectsOwtPoolMultiplicities = Map.foldl' - ( \accMap (DecisionPeerState{dpsObjectsOwtPool}) -> - Foldable.foldl' - ( \accMap' objectId -> Map.insertWith (+) objectId 1 accMap' - ) - accMap - (Map.keys dpsObjectsOwtPool) - ) - Map.empty - dgsPeerStates + totalNumObjectsInflight :: NumObjectsReq + totalNumObjectsInflight = fromIntegral $ Map.foldl' (+) 0 objectsInFlightMultiplicities - -- We also want to know for each objects how many peers have it in the inflight or owtPool, - -- meaning that we should receive them soon. 
- -- We should also add here the objects that are in the pdObjectsToReqIds of each peer decision for frozen peers, - -- if these ids are not already in dpsObjectsInflight or dpsObjectsOwtPool of this peer - objectsExpectedSoonMultiplicities :: Map objectId ObjectMultiplicity - objectsExpectedSoonMultiplicities = Map.unionWith (+) objectsInFlightMultiplicities objectsOwtPoolMultiplicities + objectsOwtPoolMultiplicities = + Map.foldl' + ( \accMap (DecisionPeerState{dpsObjectsOwtPool}) -> + Foldable.foldl' + (\accMap' objectId -> Map.insertWith (+) objectId 1 accMap') + accMap + (Map.keys dpsObjectsOwtPool) + ) + Map.empty + dgsPeerStates - -- Now we join objectsToSortedProviders and objectsExpectedSoonMultiplicities maps on objectId for easy fold - objectsToProvidersAndExpectedMultiplicities :: - Map objectId ([(peerAddr, NumObjectsReq)], ObjectMultiplicity) - objectsToProvidersAndExpectedMultiplicities = - Map.merge - -- if an objectId is missing from objectsExpectedSoonMultiplicities, then its expected multiplicity is 0 - (Map.mapMissing \_ providers -> (providers, 0)) - -- if an objectId is missing from objectsToSortedProviders, then we don't care about it - Map.dropMissing - -- Combine in a tuple the list of providers and the expected multiplicity - (Map.zipWithMatched \_ providers expectedMultiplicity -> (providers, expectedMultiplicity)) - objectsToSortedProviders - objectsExpectedSoonMultiplicities + -- We also want to know for each objects how many peers have it in the inflight or owtPool, + -- meaning that we should receive them soon. 
+ -- We should also add here the objects that are in the pdObjectsToReqIds of each peer decision for frozen peers, + -- if these ids are not already in dpsObjectsInflight or dpsObjectsOwtPool of this peer + objectsExpectedSoonMultiplicities :: Map objectId ObjectMultiplicity + objectsExpectedSoonMultiplicities = Map.unionWith (+) objectsInFlightMultiplicities objectsOwtPoolMultiplicities - -- NOW HERE TAKE PLACE THE ACTUAL DECISION LOGIC AND ATTRIBUTION OF OBJECTS TO PEERS + -- Now we join objectsToSortedProviders and objectsExpectedSoonMultiplicities maps on objectId for easy fold + objectsToProvidersAndExpectedMultiplicities :: + Map objectId ([(peerAddr, NumObjectsReq)], ObjectMultiplicity) + objectsToProvidersAndExpectedMultiplicities = + Map.merge + -- if an objectId is missing from objectsExpectedSoonMultiplicities, then its expected multiplicity is 0 + (Map.mapMissing \_ providers -> (providers, 0)) + -- if an objectId is missing from objectsToSortedProviders, then we don't care about it + Map.dropMissing + -- Combine in a tuple the list of providers and the expected multiplicity + (Map.zipWithMatched \_ providers expectedMultiplicity -> (providers, expectedMultiplicity)) + objectsToSortedProviders + objectsExpectedSoonMultiplicities - -- The current decision logic is greedy on objects, so it will try to request as many copies of the same object as possible, - -- meaning we will have optimal coverage of the first objects, but might not request some other objects at all if they are (only) provided by peers that are already saturated. 
+ -- NOW HERE TAKE PLACE THE ACTUAL DECISION LOGIC AND ATTRIBUTION OF OBJECTS TO PEERS - -- Now we compute the actual attribution of downloads for peers - DownloadPickState{peersToObjectsToReq} = - -- We iterate over each objectId and the corresponding (providers, expectedMultiplicity) - Map.foldlWithKey' - ( \st objectId (providers, expectedMultiplicity) -> - -- reset the objectMultiplicity counter for each new objectId - let st' = st{objectMultiplicity = 0} - in -- We iterate over the list of providers, and pick them or not according to the current state - -- When a peer is selected as a provider for this objectId, we insert the objectId in the peer's set in peersToObjectsToReq (inside St) - -- So the result of the filtering of providers is part of the final St state - Foldable.foldl' - (howToFoldProviders objectId expectedMultiplicity) - st' - providers - ) - DownloadPickState - { totalNumObjectsToReq = 0 - , objectMultiplicity = 0 - , peersToObjectsToReq = Map.empty - } - objectsToProvidersAndExpectedMultiplicities + -- The current decision logic is greedy on objects, so it will try to request as many copies of the same object as possible, + -- meaning we will have optimal coverage of the first objects, but might not request some other objects at all if they are (only) provided by peers that are already saturated. 
- -- This function decides whether or not we should select a given peer as provider for the current objectId - -- it takes into account if we are expecting to obtain the object from other sources (either inflight/owt pool already, or if the object will be requested from already selected peers in this given round) - howToFoldProviders :: - objectId -> - ObjectMultiplicity -> - DownloadPickState peerAddr objectId -> - (peerAddr, NumObjectsReq) -> - DownloadPickState peerAddr objectId - howToFoldProviders objectId expectedMultiplicity st@DownloadPickState{totalNumObjectsToReq, objectMultiplicity, peersToObjectsToReq} (peerAddr, numObjectsInFlight) = - let - -- see what has already been attributed to this peer - objectsToReq = Map.findWithDefault Set.empty peerAddr peersToObjectsToReq + -- Now we compute the actual attribution of downloads for peers + DownloadPickState{peersToObjectsToReq} = + -- We iterate over each objectId and the corresponding (providers, expectedMultiplicity) + Map.foldlWithKey' + ( \st objectId (providers, expectedMultiplicity) -> + -- reset the objectMultiplicity counter for each new objectId + let st' = st{objectMultiplicity = 0} + in -- We iterate over the list of providers, and pick them or not according to the current state + -- When a peer is selected as a provider for this objectId, we insert the objectId in the peer's set in peersToObjectsToReq (inside St) + -- So the result of the filtering of providers is part of the final St state + Foldable.foldl' + (howToFoldProviders objectId expectedMultiplicity) + st' + providers + ) + DownloadPickState + { totalNumObjectsToReq = 0 + , objectMultiplicity = 0 + , peersToObjectsToReq = Map.empty + } + objectsToProvidersAndExpectedMultiplicities - shouldSelect = - -- We should not go over the multiplicity limit per object - objectMultiplicity + expectedMultiplicity < dpMaxObjectInflightMultiplicity - -- We should not go over the total number of objects inflight limit - && totalNumObjectsInflight + 
totalNumObjectsToReq < dpMaxNumObjectsInflightTotal - -- We should not go over the per-peer number of objects inflight limit - && numObjectsInFlight + (fromIntegral $ Set.size objectsToReq) < dpMaxNumObjectsInflightPerPeer - in - if shouldSelect - then - -- We increase both global count and per-object count, and we add the object to the peer's set - DownloadPickState - { totalNumObjectsToReq = totalNumObjectsToReq + 1 - , objectMultiplicity = objectMultiplicity + 1 - , peersToObjectsToReq = Map.insert peerAddr (Set.insert objectId objectsToReq) peersToObjectsToReq - } - -- Or we keep the state as is if we don't select this peer - else st + -- This function decides whether or not we should select a given peer as provider for the current objectId + -- it takes into account if we are expecting to obtain the object from other sources (either inflight/owt pool already, or if the object will be requested from already selected peers in this given round) + howToFoldProviders :: + objectId -> + ObjectMultiplicity -> + DownloadPickState peerAddr objectId -> + (peerAddr, NumObjectsReq) -> + DownloadPickState peerAddr objectId + howToFoldProviders objectId expectedMultiplicity st@DownloadPickState{totalNumObjectsToReq, objectMultiplicity, peersToObjectsToReq} (peerAddr, numObjectsInFlight) = + let + -- see what has already been attributed to this peer + objectsToReq = Map.findWithDefault Set.empty peerAddr peersToObjectsToReq + + shouldSelect = + -- We should not go over the multiplicity limit per object + objectMultiplicity + expectedMultiplicity < dpMaxObjectInflightMultiplicity + -- We should not go over the total number of objects inflight limit + && totalNumObjectsInflight + totalNumObjectsToReq < dpMaxNumObjectsInflightTotal + -- We should not go over the per-peer number of objects inflight limit + && numObjectsInFlight + (fromIntegral $ Set.size objectsToReq) < dpMaxNumObjectsInflightPerPeer + in + if shouldSelect + then + -- We increase both global count and per-object 
count, and we add the object to the peer's set + DownloadPickState + { totalNumObjectsToReq = totalNumObjectsToReq + 1 + , objectMultiplicity = objectMultiplicity + 1 + , peersToObjectsToReq = Map.insert peerAddr (Set.insert objectId objectsToReq) peersToObjectsToReq + } + -- Or we keep the state as is if we don't select this peer + else st diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Registry.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Registry.hs index a09327cc80..b3603aa3d1 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Registry.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Registry.hs @@ -24,6 +24,7 @@ import Control.Monad.Class.MonadFork import Control.Monad.Class.MonadThrow import Control.Monad.Class.MonadTime.SI import Control.Monad.Class.MonadTimer.SI +import Control.Monad.IO.Class (MonadIO) import Control.Tracer (Tracer, traceWith) import Data.Foldable as Foldable (traverse_) import Data.Map.Strict (Map) @@ -39,7 +40,6 @@ import Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.Types import Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.ObjectPool.API import Ouroboros.Network.Protocol.ObjectDiffusion.Type (NumObjectIdsAck, NumObjectIdsReq) import System.Random (initStdGen) -import Control.Monad.IO.Class (MonadIO) -- | Communication channels between `ObjectDiffusion` mini-protocol inbound side -- and decision logic. 
@@ -122,12 +122,14 @@ withPeer return ( peerToChannel' , PeerStateAPI - { psaReadDecision = do -- TODO: make atomic + { psaReadDecision = do + -- TODO: make atomic decision <- takeMVar chan' let decision' = decision{pdExecutingDecision = True} putMVar chan' decision' return decision' - , psaOnDecisionExecuted = do -- TODO: make atomic + , psaOnDecisionExecuted = do + -- TODO: make atomic decision <- takeMVar chan' let decision' = decision{pdExecutingDecision = False} putMVar chan' decision' diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/State.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/State.hs index 1d00f0e496..6965523f46 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/State.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/State.hs @@ -297,7 +297,8 @@ onReceiveObjectsImpl peerAddr objectsReceived st@DecisionGlobalState - { dgsPeerStates } = + { dgsPeerStates + } = st { dgsPeerStates = dgsPeerStates' } diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Types.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Types.hs index bcb4d27d6d..8a43ae00a2 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Types.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Types.hs @@ -225,7 +225,8 @@ makeObjectDiffusionCounters ObjectDiffusionCounters { odcNumDistinctObjectsAvailable = Map.size $ dgsObjectsAvailableMultiplicities dgs , odcNumDistinctObjectsInflight = Map.size $ dgsObjectsInflightMultiplicities dgs - , odcNumTotalObjectsInflight = fromIntegral . mconcat . 
Map.elems $ dgsObjectsInflightMultiplicities dgs + , odcNumTotalObjectsInflight = + fromIntegral . mconcat . Map.elems $ dgsObjectsInflightMultiplicities dgs , odcNumDistinctObjectsOwtPool = Map.size $ dgsObjectsOwtPoolMultiplicities dgs } diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/PerasCert.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/PerasCert.hs index 878f4630f8..c86cef1707 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/PerasCert.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/PerasCert.hs @@ -14,8 +14,8 @@ module Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.PerasCert ) where import Ouroboros.Consensus.Block -import Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V1.State import Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V1 +import Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V1.State import Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.ObjectPool.API import Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Outbound import Ouroboros.Consensus.Storage.PerasCertDB.API From d6aaf0c939f11701ce02838630864c426911097c Mon Sep 17 00:00:00 2001 From: Thomas BAGREL Date: Mon, 20 Oct 2025 14:23:23 +0200 Subject: [PATCH 31/43] Update s-r-p to point on latest ouroboros-network/peras-staging --- cabal.project | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cabal.project b/cabal.project index 06c514ccbb..6fa991395e 100644 --- a/cabal.project +++ b/cabal.project @@ -59,8 +59,8 @@ allow-newer: source-repository-package type: git location: https://github.com/IntersectMBO/ouroboros-network - tag: 04de687146c07c17a6566577040781f66a9e8a11 - --sha256: sha256-97KSu1vlohcGyBhGQdGuEPErPaK01Y5RPepfEtcclMM= + tag: 0db8669b67982cba755e80bf2e413527def41244 + --sha256: 
sha256-vEO721Xab0RTVKFQFKal5VCV5y+OUzELo8+7Z8TETJQ= subdir: ouroboros-network ouroboros-network-protocols From aa34febd2a608e7c08a1fc5cc27672c2fb0ac71f Mon Sep 17 00:00:00 2001 From: Thomas BAGREL Date: Mon, 20 Oct 2025 14:46:23 +0200 Subject: [PATCH 32/43] Move V2.mermaid to V2.md Co-authored-by: nbacquey --- .../ObjectDiffusion/Inbound/V1.hs | 3 +- .../ObjectDiffusion/Inbound/V2.hs | 2 +- .../ObjectDiffusion/Inbound/V2.md | 40 +++++++++++++++++++ .../ObjectDiffusion/Inbound/V2.mermaid | 33 --------------- 4 files changed, 42 insertions(+), 36 deletions(-) create mode 100644 ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2.md delete mode 100644 ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2.mermaid diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V1.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V1.hs index cec38d41ae..8ca2041356 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V1.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V1.hs @@ -147,8 +147,7 @@ objectDiffusionInbound controlMessageSTM state = ObjectDiffusionInboundPipelined $ - pure $ - continueWithStateM (go Zero) initialInboundSt + continueWithStateM (go Zero) initialInboundSt where canRequestMoreObjects :: InboundSt k object -> Bool canRequestMoreObjects st = diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2.hs index 8c7bf9d5ab..cea3d624f3 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2.hs +++ 
b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2.hs @@ -68,7 +68,7 @@ objectDiffusionInbound , psaOnReceiveIds , psaOnReceiveObjects } = - ObjectDiffusionInboundPipelined $ pure $ goIdle Zero + ObjectDiffusionInboundPipelined $ goIdle Zero where goIdle :: forall (n :: N). Nat n -> InboundStIdle n objectId object m () goIdle n = WithEffect $ do diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2.md b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2.md new file mode 100644 index 0000000000..1569b46e77 --- /dev/null +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2.md @@ -0,0 +1,40 @@ +# Object Diffusion Inbound Mini-Protocol V2 + +```mermaid +%%{init: {"flowchart": {"htmlLabels": true}} }%% +flowchart TD + A(dpsNumIdsInFlight) + B(dpsOutstandingFifo) + C(dpsObjectsAvailableIds) + D(dpsObjectsInflightIds) + F(dpsObjectsOwtPool) + + EA{onRequestIds} + EA-->|+count| A + B -->|-ids| EA + C -->|-ids| EA + + EB{onReceiveIds} + A -->|-count| EB + EB -->|+ids| B + IN1@{ shape: lin-cyl, label: "ids" } --o EB + EB -->|+ids| C + + EC{onRequestObjects} + C -->|-ids| EC + EC -->|+ids| D + + ED{onReceiveObjects / submitToPool} + D -->|-ids| ED + IN2@{ shape: lin-cyl, label: "objects" } --o ED + ED -->|+objects| F + + EE{makeDecisions} + EA ~~~ EE + EC ~~~ EE + EE -.->|readDecision : pdIdsToAck + pdIdsToReq + pdCanPipelineIdsReq/| EA + EE -.->|readDecision : pdObjectsToReqIds| EC + + EG{Added to pool} + F -->|-objects| EG +``` diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2.mermaid b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2.mermaid deleted file mode 100644 index 9f6cff0006..0000000000 --- 
a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2.mermaid +++ /dev/null @@ -1,33 +0,0 @@ -flowchart TD - A(dpsNumIdsInFlight) - B(dpsOutstandingFifo) - C(dpsObjectsAvailableIds) - D(dpsObjectsInflightIds) - F(dpsObjectsOwtPool) - - EA{onRequestIds} - EA-->|+count| A - B -->|-ids| EA - C -->|-ids| EA - - EB{onReceiveIds} - A -->|-count| EB - EB -->|+ids| B - IN1@{ shape: lin-cyl, label: "ids" } --o EB - EB -->|+ids| C - - EC{onRequestObjects} - C -->|-ids| EC - EC -->|+ids| D - - ED{onReceiveObjects / submitToPool} - D -->|-ids| ED - IN2@{ shape: lin-cyl, label: "objects" } --o ED - ED -->|+objects| F - - EE{makeDecisions} - EE -.->|readDecision : pdIdsToAck + pdIdsToReq + pdCanPipelineIdsReq/| EA - EE -.->|readDecision : pdObjectsToReqIds| EC - - EG{Added to pool} - F -->|-objects| EG From 4e145a350ad613c92607de8b23b4c303ee41a376 Mon Sep 17 00:00:00 2001 From: Thomas BAGREL Date: Mon, 20 Oct 2025 17:07:21 +0200 Subject: [PATCH 33/43] WIP: documentation effort on registry and state --- .../ObjectDiffusion/Inbound/V2.hs | 15 +- .../ObjectDiffusion/Inbound/V2.md | 40 +++- .../ObjectDiffusion/Inbound/V2/Decision.hs | 4 +- .../ObjectDiffusion/Inbound/V2/Registry.hs | 196 +++++++++++------- .../ObjectDiffusion/Inbound/V2/State.hs | 5 +- .../ObjectDiffusion/Inbound/V2/Types.hs | 26 ++- 6 files changed, 197 insertions(+), 89 deletions(-) diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2.hs index cea3d624f3..c12ca20c3c 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2.hs @@ -1,4 +1,3 @@ -{-# LANGUAGE BangPatterns #-} {-# LANGUAGE BlockArguments #-} {-# LANGUAGE 
DataKinds #-} {-# LANGUAGE GADTs #-} @@ -45,9 +44,9 @@ import Ouroboros.Network.Protocol.ObjectDiffusion.Inbound -- 2. Handle any available reply (`goCollect`) -- 3. Request new objects if possible (`goReqObjects`) -- 4. Request new ids (also responsible for ack) (`goReqIds`) --- 5. signal psaOnDecisionExecuted (as part of `goReqIds{Blocking,NonBlocking}`) +-- 5. signal psaOnDecisionCompleted (as part of `goReqIds{Blocking,NonBlocking}`) -- And loop again --- We need to make sure we don't go again into `goIdle` until `psaOnDecisionExecuted` has been called +-- We need to make sure we don't go again into `goIdle` until `psaOnDecisionCompleted` has been called objectDiffusionInbound :: forall objectId object m. ( MonadThrow m @@ -62,7 +61,7 @@ objectDiffusionInbound controlMessageSTM PeerStateAPI { psaReadDecision - , psaOnDecisionExecuted + , psaOnDecisionCompleted , psaOnRequestIds , psaOnRequestObjects , psaOnReceiveIds @@ -141,11 +140,11 @@ objectDiffusionInbound goReqIdsBlocking PeerDecision{pdNumIdsToAck, pdNumIdsToReq} = WithEffect $ do if pdNumIdsToReq == 0 then do - psaOnDecisionExecuted + psaOnDecisionCompleted pure $ goIdle Zero else do psaOnRequestIds pdNumIdsToAck pdNumIdsToReq - psaOnDecisionExecuted + psaOnDecisionCompleted pure $ SendMsgRequestObjectIdsBlocking pdNumIdsToAck @@ -164,11 +163,11 @@ objectDiffusionInbound goReqIdsPipelined n PeerDecision{pdNumIdsToAck, pdNumIdsToReq} = WithEffect $ do if pdNumIdsToReq == 0 then do - psaOnDecisionExecuted + psaOnDecisionCompleted pure $ goIdle n else do psaOnRequestIds pdNumIdsToAck pdNumIdsToReq - psaOnDecisionExecuted + psaOnDecisionCompleted pure $ SendMsgRequestObjectIdsPipelined pdNumIdsToAck diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2.md b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2.md index 1569b46e77..b9c72f1b6b 100644 --- 
a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2.md +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2.md @@ -1,5 +1,16 @@ # Object Diffusion Inbound Mini-Protocol V2 +### TODOs + +- talk about pipelined vs. not pipelined +- what happens when a peer gets disconnected: ensure correctness +- describe the strategy for individual peer decisions +- talk about contraints / partial functions +- how is acknowledgement implemented, and where +- describe lifetime of id + related object +- describe main loop, starting with makeDecisions? +- list differences with TxSubmissionV2 + ```mermaid %%{init: {"flowchart": {"htmlLabels": true}} }%% flowchart TD @@ -24,7 +35,7 @@ flowchart TD C -->|-ids| EC EC -->|+ids| D - ED{onReceiveObjects / submitToPool} + ED{onReceiveObjects} D -->|-ids| ED IN2@{ shape: lin-cyl, label: "objects" } --o ED ED -->|+objects| F @@ -38,3 +49,30 @@ flowchart TD EG{Added to pool} F -->|-objects| EG ``` + +The inbound peer (defined in `V2.hs`) has no direct access to the state. It only has access to a monadic API `PeerStateAPI` defined in `Registry.hs`. + +This API has 4 callbacks, that trigger global state changes: + +- `psaOnRequestIds` (corresponding to `onRequestIds` from `State.hs`) that must be called when emitting a request for new IDs (that also acks previously received IDs that we no longer care about). Under the hood, `onRequestIds` will increase the `dpsNumIdsInFlight` count by the requested number of IDs, and remove the acked IDs from `dpsOutstandingFifo` and `dpsObjectsAvailableIds`. +- `psaOnReceiveIds` (corresponding to `onReceiveIds` from `State.hs`) that must be called after receiving new IDs from the outbound peer, after validating that we received the correct number (not more than requested). 
Under the hood, `onReceiveIds` will decrease the `dpsNumIdsInFlight` count by **the number of IDs that were requested in the request corresponding to this reply** (it might be more than the number of received IDs), and add the received IDs to `dpsOutstandingFifo` and `dpsObjectsAvailableIds`. +- `psaOnRequestObjects` (corresponding to `onRequestObjects` from `State.hs`) that must be called when emitting a request for new objects. Under the hood, `onRequestObjects` will remove the requested IDs from `dpsObjectsAvailableIds` and add them to `dpsObjectsInflightIds`. +- `psaOnReceiveObjects` (corresponding to `onReceiveObjects` from `State.hs`) that must be called when receiving objects from the outbound peer, after validating that the received objects match exactly the requested IDs, and that all received objects have valid cryptographic proofs. Under the hood, `onReceiveObjects` will remove the received IDs from `dpsObjectsInflightIds`, and add the received objects to `dpsOwtPool`, and call the `submitObjectsToPool` subroutine that will actually insert the objects into the object pool when the lock can be acquired (at which point the objects are removed from `dpsOwtPool`) + +To know when to call request IDs or objects, the inbound peer (`V2.hs`) relies on a global-state decision procedure, running in another thread, mainly implemented in `Decision.hs` and called from `Registry.hs`. + +This decision procedure refreshes `PeerDecision`s periodically for each peer. The peer can read this decision via the `psaReadDecision` callback in the `PeerStateAPI`. The decision has a flag `pdStatus` that is set to `DecisionBeingActedUpon` when a decision has been read by the peer, and stays at that state until the peer calls `psaOnDecisionCompleted`, indicating that it has executed the decision (the status is set to `DecisionCompleted`) and that the global decision logic can generate a new one for this peer (a decision can also be updated when it is in state `DecisionUnread`). 
While the flag is set to `DecisionBeingActedUpon`, the global-state decision logic will not update the decision for this peer (it is locked, or "frozen"). + +In the decision, the peer can find: +- `pdIdsToReq`: number of new IDs to request from the outbound peer +- `pdIdsToAck`: a set of IDs that the peer should ack in its next request for IDs. Note that if `pdIdsToReq` is zero, then no request for IDs will be sent, and thus no acking will happen despite `pdIdsToAck` being non-zero. +- `pdCanPipelineIdsReq`: a flag indicating whether the peer can pipeline its requests for IDs (instead of making a blocking call). In ObjectDiffusion protocol specification, a peer can only pipeline requests for IDs when there are some unacknowledged IDs, i.e. when the `dpsOutstandingFifo` is not empty. +- `pdObjectsToReqIds`: the IDs of the objects that the inbound peer should request from the outbound peer. +- `pdStatus` is not meant to be read by the peer itself; it will always be set to `DecisionBeingActedUpon` when the inbound peer is able to read a decision. + +In a round of its main loop, the inbound peer will: +1. Read the current decision via `psaReadDecision` +2. Try to read any available reply from the outbound peer if there have been pipelined requests in previous rounds +3. Try to request objects (if any) as per `pdObjectsToReqIds` +4. Try to request IDs (if any) as per `pdIdsToReq`; acking `pdIdsToAck` as a side-effect +5. 
Call `psaOnDecisionCompleted` to signal that a new decision should be made for this peer diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Decision.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Decision.hs index dc5ccc2ec5..5592330684 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Decision.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Decision.hs @@ -49,7 +49,7 @@ makeDecisions :: makeDecisions rng hasObject decisionPolicy globalState prevDecisions = let -- A subset of peers are currently executing a decision. We shouldn't update the decision for them - frozenPeersToDecisions = Map.filter pdExecutingDecision prevDecisions + frozenPeersToDecisions = Map.filter (\PeerDecision{pdStatus} -> pdStatus == DecisionBeingActedUpon) prevDecisions -- We do it in two steps, because computing the acknowledgment tell which objects from dpsObjectsAvailableIds sets of each peer won't actually be available anymore (as soon as we ack them), -- so that the pickObjectsToReq function can take this into account. 
@@ -124,7 +124,7 @@ computeAck poolHasObject DecisionPolicy{dpMaxNumObjectIdsReq, dpMaxNumObjectsOut , pdNumIdsToReq , pdCanPipelineIdsRequests , pdObjectsToReqIds = Set.empty -- we don't decide this here - , pdExecutingDecision = False + , pdStatus = DecisionUnread } in ( Map.insert peerAddr peerDecision decisionsAcc diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Registry.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Registry.hs index b3603aa3d1..3eb8034bab 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Registry.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Registry.hs @@ -1,7 +1,6 @@ {-# LANGUAGE BangPatterns #-} {-# LANGUAGE BlockArguments #-} {-# LANGUAGE ImportQualifiedPost #-} -{-# LANGUAGE LambdaCase #-} {-# LANGUAGE NamedFieldPuns #-} {-# LANGUAGE ScopedTypeVariables #-} @@ -17,9 +16,8 @@ module Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.Registry , decisionLogicThread ) where -import Control.Concurrent.Class.MonadMVar.Strict import Control.Concurrent.Class.MonadSTM.Strict -import Control.Monad (forever) +import Control.Monad (forever, when) import Control.Monad.Class.MonadFork import Control.Monad.Class.MonadThrow import Control.Monad.Class.MonadTime.SI @@ -44,28 +42,80 @@ import System.Random (initStdGen) -- | Communication channels between `ObjectDiffusion` mini-protocol inbound side -- and decision logic. 
type PeerDecisionChannels m peerAddr objectId object = - Map peerAddr (StrictMVar m (PeerDecision objectId object)) + Map peerAddr (StrictTVar m (PeerDecision objectId object)) type PeerDecisionChannelsVar m peerAddr objectId object = - StrictMVar m (PeerDecisionChannels m peerAddr objectId object) + StrictTVar m (PeerDecisionChannels m peerAddr objectId object) newPeerDecisionChannelsVar :: - MonadMVar m => m (PeerDecisionChannelsVar m peerAddr objectId object) -newPeerDecisionChannelsVar = newMVar (Map.empty) + MonadSTM m => m (PeerDecisionChannelsVar m peerAddr objectId object) +newPeerDecisionChannelsVar = newTVarIO (Map.empty) data PeerStateAPI m objectId object = PeerStateAPI { psaReadDecision :: m (PeerDecision objectId object) - -- ^ a blocking action which reads `PeerDecision` - , psaOnDecisionExecuted :: m () - -- ^ to be called by the peer when it has fully executed the decision. - -- Marks the peer as available for the `makeDecision` logic + -- ^ A blocking action which reads the `PeerDecision` for this peer from the decision channel. + -- It blocks until a new decision (i.e. with status `DecisionUnread`) is emitted for the peer by the decision thread, + -- and immediately turns its status to `DecisionBeingActedUpon`. + -- + -- PRECONDITIONS: + -- * The decision in the channel has status `DecisionUnread` or `DecisionCompleted` + -- POSTCONDITIONS: + -- * The decision in the channel has status `DecisionBeingActedUpon` + , psaOnDecisionCompleted :: m () + -- ^ To be called by the peer when it has fully executed the decision. + -- Marks the peer as available for the decision logic. + -- + -- PRECONDITIONS: + -- * The decision in the channel has status `DecisionBeingActedUpon` + -- POSTCONDITIONS: + -- * The decision in the channel has status `DecisionCompleted` , psaOnRequestIds :: NumObjectIdsAck -> NumObjectIdsReq -> m () + -- ^ To be called when emitting a request for new IDs (that also acks previously received IDs that we no longer care about). 
+ -- Under the hood, it will increase the `dpsNumIdsInFlight` count by the requested number of IDs, and remove the acked IDs + from `dpsOutstandingFifo` and `dpsObjectsAvailableIds`. Note that those IDs may not be present in the latter, if they have + already been requested to the outbound peer. + -- + -- PRECONDITIONS: + -- * `dpsOutstandingFifo` has at least `nAck :: NumObjectIdsAck` IDs that will be removed from it + -- POSTCONDITIONS: + -- * The `nAck` first IDs from `dpsOutstandingFifo` are removed from `dpsOutstandingFifo` and removed from `dpsObjectsAvailableIds` , psaOnRequestObjects :: Set objectId -> m () + -- ^ To be called when emitting a request for new objects. Under the hood, it will remove the requested IDs from `dpsObjectsAvailableIds` + -- and add them to `dpsObjectsInflightIds`. + -- + -- PRECONDITIONS: + -- * The requested IDs are a subset of `dpsObjectsAvailableIds` + -- * The requested IDs are not in `dpsObjectsInflightIds` + -- POSTCONDITIONS: + -- * The requested IDs are removed from `dpsObjectsAvailableIds` + -- * The requested IDs are now in `dpsObjectsInflightIds` , psaOnReceiveIds :: NumObjectIdsReq -> [objectId] -> m () - -- ^ Error handling should have been done before calling this + -- ^ To be called after receiving new IDs from the outbound peer, after validating that we received the correct number (not more than requested). + -- Under the hood, it will decrease the `dpsNumIdsInFlight` count by **the number of IDs that were requested in the request corresponding to this reply**. + -- This number might be more than the number of received IDs. It also adds the received IDs to `dpsOutstandingFifo` and `dpsObjectsAvailableIds`. 
+ -- + -- PRECONDITIONS: + -- * The number of received IDs is less than or equal to `nReq :: NumObjectIdsReq` (the number of IDs that were requested in the request corresponding to this reply) + -- * The received IDs are not already in `dpsObjectsAvailableIds` nor in `dpsObjectsInflightIds` nor in `dpsObjectsOwtPool` + -- * The received IDs do not contain duplicates + -- * `dpsNumIdsInFlight` is greater than or equal to `nReq :: NumObjectIdsReq` + -- POSTCONDITIONS: + -- * `dpsNumIdsInflight` is `nReq` less than before + -- * `dpsOutstandingFifo` contains the received IDs appended at the end in the same order as they were received + -- * `dpsObjectsAvailableIds` contains the received IDs , psaOnReceiveObjects :: [object] -> m () - -- ^ Error handling should have been done before calling this - -- Also every object should have been validated! + -- ^ To be called when receiving objects from the outbound peer, after validating that the received objects match exactly the requested IDs. + -- It also checks that all received objects have valid cryptographic proofs. + -- Under the hood, it will remove the received IDs from `dpsObjectsInflightIds`, add the received objects to `dpsOwtPool`, + -- and call the `submitObjectsToPool` subroutine that will actually insert the objects into the object pool. + -- + -- PRECONDITIONS: + -- * All received objects are valid wrt. their cryptographic proofs/invariants specific to the object type + -- * The received objects correspond exactly to the set of requested objects (order not mattering) + -- * The IDs of the received objects are a subset of `dpsObjectsInflightIds` + -- POSTCONDITIONS: + -- * The IDs of the received objects are removed from `dpsObjectsInflightIds` + -- * `dpsObjectsOwtPool` contains the received objects } -- | A bracket function which registers / de-registers a new peer in @@ -74,7 +124,6 @@ data PeerStateAPI m objectId object = PeerStateAPI withPeer :: forall object peerAddr objectId m a. 
( MonadMask m - , MonadMVar m , MonadSTM m , Ord objectId , Ord peerAddr @@ -104,35 +153,36 @@ withPeer bracket registerPeerAndCreateAPI unregisterPeer withAPI where registerPeerAndCreateAPI :: m (PeerStateAPI m objectId object) - registerPeerAndCreateAPI = do - -- create the API for this peer, obtaining a channel for it in the process - !inboundPeerAPI <- - modifyMVar - decisionChannelsVar - \peerToChannel -> do - -- We get a channel for this peer, and register it in peerToChannel. - (chan', peerToChannel') <- - case peerToChannel Map.!? peerAddr of - -- Checks if a channel already exists for this peer, in case we reuse it - Just chan -> return (chan, peerToChannel) - -- Otherwise create a new channel and register it - Nothing -> do - chan <- newEmptyMVar - return (chan, Map.insert peerAddr chan peerToChannel) - return - ( peerToChannel' - , PeerStateAPI - { psaReadDecision = do - -- TODO: make atomic - decision <- takeMVar chan' - let decision' = decision{pdExecutingDecision = True} - putMVar chan' decision' + registerPeerAndCreateAPI = atomically $ do + peerToChannel <- readTVar decisionChannelsVar + decisionChan <- case peerToChannel Map.!? 
peerAddr of + -- Checks if a channel already exists for this peer, in case we reuse it + Just chan -> return chan + -- Otherwise create a new channel and register it + Nothing -> do + newChan <- newTVar unavailableDecision + modifyTVar decisionChannelsVar (Map.insert peerAddr newChan) + return newChan + + let !inboundPeerAPI = + PeerStateAPI + { psaReadDecision = atomically $ do + -- This should block until the decision has status `DecisionUnread` + -- which means it is a new decision that the peer has not acted upon yet + -- If `DecisionCompleted` is read here, it means the decision logic hasn't had time to make a new decision for this peer + -- If `DecisionBeingActedUpon` is read here, it means the peer forgot to call `psaOnDecisionCompleted` after acting upon the last decision + decision@PeerDecision{pdStatus} <- readTVar decisionChan + when (pdStatus == DecisionBeingActedUpon) $ error "Forgot to call `psaOnDecisionCompleted` for this peer" + check $ pdStatus == DecisionUnread + let decision' = decision{pdStatus = DecisionBeingActedUpon} + writeTVar decisionChan decision' return decision' - , psaOnDecisionExecuted = do - -- TODO: make atomic - decision <- takeMVar chan' - let decision' = decision{pdExecutingDecision = False} - putMVar chan' decision' + , psaOnDecisionCompleted = atomically $ do + decision@PeerDecision{pdStatus} <- readTVar decisionChan + when (pdStatus == DecisionUnread) $ error "Forgot to call `psaReadDecision` for this peer, or the decision thread has mistakenly updated the decision for this peer while it was executing it" + when (pdStatus == DecisionCompleted) $ error "`psaOnDecisionCompleted` has already been called for this peer" + let decision' = decision{pdStatus = DecisionCompleted} + writeTVar decisionChan decision' , psaOnRequestIds = State.onRequestIds objectDiffusionTracer @@ -160,26 +210,23 @@ withPeer objectPoolSem peerAddr } - ) + -- register the peer in the global state now - atomically $ modifyTVar globalStateVar 
registerPeerGlobalState + modifyTVar globalStateVar registerPeerGlobalState -- initialization is complete for this peer, it can proceed and -- interact through its given API return inboundPeerAPI where unregisterPeer :: PeerStateAPI m objectId object -> m () - unregisterPeer _ = + unregisterPeer _api = -- the handler is a short blocking operation, thus we need to use -- `uninterruptibleMask_` - uninterruptibleMask_ do + uninterruptibleMask_ $ atomically $ do -- unregister the peer from the global state - atomically $ modifyTVar globalStateVar unregisterPeerGlobalState + modifyTVar globalStateVar unregisterPeerGlobalState -- remove the channel of this peer from the global channel map - modifyMVar_ - decisionChannelsVar - \peerToChannel -> - return $ Map.delete peerAddr peerToChannel + modifyTVar decisionChannelsVar (Map.delete peerAddr) registerPeerGlobalState :: DecisionGlobalState peerAddr objectId object -> @@ -215,7 +262,6 @@ withPeer decisionLogicThread :: forall m peerAddr objectId object. ( MonadDelay m - , MonadMVar m , MonadSTM m , MonadFork m , MonadIO m @@ -234,34 +280,40 @@ decisionLogicThread decisionTracer countersTracer ObjectPoolWriter{opwHasObject} forever $ do -- We rate limit the decision making process, it could overwhelm the CPU -- if there are too many inbound connections. - threadDelay _DECISION_LOOP_DELAY + threadDelay const_DECISION_LOOP_DELAY rng <- initStdGen -- TODO: can we make this whole block atomic? 
-- because makeDecisions should be atomic with respect to reading the global state and -- reading the previous decisions - decisionsChannels <- readMVar decisionChannelsVar - prevDecisions <- traverse takeMVar decisionsChannels - (newDecisions, globalState) <- atomically $ do + (newDecisions, counters) <- atomically $ do + + decisionsChannels <- readTVar decisionChannelsVar + prevDecisions <- traverse readTVar decisionsChannels globalState <- readTVar globalStateVar hasObject <- opwHasObject - pure $ (makeDecisions rng hasObject decisionPolicy globalState prevDecisions, globalState) + let newDecisions = makeDecisions rng hasObject decisionPolicy globalState prevDecisions - traceWith decisionTracer (TraceDecisionLogicDecisionsMade newDecisions) - peerToChannel <- readMVar decisionChannelsVar - -- Pair decision channel with the corresponding decision - let peerToChannelAndDecision = - Map.intersectionWith - (,) - peerToChannel - newDecisions - -- Send the newDecisions to the corresponding peers - -- Note that newDecisions are incremental, so we merge the old one to the new one (using the semigroup instance) if there is an old one - traverse_ (uncurry putMVar) peerToChannelAndDecision + peerToChannel <- readTVar decisionChannelsVar + -- Pair decision channel with the corresponding decision + let peerToChannelAndDecision = + Map.intersectionWith + (,) + peerToChannel + newDecisions + -- Send the newDecisions to the corresponding peers + traverse_ + (\(chan, decision) -> writeTVar chan decision) + peerToChannelAndDecision + + -- Return values for tracing purposes + let counters = makeObjectDiffusionCounters globalState + return (newDecisions, counters) - traceWith countersTracer (makeObjectDiffusionCounters globalState) + traceWith decisionTracer (TraceDecisionLogicDecisionsMade newDecisions) + traceWith countersTracer counters -- `5ms` delay -_DECISION_LOOP_DELAY :: DiffTime -_DECISION_LOOP_DELAY = 0.005 +const_DECISION_LOOP_DELAY :: DiffTime +const_DECISION_LOOP_DELAY 
= 0.005 diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/State.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/State.hs index 6965523f46..41108fab26 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/State.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/State.hs @@ -27,7 +27,7 @@ import Data.Set qualified as Set import GHC.Stack (HasCallStack) import Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.Types import Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.ObjectPool.API (ObjectPoolWriter (..)) -import Ouroboros.Consensus.Util.IOLike (MonadMVar, MonadMask, bracket_) +import Ouroboros.Consensus.Util.IOLike (MonadMask, bracket_) import Ouroboros.Network.Protocol.ObjectDiffusion.Type (NumObjectIdsAck, NumObjectIdsReq) onRequestIds :: @@ -244,7 +244,6 @@ onReceiveObjects :: forall m peerAddr object objectId. ( MonadSTM m , MonadMask m - , MonadMVar m , Ord objectId , Ord peerAddr ) => @@ -328,13 +327,11 @@ onReceiveObjectsImpl dgsPeerStates' = Map.insert peerAddr peerState' dgsPeerStates --- | Should be called by `acknowledgeIds` submitObjectsToPool :: forall m peerAddr object objectId. 
( Ord objectId , Ord peerAddr , MonadMask m - , MonadMVar m , MonadSTM m ) => Tracer m (TraceObjectDiffusionInbound objectId object) -> diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Types.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Types.hs index 8a43ae00a2..3f7dd9d41a 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Types.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Types.hs @@ -5,7 +5,6 @@ {-# LANGUAGE GADTs #-} {-# LANGUAGE GeneralizedNewtypeDeriving #-} {-# LANGUAGE ImportQualifiedPost #-} -{-# LANGUAGE LambdaCase #-} {-# LANGUAGE NamedFieldPuns #-} {-# LANGUAGE ScopedTypeVariables #-} @@ -23,6 +22,10 @@ module Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.Types -- * Decisions , PeerDecision (..) + , PeerDecisionStatus (..) + , unavailableDecision + + -- * Tracing , mempty , TraceDecisionLogic (..) , ObjectMultiplicity (..) @@ -52,6 +55,7 @@ import Data.Sequence.Strict (StrictSeq) import Data.Set (Set) import Data.Word (Word64) import GHC.Generics (Generic) +import GHC.Stack (HasCallStack) import NoThunks.Class (NoThunks (..)) import Ouroboros.Network.ControlMessage (ControlMessage) import Ouroboros.Network.Protocol.ObjectDiffusion.Type @@ -190,11 +194,29 @@ data PeerDecision objectId object = PeerDecision -- if we have non-acknowledged `objectId`s. , pdObjectsToReqIds :: !(Set objectId) -- ^ objectId's to download. - , pdExecutingDecision :: !Bool + , pdStatus :: !PeerDecisionStatus -- ^ Whether the peer is actually executing the said decision } deriving (Show, Eq) +data PeerDecisionStatus + = DecisionUnread + | DecisionBeingActedUpon + | DecisionCompleted + deriving (Show, Eq) + +-- | A placeholder when no decision has been made, at the beginning of a loop. 
+-- Nothing should be read from it except its status. +unavailableDecision :: HasCallStack => PeerDecision objectId object +unavailableDecision = + PeerDecision + { pdStatus = DecisionCompleted + , pdObjectsToReqIds = error "This decision is not available yet" + , pdNumIdsToAck = error "This decision is not available yet" + , pdNumIdsToReq = error "This decision is not available yet" + , pdCanPipelineIdsRequests = error "This decision is not available yet" + } + -- | ObjectLogic tracer. data TraceDecisionLogic peerAddr objectId object = TraceDecisionLogicGlobalStateUpdated String (DecisionGlobalState peerAddr objectId object) From f794d6f48f8cc5710f91072ed46c24a772a8320d Mon Sep 17 00:00:00 2001 From: Nicolas BACQUEY Date: Mon, 20 Oct 2025 20:05:42 +0200 Subject: [PATCH 34/43] Add failures for protocol errors and implementation errors --- .../ObjectDiffusion/Inbound/V2/Registry.hs | 6 ++- .../ObjectDiffusion/Inbound/V2/State.hs | 44 ++++++++++++++++--- 2 files changed, 43 insertions(+), 7 deletions(-) diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Registry.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Registry.hs index 3eb8034bab..c6c8309fee 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Registry.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Registry.hs @@ -170,7 +170,6 @@ withPeer -- This should block until the decision has status `DecisionUnread` -- which means it is a new decision that the peer has not acted upon yet -- If `DecisionCompleted` is read here, it means the decision logic hasn't had time to make a new decision for this peer - -- If `DecisionBeingActedUpon` is read here, it means the peer forgot to call `psaOnDecisionCompleted` after acting upon the last decision 
decision@PeerDecision{pdStatus} <- readTVar decisionChan when (pdStatus == DecisionBeingActedUpon) $ error "Forgot to call `psaOnDecisionCompleted` for this peer" check $ pdStatus == DecisionUnread @@ -201,7 +200,8 @@ withPeer decisionTracer globalStateVar peerAddr - , psaOnReceiveObjects = + , psaOnReceiveObjects = \objects -> do + PeerDecision{pdObjectsToReqIds} <- atomically $ readTVar decisionChan State.onReceiveObjects objectDiffusionTracer decisionTracer @@ -209,6 +209,8 @@ withPeer objectPoolWriter objectPoolSem peerAddr + pdObjectsToReqIds + objects } -- register the peer in the global state now diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/State.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/State.hs index 41108fab26..6388f8d484 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/State.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/State.hs @@ -16,7 +16,8 @@ module Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.State import Control.Concurrent.Class.MonadSTM.Strict import Control.Concurrent.Class.MonadSTM.TSem -import Control.Exception (assert) +import Control.Exception (assert, throw) +import Control.Monad (when) import Control.Tracer (Tracer, traceWith) import Data.Foldable qualified as Foldable import Data.Map.Strict (Map, findWithDefault) @@ -81,6 +82,7 @@ onRequestIdsImpl let -- We compute the ids to ack and new state of the FIFO based on the number of ids to ack given by the decision logic (idsToAck, dpsOutstandingFifo') = + assert (StrictSeq.length dpsOutstandingFifo >= fromIntegral numIdsToAck) $ StrictSeq.splitAt (fromIntegral numIdsToAck) dpsOutstandingFifo @@ -143,6 +145,10 @@ onRequestObjectsImpl dgsPeerStates' = Map.adjust ( \ps@DecisionPeerState{dpsObjectsAvailableIds, 
dpsObjectsInflightIds} -> + assert + ( objectIds `Set.isSubsetOf` dpsObjectsAvailableIds + && Set.null (objectIds `Set.intersection` dpsObjectsInflightIds) + ) $ ps { dpsObjectsAvailableIds = dpsObjectsAvailableIds \\ objectIds , dpsObjectsInflightIds = dpsObjectsInflightIds `Set.union` objectIds @@ -169,15 +175,32 @@ onReceiveIds :: -- | received `objectId`s m () onReceiveIds odTracer decisionTracer globalStateVar peerAddr numIdsInitiallyRequested receivedIds = do + peerState <- atomically $ ((Map.! peerAddr) . dgsPeerStates) <$> readTVar globalStateVar + checkProtocolErrors peerState numIdsInitiallyRequested receivedIds globalState' <- atomically $ do stateTVar globalStateVar ( \globalState -> let globalState' = onReceiveIdsImpl peerAddr numIdsInitiallyRequested receivedIds globalState - in (globalState', globalState') + in (globalState', globalState') ) traceWith odTracer (TraceObjectDiffusionInboundReceivedIds (length receivedIds)) traceWith decisionTracer (TraceDecisionLogicGlobalStateUpdated "onReceiveIds" globalState') + where + checkProtocolErrors :: + DecisionPeerState objectId object-> + NumObjectIdsReq -> + [objectId] -> + m () + checkProtocolErrors DecisionPeerState{dpsObjectsAvailableIds, dpsObjectsInflightIds} nReq ids = do + when (length ids > fromIntegral nReq) $ throw ProtocolErrorObjectIdsNotRequested + let idSet = Set.fromList ids + when (length ids /= Set.size idSet) $ throw ProtocolErrorObjectIdsDuplicate + when + -- TODO also check for IDs in pool + ( (not $ Set.null $ idSet `Set.intersection` dpsObjectsAvailableIds) + || (not $ Set.null $ idSet `Set.intersection` dpsObjectsInflightIds) + ) $ throw ProtocolErrorObjectIdAlreadyKnown onReceiveIdsImpl :: forall peerAddr object objectId. 
@@ -253,13 +276,15 @@ onReceiveObjects :: ObjectPoolWriter objectId object m -> ObjectPoolSem m -> peerAddr -> + -- | requested objects + Set objectId -> -- | received objects [object] -> m () -onReceiveObjects odTracer tracer globalStateVar objectPoolWriter poolSem peerAddr objectsReceived = do +onReceiveObjects odTracer tracer globalStateVar objectPoolWriter poolSem peerAddr objectsRequestedIds objectsReceived = do let getId = opwObjectId objectPoolWriter let objectsReceivedMap = Map.fromList $ (\obj -> (getId obj, obj)) <$> objectsReceived - + checkProtocolErrors objectsRequestedIds objectsReceivedMap globalState' <- atomically $ do stateTVar globalStateVar @@ -281,6 +306,15 @@ onReceiveObjects odTracer tracer globalStateVar objectPoolWriter poolSem peerAdd poolSem peerAddr objectsReceivedMap + where + checkProtocolErrors :: + Set objectId-> + Map objectId object -> + m () + checkProtocolErrors requested received' = do + let received = Map.keysSet received' + when (not $ Set.null $ requested \\ received) $ throw ProtocolErrorObjectMissing + when (not $ Set.null $ received \\ requested) $ throw ProtocolErrorObjectNotRequested onReceiveObjectsImpl :: forall peerAddr object objectId. 
@@ -314,7 +348,7 @@ onReceiveObjectsImpl dgsPeerStates -- subtract requested from in-flight - dpsObjectsInflightIds' = + dpsObjectsInflightIds' = assert (objectsReceivedIds `Set.isSubsetOf` dpsObjectsInflightIds) $ dpsObjectsInflightIds \\ objectsReceivedIds dpsObjectsOwtPool' = dpsObjectsOwtPool <> objectsReceived From b677b71e06cd00833e571de8f02811f8bb228956 Mon Sep 17 00:00:00 2001 From: Nicolas BACQUEY Date: Tue, 21 Oct 2025 17:13:17 +0200 Subject: [PATCH 35/43] Check that received IDs are not already in pool --- .../ObjectDiffusion/Inbound/V2/Registry.hs | 1 + .../MiniProtocol/ObjectDiffusion/Inbound/V2/State.hs | 11 +++++++---- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Registry.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Registry.hs index c6c8309fee..4cd5f57275 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Registry.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Registry.hs @@ -198,6 +198,7 @@ withPeer State.onReceiveIds objectDiffusionTracer decisionTracer + objectPoolWriter globalStateVar peerAddr , psaOnReceiveObjects = \objects -> do diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/State.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/State.hs index 6388f8d484..0a6e9e11b6 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/State.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/State.hs @@ -165,6 +165,7 @@ onReceiveIds :: (MonadSTM m, Ord objectId, Ord peerAddr) => Tracer m 
(TraceObjectDiffusionInbound objectId object) -> Tracer m (TraceDecisionLogic peerAddr objectId object) -> + ObjectPoolWriter objectId object m -> DecisionGlobalStateVar m peerAddr objectId object -> peerAddr -> -- | number of requests to subtract from @@ -174,9 +175,10 @@ onReceiveIds :: [objectId] -> -- | received `objectId`s m () -onReceiveIds odTracer decisionTracer globalStateVar peerAddr numIdsInitiallyRequested receivedIds = do +onReceiveIds odTracer decisionTracer ObjectPoolWriter{opwHasObject} globalStateVar peerAddr numIdsInitiallyRequested receivedIds = do peerState <- atomically $ ((Map.! peerAddr) . dgsPeerStates) <$> readTVar globalStateVar - checkProtocolErrors peerState numIdsInitiallyRequested receivedIds + hasObject <- atomically opwHasObject + checkProtocolErrors hasObject peerState numIdsInitiallyRequested receivedIds globalState' <- atomically $ do stateTVar globalStateVar @@ -188,18 +190,19 @@ onReceiveIds odTracer decisionTracer globalStateVar peerAddr numIdsInitiallyRequ traceWith decisionTracer (TraceDecisionLogicGlobalStateUpdated "onReceiveIds" globalState') where checkProtocolErrors :: + (objectId -> Bool) -> DecisionPeerState objectId object-> NumObjectIdsReq -> [objectId] -> m () - checkProtocolErrors DecisionPeerState{dpsObjectsAvailableIds, dpsObjectsInflightIds} nReq ids = do + checkProtocolErrors hasObject DecisionPeerState{dpsObjectsAvailableIds, dpsObjectsInflightIds} nReq ids = do when (length ids > fromIntegral nReq) $ throw ProtocolErrorObjectIdsNotRequested let idSet = Set.fromList ids when (length ids /= Set.size idSet) $ throw ProtocolErrorObjectIdsDuplicate when - -- TODO also check for IDs in pool ( (not $ Set.null $ idSet `Set.intersection` dpsObjectsAvailableIds) || (not $ Set.null $ idSet `Set.intersection` dpsObjectsInflightIds) + || (any hasObject ids) ) $ throw ProtocolErrorObjectIdAlreadyKnown onReceiveIdsImpl :: From da01ff23598e7958584101c21b22b6661c4f66a6 Mon Sep 17 00:00:00 2001 From: Nicolas BACQUEY Date: Tue, 
21 Oct 2025 18:00:59 +0200 Subject: [PATCH 36/43] Formatting and updated comments --- .../ObjectDiffusion/Inbound/V2/State.hs | 171 ++++++++++-------- 1 file changed, 93 insertions(+), 78 deletions(-) diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/State.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/State.hs index 0a6e9e11b6..6fd07ecc85 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/State.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/State.hs @@ -42,16 +42,22 @@ onRequestIds :: -- | number of requests to req NumObjectIdsReq -> m () -onRequestIds odTracer decisionTracer globalStateVar peerAddr numIdsToAck numIdsToReq = do - globalState' <- atomically $ do - stateTVar - globalStateVar - ( \globalState -> - let globalState' = onRequestIdsImpl peerAddr numIdsToAck numIdsToReq globalState - in (globalState', globalState') - ) - traceWith odTracer (TraceObjectDiffusionInboundRequestedIds (fromIntegral numIdsToReq)) - traceWith decisionTracer (TraceDecisionLogicGlobalStateUpdated "onRequestIds" globalState') +onRequestIds + odTracer + decisionTracer + globalStateVar + peerAddr + numIdsToAck + numIdsToReq = do + globalState' <- atomically $ do + stateTVar + globalStateVar + ( \globalState -> + let globalState' = onRequestIdsImpl peerAddr numIdsToAck numIdsToReq globalState + in (globalState', globalState') + ) + traceWith odTracer (TraceObjectDiffusionInboundRequestedIds (fromIntegral numIdsToReq)) + traceWith decisionTracer (TraceDecisionLogicGlobalStateUpdated "onRequestIds" globalState') -- Acknowledgment is done when a requestIds is made. -- That's why we update the dpsOutstandingFifo and dpsObjectsAvailableIds here. 
@@ -175,35 +181,42 @@ onReceiveIds :: [objectId] -> -- | received `objectId`s m () -onReceiveIds odTracer decisionTracer ObjectPoolWriter{opwHasObject} globalStateVar peerAddr numIdsInitiallyRequested receivedIds = do - peerState <- atomically $ ((Map.! peerAddr) . dgsPeerStates) <$> readTVar globalStateVar - hasObject <- atomically opwHasObject - checkProtocolErrors hasObject peerState numIdsInitiallyRequested receivedIds - globalState' <- atomically $ do - stateTVar - globalStateVar - ( \globalState -> - let globalState' = onReceiveIdsImpl peerAddr numIdsInitiallyRequested receivedIds globalState - in (globalState', globalState') - ) - traceWith odTracer (TraceObjectDiffusionInboundReceivedIds (length receivedIds)) - traceWith decisionTracer (TraceDecisionLogicGlobalStateUpdated "onReceiveIds" globalState') - where - checkProtocolErrors :: - (objectId -> Bool) -> - DecisionPeerState objectId object-> - NumObjectIdsReq -> - [objectId] -> - m () - checkProtocolErrors hasObject DecisionPeerState{dpsObjectsAvailableIds, dpsObjectsInflightIds} nReq ids = do - when (length ids > fromIntegral nReq) $ throw ProtocolErrorObjectIdsNotRequested - let idSet = Set.fromList ids - when (length ids /= Set.size idSet) $ throw ProtocolErrorObjectIdsDuplicate - when - ( (not $ Set.null $ idSet `Set.intersection` dpsObjectsAvailableIds) - || (not $ Set.null $ idSet `Set.intersection` dpsObjectsInflightIds) - || (any hasObject ids) - ) $ throw ProtocolErrorObjectIdAlreadyKnown +onReceiveIds + odTracer + decisionTracer + ObjectPoolWriter{opwHasObject} + globalStateVar + peerAddr + numIdsInitiallyRequested + receivedIds = do + peerState <- atomically $ ((Map.! peerAddr) . 
dgsPeerStates) <$> readTVar globalStateVar + hasObject <- atomically opwHasObject + checkProtocolErrors hasObject peerState numIdsInitiallyRequested receivedIds + globalState' <- atomically $ do + stateTVar + globalStateVar + ( \globalState -> + let globalState' = onReceiveIdsImpl peerAddr numIdsInitiallyRequested receivedIds globalState + in (globalState', globalState') + ) + traceWith odTracer (TraceObjectDiffusionInboundReceivedIds (length receivedIds)) + traceWith decisionTracer (TraceDecisionLogicGlobalStateUpdated "onReceiveIds" globalState') + where + checkProtocolErrors :: + (objectId -> Bool) -> + DecisionPeerState objectId object-> + NumObjectIdsReq -> + [objectId] -> + m () + checkProtocolErrors hasObject DecisionPeerState{dpsObjectsAvailableIds, dpsObjectsInflightIds} nReq ids = do + when (length ids > fromIntegral nReq) $ throw ProtocolErrorObjectIdsNotRequested + let idSet = Set.fromList ids + when (length ids /= Set.size idSet) $ throw ProtocolErrorObjectIdsDuplicate + when + ( (not $ Set.null $ idSet `Set.intersection` dpsObjectsAvailableIds) + || (not $ Set.null $ idSet `Set.intersection` dpsObjectsInflightIds) + || (any hasObject ids) + ) $ throw ProtocolErrorObjectIdAlreadyKnown onReceiveIdsImpl :: forall peerAddr object objectId. @@ -260,12 +273,6 @@ onReceiveIdsImpl -- | Wrapper around `onReceiveObjectsImpl` that updates and traces the -- global state TVar. --- --- Error handling should be done by the client before using the API. --- In particular we assume: --- assert (objectsRequestedIds `Set.isSubsetOf` dpsObjectsInflightIds) --- --- IMPORTANT: We also assume that every object has been *validated* before being passed to this function. onReceiveObjects :: forall m peerAddr object objectId. 
( MonadSTM m @@ -284,40 +291,48 @@ onReceiveObjects :: -- | received objects [object] -> m () -onReceiveObjects odTracer tracer globalStateVar objectPoolWriter poolSem peerAddr objectsRequestedIds objectsReceived = do - let getId = opwObjectId objectPoolWriter - let objectsReceivedMap = Map.fromList $ (\obj -> (getId obj, obj)) <$> objectsReceived - checkProtocolErrors objectsRequestedIds objectsReceivedMap - globalState' <- atomically $ do - stateTVar +onReceiveObjects + odTracer + tracer + globalStateVar + objectPoolWriter + poolSem + peerAddr + objectsRequestedIds + objectsReceived = do + let getId = opwObjectId objectPoolWriter + let objectsReceivedMap = Map.fromList $ (\obj -> (getId obj, obj)) <$> objectsReceived + checkProtocolErrors objectsRequestedIds objectsReceivedMap + globalState' <- atomically $ do + stateTVar + globalStateVar + ( \globalState -> + let globalState' = + onReceiveObjectsImpl + peerAddr + objectsReceivedMap + globalState + in (globalState', globalState') + ) + traceWith odTracer (TraceObjectDiffusionInboundReceivedObjects (length objectsReceived)) + traceWith tracer (TraceDecisionLogicGlobalStateUpdated "onReceiveObjects" globalState') + submitObjectsToPool + odTracer + tracer globalStateVar - ( \globalState -> - let globalState' = - onReceiveObjectsImpl - peerAddr - objectsReceivedMap - globalState - in (globalState', globalState') - ) - traceWith odTracer (TraceObjectDiffusionInboundReceivedObjects (length objectsReceived)) - traceWith tracer (TraceDecisionLogicGlobalStateUpdated "onReceiveObjects" globalState') - submitObjectsToPool - odTracer - tracer - globalStateVar - objectPoolWriter - poolSem - peerAddr - objectsReceivedMap - where - checkProtocolErrors :: - Set objectId-> - Map objectId object -> - m () - checkProtocolErrors requested received' = do - let received = Map.keysSet received' - when (not $ Set.null $ requested \\ received) $ throw ProtocolErrorObjectMissing - when (not $ Set.null $ received \\ requested) $ throw 
ProtocolErrorObjectNotRequested + objectPoolWriter + poolSem + peerAddr + objectsReceivedMap + where + checkProtocolErrors :: + Set objectId-> + Map objectId object -> + m () + checkProtocolErrors requested received' = do + let received = Map.keysSet received' + when (not $ Set.null $ requested \\ received) $ throw ProtocolErrorObjectMissing + when (not $ Set.null $ received \\ requested) $ throw ProtocolErrorObjectNotRequested onReceiveObjectsImpl :: forall peerAddr object objectId. From b86f0b20269a89aa50f5274360ee7d1b3c55a544 Mon Sep 17 00:00:00 2001 From: Thomas BAGREL Date: Wed, 22 Oct 2025 18:09:46 +0200 Subject: [PATCH 37/43] Futher Documentation changes Co-authored-by: nbacquey --- .../ObjectDiffusion/Inbound/V2.md | 130 +++++++++++++----- .../ObjectDiffusion/Inbound/V2/Decision.hs | 7 + .../ObjectDiffusion/Inbound/V2/Registry.hs | 1 + 3 files changed, 106 insertions(+), 32 deletions(-) diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2.md b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2.md index b9c72f1b6b..083f6e12d2 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2.md +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2.md @@ -2,14 +2,66 @@ ### TODOs -- talk about pipelined vs. not pipelined - what happens when a peer gets disconnected: ensure correctness - describe the strategy for individual peer decisions - talk about contraints / partial functions -- how is acknowledgement implemented, and where - describe lifetime of id + related object -- describe main loop, starting with makeDecisions? 
-- list differences with TxSubmissionV2 +- list differences with TxSubmissionV2 + +## General architecture + +This document describes the inner workings of the `ObjectDiffusion` mini-protocol. + +In ObjectDiffusion V2 (only the inbound side changes compared to V1), each connection to an outbound peer is no longer considered in isolation. Instead, there is a global state `DecisionGlobalState` (defined in `Types.hs`) that tracks the state of all connections together using `dgsPeerStates :: Map peerAddr (DecisionPeerState objectId object)`. Currently, this field is the only field of `DecisionGlobalState`, meaning that we use no global data other than the sum of the peers' states. Further on, we denote _an instance of the inbound protocol connected to a specific outbound peer_ simply as _an inbound peer_. + +A `DecisionPeerState` holds the state of the interaction with the distant outbound peer, which is described in more detail in [this section](#fields-of-decisionpeerstate-and-their-lifecycle). + +The global state is read periodically from a dedicated _decision_ thread (defined in `Registry.hs` and `Decision.hs`), and for each inbound peer computes a `PeerDecision` (defined in `Types.hs`) that indicates what the inbound peer should do next. More specifically, in the decision, the inbound peer can find: + +- `pdIdsToReq`: number of new IDs to request from the outbound peer +- `pdIdsToAck`: a set of IDs that the peer should ack in its next request for IDs. Note that if `pdIdsToReq` is zero, then no request for IDs will be sent, and thus no acking will happen despite `pdIdsToAck` being non-zero. +- `pdCanPipelineIdsReq`: a flag indicating whether the peer can pipeline its requests for IDs (instead of making a blocking call). +- `pdObjectsToReqIds`: the IDs of the objects that the inbound peer should request from the outbound peer. + +An inbound peer (defined in `V2.hs`) has no direct access to the state, neither in write nor read fashion.
It only has access to a monadic API `PeerStateAPI` defined in `Registry.hs`. This API has 2 decision-related callbacks, and 4 state-mutating callbacks. It should follow the decision and call the state-mutating callbacks accordingly to keep the global state consistent with the actions taken. + +**Decision-related callbacks:** + +- `psaReadDecision` that allows the inbound peer to read the current `PeerDecision` made for itself by the last round of the decision logic. This will block if a new decision is not yet available for this peer +- `psaOnDecisionCompleted` that allows the inbound peer to signal that it has executed the last decision it read, and that the decision logic should now compute a new decision for this peer + +The decision additionally has a flag `pdStatus` (not intended to be read by the peer) that is set to `DecisionBeingActedUpon` when `psaReadDecision` is called, and stays at that value until the peer calls `psaOnDecisionCompleted`, indicating that it has executed the decision (the status is set to `DecisionCompleted`). This is the main way the peer interacts with the decision thread. While the flag is set to `DecisionBeingActedUpon`, the global-state decision logic will not update the decision for this peer (it is locked, or "frozen"). + +**State-mutating callbacks:** + +These are the callbacks that the inbound peer must call when it takes the corresponding actions that have been dictated by the decision it read. These callbacks will update the corresponding peer state. For reference, the fields of this state are documented in [this section](#fields-of-decisionpeerstate-and-their-lifecycle) + +- `psaOnRequestIds` (corresponding to `onRequestIds` from `State.hs`) that must be called when emitting a request for new IDs (that also acks previously received IDs that we no longer care about).
Under the hood, `onRequestIds` will increase the `dpsNumIdsInFlight` count by the requested number of IDs, and remove the acked IDs from `dpsOutstandingFifo` and `dpsObjectsAvailableIds`. +- `psaOnReceiveIds` (corresponding to `onReceiveIds` from `State.hs`) that must be called after receiving new IDs from the outbound peer. Under the hood, `onReceiveIds` will decrease the `dpsNumIdsInFlight` count by **the number of IDs that were requested in the request corresponding to this reply** (it might be more than the number of received IDs), and add the received IDs to `dpsOutstandingFifo` and `dpsObjectsAvailableIds`. +- `psaOnRequestObjects` (corresponding to `onRequestObjects` from `State.hs`) that must be called when emitting a request for new objects. Under the hood, `onRequestObjects` will remove the requested IDs from `dpsObjectsAvailableIds` and add them to `dpsObjectsInflightIds`. +- `psaOnReceiveObjects` (corresponding to `onReceiveObjects` from `State.hs`) that must be called when receiving objects from the outbound peer. Under the hood, `onReceiveObjects` will remove the received IDs from `dpsObjectsInflightIds`, and add the received objects to `dpsOwtPool`, and call the `submitObjectsToPool` subroutine that will actually insert the objects into the object pool when the lock can be acquired (at which point the objects are removed from `dpsOwtPool`) + +NOTE: Protocol error-handling (e.g. making sure the outbound peer has sent the correct information) is done by the callbacks themselves, so the inbound peer doesn't have to check anything before calling these state-mutating callbacks. + +## Inbound peer loop + +The inbound peer performs a loop where each iteration starts with (blocking on) reading a new decision, and ends with signaling that the decision has been executed.
It should not return to the start of the loop too early, i.e., before it has taken all the actions dictated by the decision (except for acks, that cannot be made when `pdIdsToReq == 0`), as the decision logic considers that once a decision has been read, it is effectively "frozen in" and will be performed. So in each iteration, the inbound peer should do the following steps in order: + +1. Read the current decision via `psaReadDecision` +2. Then try to read any available reply from the outbound peer if there have been pipelined requests in previous rounds. It should process the reply accordingly, i.e. check that the reply conforms to the mini-protocol rules, and call either `psaOnReceiveIds` or `psaOnReceiveObjects` as needed +3. Then request objects (if any) as per `pdObjectsToReqIds`, and call `psaOnRequestObjects` accordingly +4. Then request IDs (if any) as per `pdIdsToReq` (acking `pdIdsToAck` as a side-effect), and call `psaOnRequestIds` accordingly +5. Call `psaOnDecisionCompleted` to signal that a new decision should be made for this peer + +In the implementation, steps 2-4 are performed by the `goCollect`, `goReqIds` and `goReqObjects` functions respectively of `V2.hs`. + +NOTE: The decision logic doesn't assume that we will first request objects, then only (request and) acknowledge IDs. Consequently, the decision logic won't ever ask to request objects whose IDs would be acknowledged in that same round. + +## Peer state description and lifecycle + +This diagram shows when a value (count, ID or object) is added or removed from a specific field of the `DecisionPeerState` during the execution of an inbound peer. + +Fields of `DecisionPeerState` are represented as rounded rectangles, while callbacks/functions are represented as diamond shapes.
The entry point of the diagram is the `makeDecisions / psaReadDecision` node, that dictates the actions to be taken by the inbound peer, that are then reflected through the `onRequestIds` and `onRequestObjects` callbacks. ```mermaid %%{init: {"flowchart": {"htmlLabels": true}} }%% @@ -22,17 +74,18 @@ flowchart TD EA{onRequestIds} EA-->|+count| A - B -->|-ids| EA - C -->|-ids| EA + EA -->|"`-ids (ack)`"| B + EA -->|"`-ids (non-downloaded only, ack)`"| C EB{onReceiveIds} - A -->|-count| EB - EB -->|+ids| B + EB -->|-count| A + + EB -->|+ids| B IN1@{ shape: lin-cyl, label: "ids" } --o EB EB -->|+ids| C EC{onRequestObjects} - C -->|-ids| EC +E EC -->|"`-ids (selected for download only)`"| C EC -->|+ids| D ED{onReceiveObjects} @@ -40,39 +93,52 @@ flowchart TD IN2@{ shape: lin-cyl, label: "objects" } --o ED ED -->|+objects| F - EE{makeDecisions} + EE{makeDecisions / psaReadDecision} EA ~~~ EE EC ~~~ EE - EE -.->|readDecision : pdIdsToAck + pdIdsToReq + pdCanPipelineIdsReq/| EA - EE -.->|readDecision : pdObjectsToReqIds| EC + EE -.-o|pdIdsToAck + pdIdsToReq + pdCanPipelineIdsReq| EA + EE -.-o|pdObjectsToReqIds| EC EG{Added to pool} F -->|-objects| EG ``` -The inbound peer (defined in `V2.hs`) has no direct access to the state. It only has access to a monadic API `PeerStateAPI` defined in `Registry.hs`. +### Fields of `DecisionPeerState` and their lifecycle -This API has 4 callbacks, that trigger global state changes: +- `dpsNumIdsInFlight`: The cumulative number of object IDs we have asked in requests that have not yet been replied to. We need to track this to ensure we don't ask the outbound peer to keep available more objects at a given time than the protocol defined limit (see `dpMaxNumObjectsOutstanding` in `Policy.hs`). This count is incremented in `onRequestIds` by the number of requested IDs, and decremented in `onReceiveIds` by **the same number of requested IDs** when the reply is received. 
E.g., if we request 10 IDs, then we increment the count by 10; and if later the outbound peer replies with only 7 IDs (because it had only 7 available), we still decrement the count by 10. +- `dpsOutstandingFifo`: IDs of the objects that the outbound peer has available for us, and which we have not yet acknowledged. This is kept in the order in which the outbound peer gave them to us. It is also the order in which we acknowledge them (because acknowledgment, as in TX-submission, is made by sending the length of the prefix of the FIFO that we no longer care about, instead of providing the IDs as a set). IDs are added to this FIFO in `onReceiveIds`, and removed from this FIFO in `onRequestIds` when we acknowledge (i.e. drop) a prefix of the FIFO. +- `dpsObjectsAvailableIds`: Set of IDs of the objects that can be requested to the outbound peer, and have not yet been requested or downloaded. This is a subset of `dpsOutstandingFifo`. IDs are added to this set in `onReceiveIds`. They can be removed from this set in two ways: + - when some objects are requested by their IDs in `onRequestObjects`, the corresponding IDs are removed from `dpsObjectsAvailableIds` + - for the IDs that are voluntarily not requested (e.g. because we already have obtained them through other peers), they are removed from `dpsObjectsAvailableIds` when we acknowledge a prefix of the FIFO that contains them +- `dpsObjectsInflightIds`: The IDs of objects that have been requested to the outbound peer, but have not yet been received. IDs are added to this set in `onRequestObjects` (at the moment they are removed from `dpsObjectsAvailableIds`), and removed from this set in `onReceiveObjects` (at the moment the corresponding objects are added to `dpsObjectsOwtPool`). 
In ObjectDiffusion, we must receive exactly the objects that we requested, so there is no way for some items in this set to stay here indefinitely +- `dpsObjectsOwtPool`: A map of IDs to objects that have been received, and are on their way to the `ObjectPool`. As we have many inbound peers in parallel, we cannot directly insert objects into the pool when we receive them; instead, we should wait to obtain the pool lock. So we store the received objects here in the meantime, and the subroutine `submitObjectsToPool` (launched by `onReceiveObjects`) will acquire the lock and insert them into the pool when possible, and thus remove them from `dpsObjectsOwtPool` at that moment. -- `psaOnRequestIds` (corresponding to `onRequestIds` from `State.hs`) that must be called when emitting a request for new IDs (that also acks previously received IDs that we no longer care about). Under the hood, `onRequestIds` will increase the `dpsNumIdsInFlight` count by the requested number of IDs, and remove the acked IDs from `dpsOutstandingFifo` and `dpsObjectsAvailableIds`. -- `psaOnReceiveIds` (corresponding to `onReceiveIds` from `State.hs`) that must be called after receiving new IDs from the outbound peer, after validating that we received the correct number (not more than requested). Under the hood, `onReceiveIds` will decrease the `dpsNumIdsInFlight` count by **the number of IDs that were requested in the request corresponding to this reply** (it might be more than the number of received IDs), and add the received IDs to `dpsOutstandingFifo` and `dpsObjectsAvailableIds`. -- `psaOnRequestObjects` (corresponding to `onRequestObjects` from `State.hs`) that must be called when emitting a request for new objects. Under the hood, `onRequestObjects` will remove the requested IDs from `dpsObjectsAvailableIds` and add them to `dpsObjectsInflightIds`. 
-- `psaOnReceiveObjects` (corresponding to `onReceiveObjects` from `State.hs`) that must be called when receiving objects from the outbound peer, after validating that the received objects match exactly the requested IDs, and that all received objects have valid cryptographic proofs. Under the hood, `onReceiveObjects` will remove the received IDs from `dpsObjectsInflightIds`, and add the received objects to `dpsOwtPool`, and call the `submitObjectsToPool` subroutine that will actually insert the objects into the object pool when the lock can be acquired (at which point the objects are removed from `dpsOwtPool`) +## Acknowledgement behavior note -To know when to call request IDs or objects, the inbound peer (`V2.hs`) relies on a global-state decision procedure, running in another thread, mainly implemented in `Decision.hs` and called from `Registry.hs`. +The ID of an object is eligible for acknowledgement from a given inbound peer when: -This decision procedure refreshes `PeerDecision`s periodically for each peer. The peer can read this decision via the `psaReadDecision` callback in the `PeerStateAPI`. The decision has a flag `pdStatus` that is set to `DecisionBeingActedUpon` when a decision has been read by the peer, and stays at that state until the peer calls `psaOnDecisionCompleted`, indicating that it has executed the decision (the status is set to `DecisionCompleted`) and that the global decision logic can generate a new one for this peer (a decision can also be updated when it is in state `DecisionUnread`). While the flag is set to `DecisionBeingActedUpon`, the global-state decision logic will not update the decision for this peer (it is locked, or "frozen"). 
+- The corresponding object has been downloaded from its direct outbound peer, and is currently in `dpsObjectsOwtPool` of **this** inbound peer +- The corresponding object is already in the pool (either obtained through other inbound peers, or previously downloaded and inserted by this inbound peer) -In the decision, the peer can find: -- `pdIdsToReq`: number of new IDs to request from the outbound peer -- `pdIdsToAck`: a set of IDs that the peer should ack in its next request for IDs. Note that if `pdIdsToReq` is zero, then no request for IDs will be sent, and thus no acking will happen despite `pdIdsToAck` being non-zero. -- `pdCanPipelineIdsReq`: a flag indicating whether the peer can pipeline its requests for IDs (instead of making a blocking call). In ObjectDiffusion protocol specification, a peer can only pipeline requests for IDs when there are some unacknowledged IDs, i.e. when the `dpsOutstandingFifo` is not empty. -- `pdObjectsToReqIds`: the IDs of the objects that the inbound peer should request from the outbound peer. -- `pdExecutingDecision` flag is not meant to be read by the peer itself; it will always be set to `True` when the inbound peer is able to read a decision. +So even if the validation of a received object is done at the moment it is added to pool, this won't cause any trouble. Take the example of an object that is rejected by the pool (because it has invalid cryptographic signature, for example). In this case: -In a round of its main loop, the inbound peer will: -1. Read the current decision via `psaReadDecision` -2. Try to read any available reply from the outbound peer if there have been pipelined requests in previous rounds -3. Try to request objects (if any) as per `pdObjectsToReqIds` -4. Try to request IDs (if any) as per `pdIdsToReq`; acking `pdIdsToAck` as a side-effect -5. 
Call `psaOnDecisionExecuted` to signal that a new decision should be made for this peer

+- the inbound peer that submitted the object to pool might have acked it already at the moment the object is rejected by the pool, but this means the outbound peer which sent us the object is adversarial, and we should disconnect from it anyway. So there is no harm done by having acked the object to the adversarial outbound peer, as we won't want to re-download this object from it again (or any other object whatsoever).
+- any other inbound peer that has this ID available from its outbound peer won't be able to ack it because this ID isn't in their `dpsObjectsOwtPool`, and is not in the pool, so we will be able to download it from these other peers until we find a valid one.
+
+As in TxSubmission, acknowledgement is done by indicating to the outbound peer the length of the (longest) prefix of the outstanding FIFO that we no longer care about (i.e. for which all IDs are eligible for acknowledgment by the definition above). The field `dpsOutstandingFifo` on the inbound peer is supposed to mirror exactly the state of the FIFO of the outbound peer, bar eventual discrepancies due to in-flight information.
+
+## Download attribution process in `makeDecisions`
+
+When making decisions, we first divide the peers in two groups:
+- Those who are currently executing a decision, i.e., those for which the (previous) decision in the decision channel verifies `pdStatus == DecisionBeingActedUpon`. These are further called _frozen peers_.
+- Those who are not currently executing a decision, i.e., those for which the (previous) decision in the decision channel verifies `pdStatus == DecisionUnread || pdStatus == DecisionCompleted`. The former are the ones who didn't have time to read the previous decision yet, so it makes sense to recompute a more up-to-date decision for them.
The latter are the ones who have completed executing the previous decision, so it also makes sense to compute a new decision for them. These two categories of peers are further called _active peers_. + +The rest of the decision logic will only aim to compute decisions for the active peers, while frozen peers will keep their previous decision until they complete executing it. But we need a few information from frozen peers to drive the decision for active peers. + +The first step is to pre-compute which acknowledgment each active peer will make on its next request for IDs, and how many IDs they should request. This is done by the `computeAck` function in `Decision.hs`, that produces partial `PeerDecision`s (i.e. their `pdObjectsToReqIds` field is not yet specified). + + wotwoThen we decide which objects shoul +re act are available from (at least) one active peer and are not in the set of IDs that will be acked by this peer on its next request for IDs (NOTE: theoretically, this second condition is redundant with other constraints) + +We then build the map of object IDs to thesset of sctive pee that h can provd build the map of object IDs to the set of active peers that h the corresponding objects are available from (at least) one active peer and are not in the set of IDs that will be acked by this peer on its next request for IDs (NOTE: theoretically, this second condition is redundant with other constraints) +We thid \ No newline at end of file diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Decision.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Decision.hs index 5592330684..bb219190c1 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Decision.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Decision.hs @@ 
-116,6 +116,11 @@ computeAck poolHasObject DecisionPolicy{dpMaxNumObjectIdsReq, dpMaxNumObjectsOut (fromIntegral dpMaxNumObjectsOutstanding - futureFifoSizeOnOutboundPeer) `min` dpMaxNumObjectIdsReq + -- TODO: in the case where pdNumIdsToReq == 0, we know we actually won't be able to ack anything + -- during this round. So it might make sense to set pdNumIdsToAck = 0 and idsToAck = mempty as well? + -- If we do this change, we could add an assert in `V2.hs` that whenever pdNumIdsToReq == 0, then pdNumIdsToAck == 0 as well + -- /!\ We should also revise documentation in V2.md accordingly + pdCanPipelineIdsRequests = not . StrictSeq.null $ dpsOutstandingFifo' peerDecision = @@ -190,6 +195,8 @@ pickObjectsToReq ( \accMap (peerAddr, DecisionPeerState{dpsObjectsAvailableIds, dpsObjectsInflightIds}) -> let -- ids that will be acked for this peer won't be available anymore, so we should not consider them in the decision logic + -- + -- TODO: this is quite redundant, because ack can only be made when the object is already in the pool (in which case it would have been filtered out anyway in next step) or when the object is in dpsObjectsOwtPool of this peer (in which case it shouldn't be anymore in dpsObjectsAvailableIds) idsToAckForThisPeer = Map.findWithDefault (error "invariant violated: peer must be in peerToIdsToAck map") diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Registry.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Registry.hs index 4cd5f57275..24507e1e50 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Registry.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Registry.hs @@ -157,6 +157,7 @@ withPeer peerToChannel <- readTVar decisionChannelsVar decisionChan <- case peerToChannel Map.!? 
peerAddr of -- Checks if a channel already exists for this peer, in case we reuse it + -- Should not happen normally, because we unregister the peer from the channels map on disconnection through the bracket function Just chan -> return chan -- Otherwise create a new channel and register it Nothing -> do From 919796a7bb217d2083a7f4f3767c220389c0ea45 Mon Sep 17 00:00:00 2001 From: Nicolas BACQUEY Date: Wed, 22 Oct 2025 18:16:20 +0200 Subject: [PATCH 38/43] "Fix" mermaid diagram --- .../Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2.md | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2.md b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2.md index 083f6e12d2..b1e76ef577 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2.md +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2.md @@ -79,17 +79,16 @@ flowchart TD EB{onReceiveIds} EB -->|-count| A - - EB -->|+ids| B + EB -->|+ids| B IN1@{ shape: lin-cyl, label: "ids" } --o EB EB -->|+ids| C EC{onRequestObjects} -E EC -->|"`-ids (selected for download only)`"| C + EC -->|"`-ids (selected for download only)`"| C EC -->|+ids| D ED{onReceiveObjects} - D -->|-ids| ED + ED -->|-ids| D IN2@{ shape: lin-cyl, label: "objects" } --o ED ED -->|+objects| F @@ -100,7 +99,7 @@ E EC -->|"`-ids (selected for download only)`"| C EE -.-o|pdObjectsToReqIds| EC EG{Added to pool} - F -->|-objects| EG + EG -->|-objects| F ``` ### Fields of `DecisionPeerState` and their lifecycle From 87e8ee642e9fd1bb88ad2a738fb9a6c25295eb1b Mon Sep 17 00:00:00 2001 From: Nicolas BACQUEY Date: Wed, 22 Oct 2025 18:29:45 +0200 Subject: [PATCH 39/43] More documentation and fixes --- .../MiniProtocol/ObjectDiffusion/Inbound/V2.md | 13 ++++++------- 1 file 
changed, 6 insertions(+), 7 deletions(-) diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2.md b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2.md index b1e76ef577..50f7def6bc 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2.md +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2.md @@ -1,18 +1,17 @@ # Object Diffusion Inbound Mini-Protocol V2 -### TODOs +This document describes the inner workings of the inbound side of the `ObjectDiffusion` mini-protocol. A broad description of this protocol can be found in section 2.5 of [this document](https://tweag.github.io/cardano-peras/peras-design.pdf). + +## TODOs - what happens when a peer gets disconnected: ensure correctness - describe the strategy for individual peer decisions -- talk about contraints / partial functions - describe lifetime of id + related object - list differences with TxSubmissionV ## General architecture -This document describes the inner workings of the `ObjectDiffusion` mini-prot - -In ObjectDiffusion V2 (only the inbound side changes compared to V1), each connection to an outbound peer is no longer considered in isolation. Instead, there is a global state `DecisionGlobalState` (defined in `Types.hs`) that tracks the state of all connections together using `dgsPeerStates :: Map peerAddr (DecisionPeerState objectId object)`. Currently, this field is the only of of `DecisionGlobalState`, meaning that we use no global data other than the sum of the peer's. Further on, we denote _an instance of the inbound protocol connected to a specific outbound peer_ simply as _an inbound peer_. +In `ObjectDiffusion` V2 (only the inbound side changes compared to V1), each connection to an outbound peer is no longer considered in isolation. 
Instead, there is a global state `DecisionGlobalState` (defined in `Types.hs`) that tracks the state of all connections together using `dgsPeerStates :: Map peerAddr (DecisionPeerState objectId object)`. Currently, this field is the only one of `DecisionGlobalState`, meaning that we use no global data other than the sum of the peer's. Further on, we denote _an instance of the inbound protocol connected to a specific outbound peer_ simply as _an inbound peer_. A `DecisionPeerState` holds the state of the interaction with the distant outbound peer, which is described in more details in [this section](#fields-of-decisionpeerstate-and-their-lifecycle). @@ -23,7 +22,7 @@ The global state is read periodically from a dedicated _decision_ thread (define - `pdCanPipelineIdsReq`: a flag indicating whether the peer can pipeline its requests for IDs (instead of making a blocking call). - `pdObjectsToReqIds`: the IDs of the objects that the inbound peer should request from the outbound peer. -An inbound peer (defined in `V2.hs`) has no direct access to the state, neither in write or read fashion. It only has access to a monadic API `PeerStateAPI` defined in `Registry.hs`. This API has 2 decisions-related callbacks, and 4 state-mutating callbacks. It should follow the decision and call the state-mutating callbacks accordingly to keep the global state consistent with the actions taken. +An inbound peer (defined in `V2.hs`) has no direct access to the state, neither in write nor read fashion. It only has access to a monadic API `PeerStateAPI` defined in `Registry.hs`. This API has 2 decisions-related callbacks, and 4 state-mutating callbacks. It should follow the decision and call the state-mutating callbacks accordingly to keep the global state consistent with the actions taken. 
**Decision-related callbacks:** @@ -41,7 +40,7 @@ These are the callbacks that the inbound peer must call when it takes the corres - `psaOnRequestObjects` (corresponding to `onRequestObjects` from `State.hs`) that must be called when emitting a request for new objects. Under the hood, `onRequestObjects` will remove the requested IDs from `dpsObjectsAvailableIds` and add them to `dpsObjectsInflightIds`. - `psaOnReceiveObjects` (corresponding to `onReceiveObjects` from `State.hs`) that must be called when receiving objects from the outbound peer. Under the hood, `onReceiveObjects` will remove the received IDs from `dpsObjectsInflightIds`, and add the received objects to `dpsOwtPool`, and call the `submitObjectsToPool` subroutine that will actually insert the objects into the object pool when the lock can be acquired (at which point the objects are removed from `dpsOwtPool`) -NOTE: Protocol error-handling (e.g. making sure the outbound peer has sent the correct information) is be done by the callback themselves, so the inbound peer doesn't have to check anything before calling these state-mutating callbacks. +NOTE: Protocol error-handling (e.g. making sure the outbound peer has sent the correct information) is done by the callback themselves, so the inbound peer doesn't have to check anything before calling these state-mutating callbacks. Preconditions that should hold, but don't due to implementation errors, are tested with `assert` throughout the code. This ensures a modicum of correctness as long as the code is sufficiently tested. 
## Inbound peer loop From fab43c70dade97b2d664c9b09760668be675a667 Mon Sep 17 00:00:00 2001 From: Thomas BAGREL Date: Wed, 22 Oct 2025 18:31:34 +0200 Subject: [PATCH 40/43] Further edits to doc --- .../ObjectDiffusion/Inbound/V2.md | 23 +++++++++++++++---- .../ObjectDiffusion/Inbound/V2/Decision.hs | 4 ++-- .../ObjectDiffusion/Inbound/V2/Policy.hs | 4 ++-- 3 files changed, 23 insertions(+), 8 deletions(-) diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2.md b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2.md index 50f7def6bc..27bd1b321c 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2.md +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2.md @@ -135,8 +135,23 @@ The rest of the decision logic will only aim to compute decisions for the active The first step is to pre-compute which acknowledgment each active peer will make on its next request for IDs, and how many IDs they should request. This is done by the `computeAck` function in `Decision.hs`, that produces partial `PeerDecision`s (i.e. their `pdObjectsToReqIds` field is not yet specified). - wotwoThen we decide which objects shoul -re act are available from (at least) one active peer and are not in the set of IDs that will be acked by this peer on its next request for IDs (NOTE: theoretically, this second condition is redundant with other constraints) +Then we decide which objects should be downloaded from which active peer in the `pickObjectsToReq` function. 
-We then build the map of object IDs to thesset of sctive pee that h can provd build the map of object IDs to the set of active peers that h the corresponding objects are available from (at least) one active peer and are not in the set of IDs that will be acked by this peer on its next request for IDs (NOTE: theoretically, this second condition is redundant with other constraints)
-We thid
\ No newline at end of file
+More concretely, we list from each peer which are the interesting available objects, i.e. the objects that match these two criteria:
+- They are not already in the pool
+- They are available from the peer, and won't be acked by the peer on its next request for IDs according to the partial decision computed by `computeAck` at the previous step (NOTE: theoretically, this second condition is redundant with other constraints and invariants of the protocol).
+
+Then we "reverse" this mapping to obtain a map of object IDs to the set of active peers that have the corresponding interesting objects available (according to the criteria above), further called _potential providers_.
+
+Now, we consider how many copies of each object are already in the process of being acquired. We count as "in the process of being acquired" any object that is either:
+- in `dpsObjectsInflightIds` of any active peer
+- in `dpsObjectsInflightIds` **or** in `pdObjectsToReqIds` of any frozen peer (because we consider that a frozen peer will execute its decision to completion, even if `onRequestObjects`, that adds items to `dpsObjectsInflightIds`, hasn't been called yet by it)
+- in `dpsObjectsOwtPool` of any peer
+
+For each object, sequentially, we then try to select as many providers as the difference between the redundancy target (`dpTargetObjectRedundancy` in `Policy.hs`) and the number of copies of this object already in the process of being acquired.
But we also make sure, when selecting providers, that we don't go beyond the limit of objects in flight for each potential provider, and that we don't go beyond the limit of total objects in flight for our node too (defined by `dpMaxNumObjectsInflightPerPeer` and `dpMaxNumObjectsInflightTotal` in `Policy.hs`). + +The result is a map from active peers to the set of object IDs that should be requested from them. This map is then merged with the partial decisions computed by `computeAck` to produce the final decisions for each active peer, that are then propagated to the peers through their decision channels (in `Registry.hs`). + +At the moment, the algorithm is eager towards securing the target number of copies for each object, at the detriment of object coverage and peer load balancing. Future improvements could be made to address this if needed. + +NOTE: the decision logic doesn't make any changes to the global state; it only reads it. All changes to the global state are made by the inbound peers through the `PeerStateAPI` callbacks. 
diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Decision.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Decision.hs index bb219190c1..8548189d88 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Decision.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Decision.hs @@ -172,7 +172,7 @@ pickObjectsToReq DecisionPolicy { dpMaxNumObjectsInflightPerPeer , dpMaxNumObjectsInflightTotal - , dpMaxObjectInflightMultiplicity + , dpTargetObjectRedundancy } DecisionGlobalState { dgsPeerStates @@ -321,7 +321,7 @@ pickObjectsToReq shouldSelect = -- We should not go over the multiplicity limit per object - objectMultiplicity + expectedMultiplicity < dpMaxObjectInflightMultiplicity + objectMultiplicity + expectedMultiplicity < dpTargetObjectRedundancy -- We should not go over the total number of objects inflight limit && totalNumObjectsInflight + totalNumObjectsToReq < dpMaxNumObjectsInflightTotal -- We should not go over the per-peer number of objects inflight limit diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Policy.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Policy.hs index b840b0a0f2..f51a5cbfa3 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Policy.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Policy.hs @@ -20,7 +20,7 @@ data DecisionPolicy = DecisionPolicy -- ^ a limit of objects in-flight from a single peer. , dpMaxNumObjectsInflightTotal :: !NumObjectsReq -- ^ a limit of objects in-flight from all peers for this node. 
- , dpMaxObjectInflightMultiplicity :: !ObjectMultiplicity + , dpTargetObjectRedundancy :: !ObjectMultiplicity -- ^ from how many peers download the `objectId` simultaneously } deriving Show @@ -32,5 +32,5 @@ defaultDecisionPolicy = , dpMaxNumObjectsOutstanding = 10 -- must be the same as the outbound peer's value , dpMaxNumObjectsInflightPerPeer = 6 , dpMaxNumObjectsInflightTotal = 20 - , dpMaxObjectInflightMultiplicity = 2 + , dpTargetObjectRedundancy = 2 } From ff925ad086f82ea1631b2ce634d6374b30e2a912 Mon Sep 17 00:00:00 2001 From: Thomas BAGREL Date: Wed, 22 Oct 2025 19:18:45 +0200 Subject: [PATCH 41/43] Protocol implem design document is ready! --- .../ObjectDiffusion/Inbound/V2.md | 72 +++++++++++++------ 1 file changed, 51 insertions(+), 21 deletions(-) diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2.md b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2.md index 27bd1b321c..29e81edf4f 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2.md +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2.md @@ -1,13 +1,15 @@ # Object Diffusion Inbound Mini-Protocol V2 -This document describes the inner workings of the inbound side of the `ObjectDiffusion` mini-protocol. A broad description of this protocol can be found in section 2.5 of [this document](https://tweag.github.io/cardano-peras/peras-design.pdf). +This document describes the inner workings of the inbound side of the `ObjectDiffusion` mini-protocol. A broad description of the whole protocol can be found in section 2.5 of [this document](https://tweag.github.io/cardano-peras/peras-design.pdf). 
-## TODOs - -- what happens when a peer gets disconnected: ensure correctness -- describe the strategy for individual peer decisions -- describe lifetime of id + related object -- list differences with TxSubmissionV +- [Object Diffusion Inbound Mini-Protocol V2](#object-diffusion-inbound-mini-protocol-v2) + - [General architecture](#general-architecture) + - [Inbound peer loop](#inbound-peer-loop) + - [Peer state description and lifecycle](#peer-state-description-and-lifecycle) + - [Acknowledgement behavior](#acknowledgement-behavior) + - [Download attribution process in `makeDecisions`](#download-attribution-process-in-makedecisions) + - [On peer disconnection](#on-peer-disconnection) + - [Differences with TxSubmission V2 inbound mini-protocol](#differences-with-txsubmission-v2-inbound-mini-protocol) ## General architecture @@ -18,9 +20,9 @@ A `DecisionPeerState` holds the state of the interaction with the distant outbou The global state is read periodically from a dedicated _decision_ thread (defined in `Registry.hs` and `Decision.hs`), and for each inbound peer computes a `PeerDecision` (defined in `Types.hs`) that indicates what the inbound peer should do next. More specifically, in the decision, the inbound peer can find: - `pdIdsToReq`: number of new IDs to request from the outbound peer -- `pdIdsToAck`: a set of IDs that the peer should ack in its next request for IDs. Note that if `pdIdsToReq` is zero, then no request for IDs will be sent, and thus no acking will happen despite `pdIdsToAck` being non-zero. +- `pdIdsToAck`: number of IDs that the peer should ack in its next request for IDs. Note that if `pdIdsToReq` is zero, then no request for IDs will be sent, and thus no acknowledgment will happen despite `pdIdsToAck` being non-zero (we might change the decision process in the future to get rid of this non-intuitive case). - `pdCanPipelineIdsReq`: a flag indicating whether the peer can pipeline its requests for IDs (instead of making a blocking call). 
-- `pdObjectsToReqIds`: the IDs of the objects that the inbound peer should request from the outbound peer. +- `pdObjectsToReqIds`: the set of IDs of the objects that the inbound peer should request from the outbound peer. An inbound peer (defined in `V2.hs`) has no direct access to the state, neither in write nor read fashion. It only has access to a monadic API `PeerStateAPI` defined in `Registry.hs`. This API has 2 decisions-related callbacks, and 4 state-mutating callbacks. It should follow the decision and call the state-mutating callbacks accordingly to keep the global state consistent with the actions taken. @@ -29,15 +31,15 @@ An inbound peer (defined in `V2.hs`) has no direct access to the state, neither - `psaReadDecision` that allows the inbound peer to read the current `PeerDecision` made for itself by the last round of the decision logic. This will block if a new decision is not yet available for this peer. - `psaOnDecisionCompleted` that allows the inbound peer to signal that it has executed the last decision it read, and that the decision logic should now compute a new decision for this peer -The decision additionally has a flag `pdStatus` (not intended to be read by the peer) that is set to `DecisionBeingActedUpon` when `psaReadDecision`, and stays at that value until the peer calls `psaOnDecisionCompleted`, indicating that it has executed the decision (the status is set to `DecisionCompleted`). This is the main way the peer interacts with the decision thread. While the flag is set to `DecisionBeingActedUpon`, the global-state decision logic will not update the decision for this peer (it is locked, or "frozen"). +The decision additionally has a flag `pdStatus` (not intended to be read by the peer) that is set to `DecisionBeingActedUpon` when `psaReadDecision` returns, and stays at that value until the peer calls `psaOnDecisionCompleted`, indicating that it has executed the decision (at which point the status is set to `DecisionCompleted`). 
This is the main way the peer interacts with the decision thread. While the flag is set to `DecisionBeingActedUpon`, the global-state decision logic will not update the decision for this peer (it is locked, or "frozen"). **State-mutating callbacks:** -These are the callbacks that the inbound peer must call when it takes the corresponding actions that has been dictated by the decision it read. These callbacks will update the corresponding peer state. For reference, the fields of this state are documented in [this section](#fields-of-decisionpeerstate-and-their-lifecycle) +These are the callbacks that the inbound peer must call when it takes the corresponding actions that have been dictated by the decision it read. These callbacks will update the corresponding peer state in the global state. For reference, the fields of this state are documented in [this section](#fields-of-decisionpeerstate-and-their-lifecycle). -- `psaOnRequestIds` (corresponding to `onRequestIds` from `State.hs`) that must be called when emitting a request for new IDs (that also acks previously received IDs that we no longer care about). Under the hood, `onRequestIds` will increase the `dpsNumIdsInFlight` count by the requested number of IDs, and remove the acked IDs from `dpsOutstandingFifo` and `dpsObjectsAvailableIds`. +- `psaOnRequestIds` (corresponding to `onRequestIds` from `State.hs`) that must be called when emitting a request for a non-zero amount of new IDs (that will also ack previously received IDs that we no longer care about). Under the hood, `onRequestIds` will increase the `dpsNumIdsInFlight` count by the requested number of IDs, and remove the acked IDs from `dpsOutstandingFifo` and `dpsObjectsAvailableIds`. - `psaOnReceiveIds` (corresponding to `onReceiveIds` from `State.hs`) that must be called after receiving new IDs from the outbound peer.
Under the hood, `onReceiveIds` will decrease the `dpsNumIdsInFlight` count by **the number of IDs that were requested in the request corresponding to this reply** (it might be more than the number of received IDs), and add the received IDs to `dpsOutstandingFifo` and `dpsObjectsAvailableIds`. -- `psaOnRequestObjects` (corresponding to `onRequestObjects` from `State.hs`) that must be called when emitting a request for new objects. Under the hood, `onRequestObjects` will remove the requested IDs from `dpsObjectsAvailableIds` and add them to `dpsObjectsInflightIds`. +- `psaOnRequestObjects` (corresponding to `onRequestObjects` from `State.hs`) that must be called when emitting a request for a non-zero amount of new objects. Under the hood, `onRequestObjects` will remove the requested IDs from `dpsObjectsAvailableIds` and add them to `dpsObjectsInflightIds`. - `psaOnReceiveObjects` (corresponding to `onReceiveObjects` from `State.hs`) that must be called when receiving objects from the outbound peer. Under the hood, `onReceiveObjects` will remove the received IDs from `dpsObjectsInflightIds`, and add the received objects to `dpsOwtPool`, and call the `submitObjectsToPool` subroutine that will actually insert the objects into the object pool when the lock can be acquired (at which point the objects are removed from `dpsOwtPool`) NOTE: Protocol error-handling (e.g. making sure the outbound peer has sent the correct information) is done by the callback themselves, so the inbound peer doesn't have to check anything before calling these state-mutating callbacks. Preconditions that should hold, but don't due to implementation errors, are tested with `assert` throughout the code. This ensures a modicum of correctness as long as the code is sufficiently tested. @@ -52,16 +54,20 @@ The inbound peer performs a loop where each iteration starts with (blocking on) 4. 
Then request IDs (if any) as per `pdIdsToReq` (acking `pdIdsToAck` as a side-effect), and call `psaOnRequestIds` accordingly 5. Call `psaOnDecisionExecuted` to signal that a new decision should be made for this peer -In the implementation, steps 2-4 are performed by the `goCollect`, `goReqIds` and `goReqObjects` functions respectively of `V2.hs`. +In the implementation, steps 2, 3, 4 are performed by the `goCollect`, `goReqIds` and `goReqObjects` functions in `V2.hs` that each call the next one in sequence as needed. NOTE: The decision logic doesn't assume that we will first request objects, then only (request and) acknowledge IDs. Consequently, the decision logic won't ever ask to request objects whose IDs would be acknowledged in that same round. ## Peer state description and lifecycle -This diagram shows when a value (count, ID or object) is added or removed from a specific field of the `DecisionPeerState` during the execution of an inbound peer. +The following diagram indicates when and by whom fields of the `DecisionPeerState` of an inbound peer are modified. Fields of `DecisionPeerState` are represented as rounded rectangles, while callbacks/functions are represented as diamond shapes. The entry point of the diagram is the `makeDecisions / psaReadDecision` node, that dictates the actions to be taken by the inbound peer, that are then reflected through the `onRequestIds` and `onRequestObjects` callbacks. +Normal arrows `->` take their source from a function, and point towards a field that is modified by this function. The label on the arrow indicates, by the sign, whether something is added _or_ removed from the field, and also the nature of the value (count, ids, objects) being added or removed. + +Arrows with rounded head show an external input of data, i.e. when the inbound peer actually receives data from the outbound peer.
+ ```mermaid %%{init: {"flowchart": {"htmlLabels": true}} }%% flowchart TD @@ -107,27 +113,28 @@ flowchart TD - `dpsOutstandingFifo`: IDs of the objects that the outbound peer has available for us, and which we have not yet acknowledged. This is kept in the order in which the outbound peer gave them to us. It is also the order in which we acknowledge them (because acknowledgment, as in TX-submission, is made by sending the length of the prefix of the FIFO that we no longer care about, instead of providing the IDs as a set). IDs are added to this FIFO in `onReceiveIds`, and removed from this FIFO in `onRequestIds` when we acknowledge (i.e. drop) a prefix of the FIFO. - `dpsObjectsAvailableIds`: Set of IDs of the objects that can be requested to the outbound peer, and have not yet been requested or downloaded. This is a subset of `dpsOutstandingFifo`. IDs are added to this set in `onReceiveIds`. They can be removed from this set in two ways: - when some objects are requested by their IDs in `onRequestObjects`, the corresponding IDs are removed from `dpsObjectsAvailableIds` - - for the IDs that are voluntarily not requested (e.g. because we already have obtained them through other peers), they are removed from `dpsObjectsAvailableIds` when we acknowledge a prefix of the FIFO that contains them + - for the IDs that were voluntarily not requested (e.g. because we already have obtained them through other peers), they are removed from `dpsObjectsAvailableIds` when we acknowledge a prefix of the FIFO that contains them - `dpsObjectsInflightIds`: The IDs of objects that have been requested to the outbound peer, but have not yet been received. IDs are added to this set in `onRequestObjects` (at the moment they are removed from `dpsObjectsAvailableIds`), and removed from this set in `onReceiveObjects` (at the moment the corresponding objects are added to `dpsObjectsOwtPool`). 
In ObjectDiffusion, we must receive exactly the objects that we requested, so there is no way for some items in this set to stay here indefinitely - `dpsObjectsOwtPool`: A map of IDs to objects that have been received, and are on their way to the `ObjectPool`. As we have many inbound peers in parallel, we cannot directly insert objects into the pool when we receive them; instead, we should wait to obtain the pool lock. So we store the received objects here in the meantime, and the subroutine `submitObjectsToPool` (launched by `onReceiveObjects`) will acquire the lock and insert them into the pool when possible, and thus remove them from `dpsObjectsOwtPool` at that moment. -## Acknowledgement behavior note +## Acknowledgement behavior The ID of an object is eligible for acknowledgement from a given inbound peer when: - The corresponding object has been downloaded from its direct outbound peer, and is currently in `dpsObjectsOwtPool` of **this** inbound peer - The corresponding object is already in the pool (either obtained through other inbound peers, or previously downloaded and inserted by this inbound peer) -So even if the validation of a received object is done at the moment it is added to pool, this won't cause any trouble. Take the example of an object that is rejected by the pool (because it has invalid cryptographic signature, for example). In this case: +So even if the validation of a received object is done at the moment the object is added to pool, there won't be any issue. Take the example of an object that is rejected by the pool (because it has invalid cryptographic signature, for example). In this case: -- the inbound peer that submitted the object to pool might have acked it already at the moment the object is rejected by the pool, but this means the outbound peer which sent us the object is adversarial, and we should disconnect from it anyway. 
So there is no harm done by having acked the object to the adversarial outbound peer, as we won't want to re-download this object from it again (or any other object whatsoever). -- any other inbound peer that has this ID available from its outbound peer won't be able to ack it because this ID isn't in their `dpsObjectsOwtPool`, and is not in the pool, so we will be able to download it from these other peers until we find a valid one. +- the inbound peer that submitted the object to pool might have acked it already at the moment the object is rejected by the pool, but the rejection indicates that the outbound peer which sent us the object is adversarial, and we should disconnect from it anyway. So there is no harm done by having acked the object to the adversarial outbound peer, as we won't want to re-download this object from it again (or any other object whatsoever). +- any other inbound peer that has this ID available from its outbound peer won't be able to ack it because this ID isn't in **their** `dpsObjectsOwtPool`, and is not in the pool either, so we will be able to download it from these other peers until we find a valid one. As in TxSubmission, acknowledgement is done by indicating to the outbound peer the length of the (longest) prefix of the oustanding FIFO that we no longer care about (i.e. for which all IDs are eligible to acknowledgment by the definition above). The field `dpsOutstandingFifo` on the inbound peer is supposed to mirror exactly the state of the FIFO of the outbound peer, bar eventual discrepancies due to in-flight information. ## Download attribution process in `makeDecisions` When making decisions, we first divide the peers in two groups: + - Those who are currently executing a decision, i.e., those for which the (previous) decision in the decision channel verifies `pdStatus == DecisionBeingActedUpon`. These are further called _frozen peers_. 
- Those who are not currently executing a decision, i.e., those for which the (previous) decision in the decision channel verifies `pdStatus == DecisionUnread || pdStatus == DecisionCompleted`. The former are the ones who didn't have time to read the previous decision yet, so it makes sense to recompute a more up-to-date decision for them. The latter are the ones who have completed executing the previous decision, so it also makes sense to compute a new decision for them. These two categories of peers are further called _active peers_. @@ -138,12 +145,14 @@ The first step is to pre-compute which acknowledgment each active peer will make Then we decide which objects should be downloaded from which active peer in the `pickObjectsToReq` function. More concretely, we list from each peer which are the interesting available objects, i.e. the objects that match this two criteria: + - They are not already in the pool -- They are available from the peer, and won't be acked by the peer on its next request for IDs according to the partial decision computed by `computeAck` at the previous step (NOTE: theoretically, this second condition is redundant with other constraints and invariants of the protocol). +- They are available from the peer, and won't be acked by the peer on its next request for IDs according to the partial decision computed by `computeAck` at the previous step (NOTE: theoretically, this second condition is redundant with other constraints and invariants of the current implementation). Then we "reverse" this mapping to obtain a map of object IDs to the set of active peers that have the corresponding interesting objects available (according to the criteria above), further called _potential providers_. Now, we consider how many copies of each object are already in the process of being acquired. 
We count as "in the process of being acquired" any object that is either: + - in `dpsObjectsInFlightIds` of any active peer - in `dpsObjectsInFlightIds` **or** in `pdObjectsToReqIds` of any frozen peer (because we consider that a frozen peer will execute its decision to completion, even if `onRequestObjects`, that adds items to `dpsObjectsInFlightIds`, hasn't been called yet by it) - in `dpsObjectsOwtPool` of any peer @@ -155,3 +164,24 @@ The result is a map from active peers to the set of object IDs that should be re At the moment, the algorithm is eager towards securing the target number of copies for each object, at the detriment of object coverage and peer load balancing. Future improvements could be made to address this if needed. NOTE: the decision logic doesn't make any changes to the global state; it only reads it. All changes to the global state are made by the inbound peers through the `PeerStateAPI` callbacks. + +## On peer disconnection + +The inbound peers are registered in the global state and decision channels map through a `bracket` function in `Registry.hs`. When a peer disconnects, the corresponding entry in the decision channels map and global state are automatically removed. + +As the global state is only a map of per-peer states at the moment, this means that we don't need to take any other particular action to clean up the global state following the disconnection of a peer. + +Any error protocol-wise (e.g. receiving invalid data from the outbound peer) or receiving objects that are rejected by the pool (e.g. if they don't have valid cryptographic signatures) should throw an exception, that will automatically lead to disconnection (and thus triggering cleanup). + +Following a peer disconnection, the next round of decision-logic will readjust accordingly. 
For example, if some object was in the process of being downloaded from the disconnected peer, the next round of the decision logic will see that we have fewer copies in the process of being acquired than before, and thus will ask other providers to download it. + +## Differences with TxSubmission V2 inbound mini-protocol + +Although both mini-protocol inbound implementations share the same general structure (global state and global decision thread, with peer registering through a bracket function, peer interacting through an API with the global state), there are some major differences in the implementation: + +- ObjectDiffusion decision process doesn't modify the global state at all, unlike the TxSubmission one. This is true for acknowledgment computation too (that is part of decision making). Instead, all modifications to the global state are made by the inbound peers through the `PeerStateAPI` callbacks. This makes the decision logic more straightforward +- ObjectDiffusion decision process doesn't pre-filter peers based on their individual `DecisionPeerState` to know whether or not we should generate a decision for them. Instead, we use the `pdStatus` field of the decision, updated through the `psaReadDecision` and `psaOnDecisionCompleted` callbacks, to know whether or not we should compute a new decision for a peer. The conditions on which we compute a new decision are also different: we compute a new decision for a peer if it is not currently executing a decision (i.e. its status is `DecisionUnread` or `DecisionCompleted`), instead of checking various fields of its `DecisionPeerState`. +- ObjectDiffusion relies on the `opwHasObject` method of the `ObjectPoolWrapper` to know whether or not an object is already in the pool, instead of tracking this information in the global state with a retention delay.
This simplifies the global state and implementation a lot, but depends on the implementation of the `ObjectPoolWriter` to provide a fairly cost-efficient implementation for `opwHasObject`, as it is called often. +- Similarly, ObjectDiffusion gets rid of many global maps that were slightly redundant with information already present in each peer's state, as the only time we need to use these maps are during decision-making. So for the time being, we recompute the specific parts of this global view that we need at each round of decision-making, instead of maintaining them up-to-date at all times. We might need to revisit this later for performance purposes if needed. +- ObjectDiffusion also doesn't have a concept of ranking/scoring for peers, as an invalid object must lead to immediate disconnection. So the decision logic doesn't need to consider peer quality when attributing downloads. +- In ObjectDiffusion, the global state is not modified directly outside of `State.hs` (and `Registry.hs` when registering/unregistering peers). 
From 7fa719ced376d5e6142a6268965781bec6c3e5bb Mon Sep 17 00:00:00 2001 From: Nicolas BACQUEY Date: Thu, 23 Oct 2025 23:26:00 +0200 Subject: [PATCH 42/43] WIP: benchmarks for object diffusion logic --- .../bench/ObjectDiffusion-bench/Main.hs | 97 +++++++++++++++++++ ouroboros-consensus/ouroboros-consensus.cabal | 17 ++++ .../ObjectDiffusion/Inbound/V2/Decision.hs | 63 +++++++++++- .../ObjectDiffusion/Inbound/V2/Policy.hs | 14 ++- .../ObjectDiffusion/Inbound/V2/Types.hs | 55 ++++++++++- 5 files changed, 239 insertions(+), 7 deletions(-) create mode 100644 ouroboros-consensus/bench/ObjectDiffusion-bench/Main.hs diff --git a/ouroboros-consensus/bench/ObjectDiffusion-bench/Main.hs b/ouroboros-consensus/bench/ObjectDiffusion-bench/Main.hs new file mode 100644 index 0000000000..bd8f41f2f3 --- /dev/null +++ b/ouroboros-consensus/bench/ObjectDiffusion-bench/Main.hs @@ -0,0 +1,97 @@ +{-# LANGUAGE DeriveAnyClass #-} +{-# LANGUAGE DeriveGeneric #-} +{-# LANGUAGE ImportQualifiedPost #-} +{-# LANGUAGE NamedFieldPuns #-} +{-# LANGUAGE NumericUnderscores #-} +{-# LANGUAGE TypeApplications #-} + +-- | This module contains benchmarks for Peras Object diffusion decision logic +-- as implemented by the by the function +-- 'Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.Decision.makeDecision' +module Main (main) where + +import Control.DeepSeq (NFData (..)) +import Control.Exception (evaluate) +import Data.Hashable (Hashable) +import Debug.Trace (traceMarkerIO) +import GHC.Generics (Generic) +import System.Random.SplitMix qualified as SM +import Test.Tasty.Bench +import Test.QuickCheck (Arbitrary (..)) + +import Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.Decision qualified as OD + +-- TODO: We will probably want to use the actual types used in vote/cert diffusion, +-- instead of placeholders. 
+newtype DummyPeerAddr = DummyPeerAddr Int + deriving (Eq, Ord, Generic, NFData) + +instance Arbitrary DummyPeerAddr where + arbitrary = DummyPeerAddr <$> arbitrary + +newtype DummyObjectId = DummyObjectId Int + deriving (Eq, Ord, Generic, Hashable, NFData) + +instance Arbitrary DummyObjectId where + arbitrary = DummyObjectId <$> arbitrary + +data DummyObject = DummyObject + { doId :: DummyObjectId + , doPayload :: () + } + deriving (Generic, NFData) + +instance Arbitrary DummyObject where + arbitrary = DummyObject <$> arbitrary <*> arbitrary + +main :: IO () +main = + defaultMain + [ bgroup "ouroboros-consensus:ObjectDiffusion" + [ bgroup "VoteDiffusion" + [ env + (do let a = OD.mkDecisionContext (SM.mkSMGen 123) 10 + evaluate (rnf a) + traceMarkerIO "evaluated decision context" + return a + ) + (\a -> bench "makeDecisions: 10" $ + nf makeVoteDiffusionDecision a + ) + , env + (do let a = OD.mkDecisionContext (SM.mkSMGen 456) 100 + evaluate (rnf a) + traceMarkerIO "evaluated decision context" + return a + ) + (\a -> bench "makeDecisions: 100" $ + nf makeVoteDiffusionDecision a + ) + , env + (do let a = OD.mkDecisionContext (SM.mkSMGen 789) 1_000 + evaluate (rnf a) + traceMarkerIO "evaluated decision context" + return a + ) + (\a -> bench "makeDecisions: 1_000" $ + nf makeVoteDiffusionDecision a + ) + ] + , bgroup "CertDiffusion" [] + ] + ] + where + -- TODO: We probably want to use the decision policy for vote/cert diffusion + -- instead of an arbitrary one. 
+ makeVoteDiffusionDecision = \OD.DecisionContext + { OD.dcRng + , OD.dcHasObject + , OD.dcDecisionPolicy + , OD.dcGlobalState + , OD.dcPrevDecisions + } -> OD.makeDecisions @DummyPeerAddr @DummyObjectId @DummyObject + dcRng + dcHasObject + dcDecisionPolicy + dcGlobalState + dcPrevDecisions diff --git a/ouroboros-consensus/ouroboros-consensus.cabal b/ouroboros-consensus/ouroboros-consensus.cabal index 880a7f8d94..283166c5a5 100644 --- a/ouroboros-consensus/ouroboros-consensus.cabal +++ b/ouroboros-consensus/ouroboros-consensus.cabal @@ -359,6 +359,7 @@ library ouroboros-network-protocols ^>=0.15, primitive, psqueues ^>=0.2.3, + QuickCheck, quiet ^>=0.2, random, rawlock ^>=0.1.1, @@ -369,6 +370,7 @@ library small-steps ^>=1.1, sop-core ^>=0.5, sop-extras ^>=0.4, + splitmix, streaming, strict >=0.1 && <0.6, strict-checked-vars ^>=0.2, @@ -867,6 +869,21 @@ benchmark PerasCertDB-bench tasty-bench, unstable-consensus-testlib, +benchmark ObjectDiffusion-bench + import: common-bench + type: exitcode-stdio-1.0 + hs-source-dirs: bench/ObjectDiffusion-bench + main-is: Main.hs + other-modules: + build-depends: + base, + deepseq, + hashable, + ouroboros-consensus, + QuickCheck, + splitmix, + tasty-bench, + test-suite doctest import: common-test main-is: doctest.hs diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Decision.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Decision.hs index 8548189d88..7b1dfe2698 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Decision.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Decision.hs @@ -7,13 +7,16 @@ module Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.Decision ( PeerDecision (..) 
- , mempty + , makeDecisions -- * Internal API exposed for testing - , makeDecisions + , DecisionContext (..) + , mkDecisionContext ) where +import Control.DeepSeq (NFData (..)) import Data.Foldable qualified as Foldable +import Data.Hashable (Hashable (..)) import Data.Map.Merge.Strict qualified as Map import Data.Map.Strict (Map) import Data.Map.Strict qualified as Map @@ -25,7 +28,61 @@ import Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.Policy import Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.State import Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.Types import Ouroboros.Network.Protocol.ObjectDiffusion.Type -import System.Random (StdGen) +import Test.QuickCheck (Arbitrary (..)) +import Test.QuickCheck.Gen (Gen (..)) +import Test.QuickCheck.Random (QCGen (..)) +import System.Random.SplitMix (SMGen, nextInt) +import System.Random (StdGen, mkStdGen) + +data DecisionContext peerAddr objectId object = DecisionContext + { dcRng :: StdGen + , dcHasObject :: (objectId -> Bool) + , dcDecisionPolicy :: DecisionPolicy + , dcGlobalState :: DecisionGlobalState peerAddr objectId object + , dcPrevDecisions :: Map peerAddr (PeerDecision objectId object) + } + +instance + ( NFData peerAddr + , NFData objectId + , NFData object + ) => + NFData (DecisionContext peerAddr objectId object) where + rnf = undefined + +-- TODO: do not generate dcDecisionPolicy arbitrarily, it makes little sense. +-- Instead we should provide decision policies fit for the concrete object types +-- we want to make decisions for. +mkDecisionContext :: + forall peerAddr objectId object. 
+ ( Arbitrary peerAddr + , Arbitrary objectId + , Arbitrary object + , Ord peerAddr + , Ord objectId + , Hashable objectId + ) => + SMGen -> + Int -> + DecisionContext peerAddr objectId object +mkDecisionContext stdGen size = unGen gen (QCGen stdGen') size + where + (salt, stdGen') = nextInt stdGen + gen :: Gen (DecisionContext peerAddr objectId object) + gen = do + dcRng <- mkStdGen <$> arbitrary + dcDecisionPolicy <- arbitrary + dcGlobalState <- arbitrary + dcPrevDecisions <- arbitrary + let dcHasObject objId = + hashWithSalt salt objId `mod` 2 == 0 + pure $ DecisionContext + { dcRng + , dcHasObject + , dcDecisionPolicy + , dcGlobalState + , dcPrevDecisions + } strictSeqToSet :: Ord a => StrictSeq a -> Set a strictSeqToSet = Set.fromList . Foldable.toList diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Policy.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Policy.hs index f51a5cbfa3..b302cf50f5 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Policy.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Policy.hs @@ -3,10 +3,12 @@ module Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.Policy , defaultDecisionPolicy ) where -import Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.Types (ObjectMultiplicity) +import Test.QuickCheck (Arbitrary (..)) + +import Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.Types (ObjectMultiplicity (..)) import Ouroboros.Network.Protocol.ObjectDiffusion.Type ( NumObjectIdsReq (..) - , NumObjectsOutstanding + , NumObjectsOutstanding (..) , NumObjectsReq (..) 
) @@ -25,6 +27,14 @@ data DecisionPolicy = DecisionPolicy } deriving Show +instance Arbitrary DecisionPolicy where + arbitrary = DecisionPolicy + <$> (NumObjectIdsReq <$> arbitrary) + <*> (NumObjectsOutstanding <$> arbitrary) + <*> (NumObjectsReq <$>arbitrary) + <*> (NumObjectsReq <$>arbitrary) + <*> (ObjectMultiplicity <$> arbitrary) + defaultDecisionPolicy :: DecisionPolicy defaultDecisionPolicy = DecisionPolicy diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Types.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Types.hs index 3f7dd9d41a..22313198b9 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Types.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Types.hs @@ -46,12 +46,12 @@ module Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.Types import Control.Concurrent.Class.MonadSTM.Strict (MonadSTM, StrictTVar, atomically, newTVarIO) import Control.Concurrent.Class.MonadSTM.TSem (TSem, newTSem) -import Control.DeepSeq (NFData) +import Control.DeepSeq (NFData (..)) import Control.Exception (Exception (..)) import Data.Map.Strict (Map) import Data.Map.Strict qualified as Map import Data.Monoid (Sum (..)) -import Data.Sequence.Strict (StrictSeq) +import Data.Sequence.Strict (StrictSeq, fromList) import Data.Set (Set) import Data.Word (Word64) import GHC.Generics (Generic) @@ -60,6 +60,7 @@ import NoThunks.Class (NoThunks (..)) import Ouroboros.Network.ControlMessage (ControlMessage) import Ouroboros.Network.Protocol.ObjectDiffusion.Type import Quiet (Quiet (..)) +import Test.QuickCheck (Arbitrary (..), elements) -- | Semaphore to guard access to the ObjectPool newtype ObjectPoolSem m = ObjectPoolSem (TSem m) @@ -108,6 +109,20 @@ data DecisionPeerState objectId object = DecisionPeerState } deriving 
(Eq, Show, Generic) +instance + ( Arbitrary objectId + , Arbitrary object + , Ord objectId + ) => + Arbitrary (DecisionPeerState objectId object) + where + arbitrary = DecisionPeerState + <$> (NumObjectIdsReq <$> arbitrary) + <*> (fromList <$> arbitrary) + <*> arbitrary + <*> arbitrary + <*> arbitrary + instance ( NoThunks objectId , NoThunks object @@ -125,6 +140,16 @@ data DecisionGlobalState peerAddr objectId object = DecisionGlobalState } deriving (Eq, Show, Generic) +instance + ( Arbitrary peerAddr + , Arbitrary object + , Arbitrary objectId + , Ord peerAddr + , Ord objectId + ) => + Arbitrary (DecisionGlobalState peerAddr objectId object) where + arbitrary = DecisionGlobalState <$> arbitrary + instance ( NoThunks peerAddr , NoThunks object @@ -199,12 +224,38 @@ data PeerDecision objectId object = PeerDecision } deriving (Show, Eq) +instance + ( Arbitrary objectId + , Ord objectId + ) => + Arbitrary (PeerDecision objectId object) where + arbitrary = PeerDecision + <$> (NumObjectIdsAck <$> arbitrary) + <*> (NumObjectIdsReq <$> arbitrary) + <*> arbitrary + <*> arbitrary + <*> arbitrary + +instance + ( NFData objectId + , NFData object + ) => + NFData (PeerDecision objectId object) where + rnf = undefined + data PeerDecisionStatus = DecisionUnread | DecisionBeingActedUpon | DecisionCompleted deriving (Show, Eq) +instance Arbitrary PeerDecisionStatus where + arbitrary = elements + [ DecisionUnread + , DecisionBeingActedUpon + , DecisionCompleted + ] + -- | A placeholder when no decision has been made, at the beginning of a loop. -- Nothing should be read from it except its status. 
unavailableDecision :: HasCallStack => PeerDecision objectId object From 987f215d3ff2f4fc39452b7bed3632711ca80e55 Mon Sep 17 00:00:00 2001 From: Thomas BAGREL Date: Fri, 24 Oct 2025 17:24:02 +0200 Subject: [PATCH 43/43] Continue working on benchmark gen for ObjectDiffusionV2/makeDecisions --- .../bench/ObjectDiffusion-bench/Main.hs | 94 +++++++-------- .../ObjectDiffusion/Inbound/V2/Decision.hs | 72 ++++++------ .../ObjectDiffusion/Inbound/V2/Policy.hs | 64 ++++++++-- .../ObjectDiffusion/Inbound/V2/Registry.hs | 111 ++++++++++-------- .../ObjectDiffusion/Inbound/V2/State.hs | 82 ++++++------- .../ObjectDiffusion/Inbound/V2/Types.hs | 74 +++++------- 6 files changed, 271 insertions(+), 226 deletions(-) diff --git a/ouroboros-consensus/bench/ObjectDiffusion-bench/Main.hs b/ouroboros-consensus/bench/ObjectDiffusion-bench/Main.hs index bd8f41f2f3..963f3c026a 100644 --- a/ouroboros-consensus/bench/ObjectDiffusion-bench/Main.hs +++ b/ouroboros-consensus/bench/ObjectDiffusion-bench/Main.hs @@ -15,11 +15,10 @@ import Control.Exception (evaluate) import Data.Hashable (Hashable) import Debug.Trace (traceMarkerIO) import GHC.Generics (Generic) +import Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.Decision qualified as OD import System.Random.SplitMix qualified as SM -import Test.Tasty.Bench import Test.QuickCheck (Arbitrary (..)) - -import Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.Decision qualified as OD +import Test.Tasty.Bench -- TODO: We will probably want to use the actual types used in vote/cert diffusion, -- instead of placeholders. 
@@ -47,51 +46,48 @@ instance Arbitrary DummyObject where main :: IO () main = defaultMain - [ bgroup "ouroboros-consensus:ObjectDiffusion" - [ bgroup "VoteDiffusion" - [ env - (do let a = OD.mkDecisionContext (SM.mkSMGen 123) 10 - evaluate (rnf a) - traceMarkerIO "evaluated decision context" - return a - ) - (\a -> bench "makeDecisions: 10" $ - nf makeVoteDiffusionDecision a - ) - , env - (do let a = OD.mkDecisionContext (SM.mkSMGen 456) 100 - evaluate (rnf a) - traceMarkerIO "evaluated decision context" - return a - ) - (\a -> bench "makeDecisions: 100" $ - nf makeVoteDiffusionDecision a - ) - , env - (do let a = OD.mkDecisionContext (SM.mkSMGen 789) 1_000 - evaluate (rnf a) - traceMarkerIO "evaluated decision context" - return a - ) - (\a -> bench "makeDecisions: 1_000" $ - nf makeVoteDiffusionDecision a - ) + [ bgroup + "ouroboros-consensus:ObjectDiffusion" + [ bgroup + "VoteDiffusion" + [ env + ( do + let a = OD.mkDecisionContext (SM.mkSMGen 123) 10 Nothing + evaluate (rnf a) + traceMarkerIO "evaluated decision context" + return a + ) + ( \a -> + bench "makeDecisions: 10" $ + nf makeVoteDiffusionDecision a + ) + , env + ( do + let a = OD.mkDecisionContext (SM.mkSMGen 456) 100 Nothing + evaluate (rnf a) + traceMarkerIO "evaluated decision context" + return a + ) + ( \a -> + bench "makeDecisions: 100" $ + nf makeVoteDiffusionDecision a + ) + , env + ( do + let a = OD.mkDecisionContext (SM.mkSMGen 789) 1_000 Nothing + evaluate (rnf a) + traceMarkerIO "evaluated decision context" + return a + ) + ( \a -> + bench "makeDecisions: 1_000" $ + nf makeVoteDiffusionDecision a + ) + ] + , bgroup "CertDiffusion" [] ] - , bgroup "CertDiffusion" [] - ] ] - where - -- TODO: We probably want to use the decision policy for vote/cert diffusion - -- instead of an arbitrary one. 
- makeVoteDiffusionDecision = \OD.DecisionContext - { OD.dcRng - , OD.dcHasObject - , OD.dcDecisionPolicy - , OD.dcGlobalState - , OD.dcPrevDecisions - } -> OD.makeDecisions @DummyPeerAddr @DummyObjectId @DummyObject - dcRng - dcHasObject - dcDecisionPolicy - dcGlobalState - dcPrevDecisions + where + -- TODO: We probably want to use the decision policy for vote/cert diffusion + -- instead of an arbitrary one. + makeVoteDiffusionDecision = \decisionContext -> OD.makeDecisions @DummyPeerAddr @DummyObjectId @DummyObject decisionContext diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Decision.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Decision.hs index 7b1dfe2698..29f96b2b2f 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Decision.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Decision.hs @@ -1,4 +1,6 @@ {-# LANGUAGE BlockArguments #-} +{-# LANGUAGE DeriveAnyClass #-} +{-# LANGUAGE DeriveGeneric #-} {-# LANGUAGE DerivingVia #-} {-# LANGUAGE ImportQualifiedPost #-} {-# LANGUAGE NamedFieldPuns #-} @@ -20,19 +22,21 @@ import Data.Hashable (Hashable (..)) import Data.Map.Merge.Strict qualified as Map import Data.Map.Strict (Map) import Data.Map.Strict qualified as Map +import Data.Maybe (fromMaybe) import Data.Sequence.Strict (StrictSeq) import Data.Sequence.Strict qualified as StrictSeq import Data.Set (Set) import Data.Set qualified as Set +import GHC.Generics (Generic) import Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.Policy import Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.State import Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.Types import Ouroboros.Network.Protocol.ObjectDiffusion.Type +import System.Random (StdGen, mkStdGen) +import 
System.Random.SplitMix (SMGen, nextInt) import Test.QuickCheck (Arbitrary (..)) import Test.QuickCheck.Gen (Gen (..)) import Test.QuickCheck.Random (QCGen (..)) -import System.Random.SplitMix (SMGen, nextInt) -import System.Random (StdGen, mkStdGen) data DecisionContext peerAddr objectId object = DecisionContext { dcRng :: StdGen @@ -41,18 +45,11 @@ data DecisionContext peerAddr objectId object = DecisionContext , dcGlobalState :: DecisionGlobalState peerAddr objectId object , dcPrevDecisions :: Map peerAddr (PeerDecision objectId object) } + deriving stock Generic + deriving anyclass NFData -instance - ( NFData peerAddr - , NFData objectId - , NFData object - ) => - NFData (DecisionContext peerAddr objectId object) where - rnf = undefined +-- TODO: Using `sized` to control size, we could maybe provide directly an instance of Arbitrary for DecisionContext? --- TODO: do not generate dcDecisionPolicy arbitrarily, it makes little sense. --- Instead we should provide decision policies fit for the concrete object types --- we want to make decisions for. mkDecisionContext :: forall peerAddr objectId object. 
( Arbitrary peerAddr @@ -64,19 +61,22 @@ mkDecisionContext :: ) => SMGen -> Int -> + -- | If we want to provide a specific decision policy instead of relying on an arbitrary variation of the default one + Maybe DecisionPolicy -> DecisionContext peerAddr objectId object -mkDecisionContext stdGen size = unGen gen (QCGen stdGen') size - where - (salt, stdGen') = nextInt stdGen - gen :: Gen (DecisionContext peerAddr objectId object) - gen = do - dcRng <- mkStdGen <$> arbitrary - dcDecisionPolicy <- arbitrary - dcGlobalState <- arbitrary - dcPrevDecisions <- arbitrary - let dcHasObject objId = - hashWithSalt salt objId `mod` 2 == 0 - pure $ DecisionContext +mkDecisionContext stdGen size mPolicy = unGen gen (QCGen stdGen') size + where + (salt, stdGen') = nextInt stdGen + gen :: Gen (DecisionContext peerAddr objectId object) + gen = do + dcRng <- mkStdGen <$> arbitrary + dcDecisionPolicy <- fromMaybe arbitrary (pure <$> mPolicy) + dcGlobalState <- arbitrary + dcPrevDecisions <- arbitrary + let dcHasObject objId = + hashWithSalt salt objId `mod` 2 == 0 + pure $ + DecisionContext { dcRng , dcHasObject , dcDecisionPolicy @@ -93,25 +93,25 @@ makeDecisions :: ( Ord peerAddr , Ord objectId ) => - StdGen -> - (objectId -> Bool) -> - -- | decision decisionPolicy - DecisionPolicy -> - -- | decision context - DecisionGlobalState peerAddr objectId object -> - -- | Previous decisions - Map peerAddr (PeerDecision objectId object) -> + DecisionContext peerAddr objectId object -> -- | New decisions Map peerAddr (PeerDecision objectId object) -makeDecisions rng hasObject decisionPolicy globalState prevDecisions = +makeDecisions DecisionContext{dcRng, dcHasObject, dcDecisionPolicy, dcGlobalState, dcPrevDecisions} = let -- A subset of peers are currently executing a decision. 
We shouldn't update the decision for them - frozenPeersToDecisions = Map.filter (\PeerDecision{pdStatus} -> pdStatus == DecisionBeingActedUpon) prevDecisions + frozenPeersToDecisions = Map.filter (\PeerDecision{pdStatus} -> pdStatus == DecisionBeingActedUpon) dcPrevDecisions -- We do it in two steps, because computing the acknowledgment tell which objects from dpsObjectsAvailableIds sets of each peer won't actually be available anymore (as soon as we ack them), -- so that the pickObjectsToReq function can take this into account. - (ackAndRequestIdsDecisions, peerToIdsToAck) = computeAck hasObject decisionPolicy globalState frozenPeersToDecisions - peersToObjectsToReq = pickObjectsToReq rng hasObject decisionPolicy globalState frozenPeersToDecisions peerToIdsToAck + (ackAndRequestIdsDecisions, peerToIdsToAck) = computeAck dcHasObject dcDecisionPolicy dcGlobalState frozenPeersToDecisions + peersToObjectsToReq = + pickObjectsToReq + dcRng + dcHasObject + dcDecisionPolicy + dcGlobalState + frozenPeersToDecisions + peerToIdsToAck in Map.intersectionWith (\decision objectsToReqIds -> decision{pdObjectsToReqIds = objectsToReqIds}) diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Policy.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Policy.hs index b302cf50f5..77d1ecd85a 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Policy.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Policy.hs @@ -1,16 +1,25 @@ +{-# LANGUAGE DeriveAnyClass #-} +{-# LANGUAGE DeriveGeneric #-} +{-# LANGUAGE DerivingStrategies #-} +{-# LANGUAGE NamedFieldPuns #-} +{-# LANGUAGE ScopedTypeVariables #-} +{-# LANGUAGE TypeApplications #-} + module Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.Policy ( DecisionPolicy (..) 
, defaultDecisionPolicy ) where -import Test.QuickCheck (Arbitrary (..)) - +import Control.DeepSeq (NFData) +import GHC.Generics (Generic) +import NoThunks.Class import Ouroboros.Consensus.MiniProtocol.ObjectDiffusion.Inbound.V2.Types (ObjectMultiplicity (..)) import Ouroboros.Network.Protocol.ObjectDiffusion.Type ( NumObjectIdsReq (..) , NumObjectsOutstanding (..) , NumObjectsReq (..) ) +import Test.QuickCheck (Arbitrary (..), Gen, choose) -- | Policy for making decisions data DecisionPolicy = DecisionPolicy @@ -25,15 +34,24 @@ data DecisionPolicy = DecisionPolicy , dpTargetObjectRedundancy :: !ObjectMultiplicity -- ^ from how many peers download the `objectId` simultaneously } - deriving Show + deriving stock (Show, Eq, Generic) + deriving anyclass (NFData, NoThunks) instance Arbitrary DecisionPolicy where - arbitrary = DecisionPolicy - <$> (NumObjectIdsReq <$> arbitrary) - <*> (NumObjectsOutstanding <$> arbitrary) - <*> (NumObjectsReq <$>arbitrary) - <*> (NumObjectsReq <$>arbitrary) - <*> (ObjectMultiplicity <$> arbitrary) + arbitrary = + let DecisionPolicy + { dpMaxNumObjectIdsReq + , dpMaxNumObjectsOutstanding + , dpMaxNumObjectsInflightPerPeer + , dpMaxNumObjectsInflightTotal + , dpTargetObjectRedundancy + } = defaultDecisionPolicy + in DecisionPolicy + <$> (chooseGeometricWithMedian dpMaxNumObjectIdsReq) + <*> (chooseGeometricWithMedian dpMaxNumObjectsOutstanding) + <*> (chooseGeometricWithMedian dpMaxNumObjectsInflightPerPeer) + <*> (chooseGeometricWithMedian dpMaxNumObjectsInflightTotal) + <*> (chooseGeometricWithMedian dpTargetObjectRedundancy) defaultDecisionPolicy :: DecisionPolicy defaultDecisionPolicy = @@ -44,3 +62,31 @@ defaultDecisionPolicy = , dpMaxNumObjectsInflightTotal = 20 , dpTargetObjectRedundancy = 2 } + +-- TODO: this needs to be tested and inspected + +-- | Geometric-decay generator over [1 .. maxBound - 1] for the type 'a'. +-- Smaller values are more likely; the (lower) median is ~ medianTarget. 
+-- Works for any Integral + Bounded numeric type (e.g., Int, Word32, Int64). +chooseGeometricWithMedian :: forall a. (Integral a, Bounded a) => a -> Gen a +chooseGeometricWithMedian medianTarget + | (maxBound @a) <= 1 = + error "Type's maxBound <= 1: no room for [1..maxBound-1]" + | medianTarget < 1 || medianTarget >= maxBound = + error "medianTarget must be in [1 .. maxBound-1]" + | otherwise = do + let lo = 1 + hi = maxBound - 1 + -- use Integer for counts, Double for CDF inversion + nI = toInteger (hi - lo + 1) + mI = toInteger (medianTarget - lo + 1) + n = fromIntegral nI :: Double + m = fromIntegral mI :: Double + p = 1 - 2 ** (-1 / m) -- set so P(X ≤ median) ≈ 0.5 + q = 1 - p -- decay factor + qn = q ** n -- truncation term + u <- choose (0, 1 :: Double) + let y = 1 - u * (1 - qn) + k = floor (log y / log q) -- inverse truncated geometric CDF + k' = max 0 (min (floor (n - 1)) k) + pure (lo + fromInteger k') diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Registry.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Registry.hs index 24507e1e50..3f9c73106e 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Registry.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Registry.hs @@ -166,54 +166,58 @@ withPeer return newChan let !inboundPeerAPI = - PeerStateAPI - { psaReadDecision = atomically $ do - -- This should block until the decision has status `DecisionUnread` - -- which means it is a new decision that the peer has not acted upon yet - -- If `DecisionCompleted` is read here, it means the decision logic hasn't had time to make a new decision for this peer - decision@PeerDecision{pdStatus} <- readTVar decisionChan - when (pdStatus == DecisionBeingActedUpon) $ error "Forgot to call `psaOnDecisionCompleted` for 
this peer" - check $ pdStatus == DecisionUnread - let decision' = decision{pdStatus = DecisionBeingActedUpon} - writeTVar decisionChan decision' - return decision' - , psaOnDecisionCompleted = atomically $ do - decision@PeerDecision{pdStatus} <- readTVar decisionChan - when (pdStatus == DecisionUnread) $ error "Forgot to call `psaReadDecision` for this peer, or the decision thread has mistakenly updated the decision for this peer while it was executing it" - when (pdStatus == DecisionCompleted) $ error "`psaOnDecisionCompleted` has already been called for this peer" - let decision' = decision{pdStatus = DecisionCompleted} - writeTVar decisionChan decision' - , psaOnRequestIds = - State.onRequestIds - objectDiffusionTracer - decisionTracer - globalStateVar - peerAddr - , psaOnRequestObjects = - State.onRequestObjects - objectDiffusionTracer - decisionTracer - globalStateVar - peerAddr - , psaOnReceiveIds = - State.onReceiveIds - objectDiffusionTracer - decisionTracer - objectPoolWriter - globalStateVar - peerAddr - , psaOnReceiveObjects = \objects -> do - PeerDecision{pdObjectsToReqIds} <- atomically $ readTVar decisionChan - State.onReceiveObjects - objectDiffusionTracer - decisionTracer - globalStateVar - objectPoolWriter - objectPoolSem - peerAddr - pdObjectsToReqIds - objects - } + PeerStateAPI + { psaReadDecision = atomically $ do + -- This should block until the decision has status `DecisionUnread` + -- which means it is a new decision that the peer has not acted upon yet + -- If `DecisionCompleted` is read here, it means the decision logic hasn't had time to make a new decision for this peer + decision@PeerDecision{pdStatus} <- readTVar decisionChan + when (pdStatus == DecisionBeingActedUpon) $ + error "Forgot to call `psaOnDecisionCompleted` for this peer" + check $ pdStatus == DecisionUnread + let decision' = decision{pdStatus = DecisionBeingActedUpon} + writeTVar decisionChan decision' + return decision' + , psaOnDecisionCompleted = atomically $ do + 
decision@PeerDecision{pdStatus} <- readTVar decisionChan + when (pdStatus == DecisionUnread) $ + error + "Forgot to call `psaReadDecision` for this peer, or the decision thread has mistakenly updated the decision for this peer while it was executing it" + when (pdStatus == DecisionCompleted) $ + error "`psaOnDecisionCompleted` has already been called for this peer" + let decision' = decision{pdStatus = DecisionCompleted} + writeTVar decisionChan decision' + , psaOnRequestIds = + State.onRequestIds + objectDiffusionTracer + decisionTracer + globalStateVar + peerAddr + , psaOnRequestObjects = + State.onRequestObjects + objectDiffusionTracer + decisionTracer + globalStateVar + peerAddr + , psaOnReceiveIds = + State.onReceiveIds + objectDiffusionTracer + decisionTracer + objectPoolWriter + globalStateVar + peerAddr + , psaOnReceiveObjects = \objects -> do + PeerDecision{pdObjectsToReqIds} <- atomically $ readTVar decisionChan + State.onReceiveObjects + objectDiffusionTracer + decisionTracer + globalStateVar + objectPoolWriter + objectPoolSem + peerAddr + pdObjectsToReqIds + objects + } -- register the peer in the global state now modifyTVar globalStateVar registerPeerGlobalState @@ -292,12 +296,19 @@ decisionLogicThread decisionTracer countersTracer ObjectPoolWriter{opwHasObject} -- because makeDecisions should be atomic with respect to reading the global state and -- reading the previous decisions (newDecisions, counters) <- atomically $ do - decisionsChannels <- readTVar decisionChannelsVar prevDecisions <- traverse readTVar decisionsChannels globalState <- readTVar globalStateVar hasObject <- opwHasObject - let newDecisions = makeDecisions rng hasObject decisionPolicy globalState prevDecisions + let newDecisions = + makeDecisions + DecisionContext + { dcRng = rng + , dcHasObject = hasObject + , dcDecisionPolicy = decisionPolicy + , dcGlobalState = globalState + , dcPrevDecisions = prevDecisions + } peerToChannel <- readTVar decisionChannelsVar -- Pair decision 
channel with the corresponding decision diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/State.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/State.hs index 6fd07ecc85..a3d237be48 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/State.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/State.hs @@ -54,7 +54,7 @@ onRequestIds globalStateVar ( \globalState -> let globalState' = onRequestIdsImpl peerAddr numIdsToAck numIdsToReq globalState - in (globalState', globalState') + in (globalState', globalState') ) traceWith odTracer (TraceObjectDiffusionInboundRequestedIds (fromIntegral numIdsToReq)) traceWith decisionTracer (TraceDecisionLogicGlobalStateUpdated "onRequestIds" globalState') @@ -89,9 +89,9 @@ onRequestIdsImpl -- We compute the ids to ack and new state of the FIFO based on the number of ids to ack given by the decision logic (idsToAck, dpsOutstandingFifo') = assert (StrictSeq.length dpsOutstandingFifo >= fromIntegral numIdsToAck) $ - StrictSeq.splitAt - (fromIntegral numIdsToAck) - dpsOutstandingFifo + StrictSeq.splitAt + (fromIntegral numIdsToAck) + dpsOutstandingFifo -- We remove the acknowledged ids from dpsObjectsAvailableIds if they were present. 
-- We need to do that because objects that were advertised by this corresponding outbound peer @@ -152,13 +152,13 @@ onRequestObjectsImpl Map.adjust ( \ps@DecisionPeerState{dpsObjectsAvailableIds, dpsObjectsInflightIds} -> assert - ( objectIds `Set.isSubsetOf` dpsObjectsAvailableIds - && Set.null (objectIds `Set.intersection` dpsObjectsInflightIds) - ) $ - ps - { dpsObjectsAvailableIds = dpsObjectsAvailableIds \\ objectIds - , dpsObjectsInflightIds = dpsObjectsInflightIds `Set.union` objectIds - } + ( objectIds `Set.isSubsetOf` dpsObjectsAvailableIds + && Set.null (objectIds `Set.intersection` dpsObjectsInflightIds) + ) + $ ps + { dpsObjectsAvailableIds = dpsObjectsAvailableIds \\ objectIds + , dpsObjectsInflightIds = dpsObjectsInflightIds `Set.union` objectIds + } ) peerAddr dgsPeerStates @@ -197,26 +197,27 @@ onReceiveIds globalStateVar ( \globalState -> let globalState' = onReceiveIdsImpl peerAddr numIdsInitiallyRequested receivedIds globalState - in (globalState', globalState') + in (globalState', globalState') ) traceWith odTracer (TraceObjectDiffusionInboundReceivedIds (length receivedIds)) traceWith decisionTracer (TraceDecisionLogicGlobalStateUpdated "onReceiveIds" globalState') - where - checkProtocolErrors :: - (objectId -> Bool) -> - DecisionPeerState objectId object-> - NumObjectIdsReq -> - [objectId] -> - m () - checkProtocolErrors hasObject DecisionPeerState{dpsObjectsAvailableIds, dpsObjectsInflightIds} nReq ids = do - when (length ids > fromIntegral nReq) $ throw ProtocolErrorObjectIdsNotRequested - let idSet = Set.fromList ids - when (length ids /= Set.size idSet) $ throw ProtocolErrorObjectIdsDuplicate - when - ( (not $ Set.null $ idSet `Set.intersection` dpsObjectsAvailableIds) - || (not $ Set.null $ idSet `Set.intersection` dpsObjectsInflightIds) - || (any hasObject ids) - ) $ throw ProtocolErrorObjectIdAlreadyKnown + where + checkProtocolErrors :: + (objectId -> Bool) -> + DecisionPeerState objectId object -> + NumObjectIdsReq -> + [objectId] 
-> + m () + checkProtocolErrors hasObject DecisionPeerState{dpsObjectsAvailableIds, dpsObjectsInflightIds} nReq ids = do + when (length ids > fromIntegral nReq) $ throw ProtocolErrorObjectIdsNotRequested + let idSet = Set.fromList ids + when (length ids /= Set.size idSet) $ throw ProtocolErrorObjectIdsDuplicate + when + ( (not $ Set.null $ idSet `Set.intersection` dpsObjectsAvailableIds) + || (not $ Set.null $ idSet `Set.intersection` dpsObjectsInflightIds) + || (any hasObject ids) + ) + $ throw ProtocolErrorObjectIdAlreadyKnown onReceiveIdsImpl :: forall peerAddr object objectId. @@ -312,7 +313,7 @@ onReceiveObjects peerAddr objectsReceivedMap globalState - in (globalState', globalState') + in (globalState', globalState') ) traceWith odTracer (TraceObjectDiffusionInboundReceivedObjects (length objectsReceived)) traceWith tracer (TraceDecisionLogicGlobalStateUpdated "onReceiveObjects" globalState') @@ -324,15 +325,15 @@ onReceiveObjects poolSem peerAddr objectsReceivedMap - where - checkProtocolErrors :: - Set objectId-> - Map objectId object -> - m () - checkProtocolErrors requested received' = do - let received = Map.keysSet received' - when (not $ Set.null $ requested \\ received) $ throw ProtocolErrorObjectMissing - when (not $ Set.null $ received \\ requested) $ throw ProtocolErrorObjectNotRequested + where + checkProtocolErrors :: + Set objectId -> + Map objectId object -> + m () + checkProtocolErrors requested received' = do + let received = Map.keysSet received' + when (not $ Set.null $ requested \\ received) $ throw ProtocolErrorObjectMissing + when (not $ Set.null $ received \\ requested) $ throw ProtocolErrorObjectNotRequested onReceiveObjectsImpl :: forall peerAddr object objectId. 
@@ -366,8 +367,9 @@ onReceiveObjectsImpl dgsPeerStates -- subtract requested from in-flight - dpsObjectsInflightIds' = assert (objectsReceivedIds `Set.isSubsetOf` dpsObjectsInflightIds) $ - dpsObjectsInflightIds \\ objectsReceivedIds + dpsObjectsInflightIds' = + assert (objectsReceivedIds `Set.isSubsetOf` dpsObjectsInflightIds) $ + dpsObjectsInflightIds \\ objectsReceivedIds dpsObjectsOwtPool' = dpsObjectsOwtPool <> objectsReceived diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Types.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Types.hs index 22313198b9..3cfdef80da 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Types.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ObjectDiffusion/Inbound/V2/Types.hs @@ -1,3 +1,4 @@ +{-# LANGUAGE DeriveAnyClass #-} {-# LANGUAGE DeriveGeneric #-} {-# LANGUAGE DerivingVia #-} {-# LANGUAGE ExistentialQuantification #-} @@ -107,7 +108,8 @@ data DecisionPeerState objectId object = DecisionPeerState -- submitted to the objectpool) -- * removed by `withObjectPoolSem` } - deriving (Eq, Show, Generic) + deriving stock (Show, Eq, Generic) + deriving anyclass (NFData, NoThunks) instance ( Arbitrary objectId @@ -116,19 +118,14 @@ instance ) => Arbitrary (DecisionPeerState objectId object) where - arbitrary = DecisionPeerState + arbitrary = + DecisionPeerState <$> (NumObjectIdsReq <$> arbitrary) <*> (fromList <$> arbitrary) <*> arbitrary <*> arbitrary <*> arbitrary -instance - ( NoThunks objectId - , NoThunks object - ) => - NoThunks (DecisionPeerState objectId object) - -- | Shared state of all `ObjectDiffusion` clients. 
data DecisionGlobalState peerAddr objectId object = DecisionGlobalState { dgsPeerStates :: !(Map peerAddr (DecisionPeerState objectId object)) @@ -138,7 +135,8 @@ data DecisionGlobalState peerAddr objectId object = DecisionGlobalState -- there's always an entry in this map even if the set of `objectId`s is -- empty. } - deriving (Eq, Show, Generic) + deriving stock (Show, Eq, Generic) + deriving anyclass (NFData, NoThunks) instance ( Arbitrary peerAddr @@ -147,15 +145,9 @@ instance , Ord peerAddr , Ord objectId ) => - Arbitrary (DecisionGlobalState peerAddr objectId object) where - arbitrary = DecisionGlobalState <$> arbitrary - -instance - ( NoThunks peerAddr - , NoThunks object - , NoThunks objectId - ) => - NoThunks (DecisionGlobalState peerAddr objectId object) + Arbitrary (DecisionGlobalState peerAddr objectId object) + where + arbitrary = DecisionGlobalState <$> arbitrary -- | Merge dpsObjectsAvailableIds from all peers of the global state. dgsObjectsAvailableMultiplicities :: @@ -222,39 +214,37 @@ data PeerDecision objectId object = PeerDecision , pdStatus :: !PeerDecisionStatus -- ^ Whether the peer is actually executing the said decision } - deriving (Show, Eq) + deriving stock (Show, Eq, Generic) + deriving anyclass (NFData, NoThunks) instance ( Arbitrary objectId , Ord objectId ) => - Arbitrary (PeerDecision objectId object) where - arbitrary = PeerDecision + Arbitrary (PeerDecision objectId object) + where + arbitrary = + PeerDecision <$> (NumObjectIdsAck <$> arbitrary) <*> (NumObjectIdsReq <$> arbitrary) <*> arbitrary <*> arbitrary <*> arbitrary -instance - ( NFData objectId - , NFData object - ) => - NFData (PeerDecision objectId object) where - rnf = undefined - data PeerDecisionStatus = DecisionUnread | DecisionBeingActedUpon | DecisionCompleted - deriving (Show, Eq) + deriving stock (Show, Eq, Generic) + deriving anyclass (NFData, NoThunks) instance Arbitrary PeerDecisionStatus where - arbitrary = elements - [ DecisionUnread - , 
DecisionBeingActedUpon - , DecisionCompleted - ] + arbitrary = + elements + [ DecisionUnread + , DecisionBeingActedUpon + , DecisionCompleted + ] -- | A placeholder when no decision has been made, at the beginning of a loop. -- Nothing should be read from it except its status. @@ -272,7 +262,7 @@ unavailableDecision = data TraceDecisionLogic peerAddr objectId object = TraceDecisionLogicGlobalStateUpdated String (DecisionGlobalState peerAddr objectId object) | TraceDecisionLogicDecisionsMade (Map peerAddr (PeerDecision objectId object)) - deriving Show + deriving stock (Show, Eq, Generic) data ObjectDiffusionCounters = ObjectDiffusionCounters @@ -287,7 +277,7 @@ data ObjectDiffusionCounters -- objectpool (each peer need to acquire the semaphore to effectively add -- them to the pool) } - deriving (Eq, Show) + deriving stock (Show, Eq, Generic) makeObjectDiffusionCounters :: Ord objectId => @@ -309,8 +299,8 @@ newtype NumObjectsProcessed = NumObjectsProcessed { getNumObjectsProcessed :: Word64 } - deriving (Eq, Ord, NFData, Generic) - deriving newtype (Num, Enum, Real, Integral, Bounded, NoThunks) + deriving stock (Eq, Ord, Generic) + deriving newtype (NFData, NoThunks, Num, Enum, Real, Integral, Bounded) deriving Semigroup via (Sum Word64) deriving Monoid via (Sum Word64) deriving Show via (Quiet NumObjectsProcessed) @@ -319,8 +309,8 @@ newtype ObjectMultiplicity = ObjectMultiplicity { getObjectMultiplicity :: Word64 } - deriving (Eq, Ord, NFData, Generic) - deriving newtype (Num, Enum, Real, Integral, Bounded, NoThunks) + deriving stock (Eq, Ord, Generic) + deriving newtype (NFData, NoThunks, Num, Enum, Real, Integral, Bounded) deriving Semigroup via (Sum Word64) deriving Monoid via (Sum Word64) deriving Show via (Quiet ObjectMultiplicity) @@ -335,7 +325,7 @@ data TraceObjectDiffusionInbound objectId object -- to act on it. 
TraceObjectDiffusionInboundReceivedControlMessage ControlMessage | TraceObjectDiffusionInboundReceivedDecision (PeerDecision objectId object) - deriving (Eq, Show) + deriving stock (Show, Eq, Generic) data ObjectDiffusionInboundError = ProtocolErrorObjectNotRequested @@ -343,7 +333,7 @@ data ObjectDiffusionInboundError | ProtocolErrorObjectIdAlreadyKnown | ProtocolErrorObjectIdsDuplicate | ProtocolErrorObjectMissing - deriving Show + deriving stock (Show, Eq, Generic) instance Exception ObjectDiffusionInboundError where displayException ProtocolErrorObjectNotRequested =