Commit

Follow change everywhere else
Niols committed May 6, 2024
1 parent 5323ab1 commit 958ad56
Showing 9 changed files with 57 additions and 67 deletions.
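Context for the diff below (not part of the commit): all of the changes follow one refactoring of the point-schedule peer types. In this rendering the removed and added lines carry no -/+ markers; the removed lines are the ones still using the old names (nullary HonestPeer, honest, others, value, mkPeers), the added lines use the new ones (HonestPeer 1, honestPeers, adversarialPeers, peers'). Judging only from these call sites, the new interface looks roughly like the sketch below; the definitions are inferred, not copied from Test.Consensus.PointSchedule.Peers, and may differ in detail.

import           Data.Map.Strict (Map)
import qualified Data.Map.Strict as Map

-- Inferred: honest peers are indexed too, so both constructors carry an Int
-- ('HonestPeer 1' is the single honest peer in these tests).
data PeerId
  = HonestPeer Int
  | AdversarialPeer Int
  deriving (Eq, Ord, Show)

-- Inferred: 'Peers' now keeps two maps of plain values keyed by peer index,
-- instead of one 'honest' entry plus an 'others' map of 'Peer'-wrapped
-- values; this is why the 'value' projections disappear throughout the diff.
data Peers a = Peers
  { honestPeers      :: Map Int a
  , adversarialPeers :: Map Int a
  }

-- Inferred replacement for 'mkPeers': a list of honest values and a list of
-- adversarial values, each group numbered from 1.
peers' :: [a] -> [a] -> Peers a
peers' hs as = Peers
  { honestPeers      = Map.fromList (zip [1 ..] hs)
  , adversarialPeers = Map.fromList (zip [1 ..] as)
  }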
@@ -39,8 +39,7 @@ import Test.Consensus.Network.AnchoredFragment.Extras (slotLength)
import Test.Consensus.PeerSimulator.StateView
(PeerSimulatorResult (..), StateView (..), pscrToException)
import Test.Consensus.PointSchedule
import Test.Consensus.PointSchedule.Peers (Peer (..), PeerId (..),
Peers (..))
import Test.Consensus.PointSchedule.Peers (PeerId (..), Peers (..))
import Test.Consensus.PointSchedule.SinglePeer (SchedulePoint (..))
import Test.Util.Orphans.IOLike ()
import Test.Util.TestBlock (TestBlock, TestHash (TestHash),
@@ -166,15 +165,15 @@ resultClassifiers GenesisTest{gtSchedule} RunGenesisTestResult{rgtrStateView} =
StateView{svPeerSimulatorResults} = rgtrStateView

adversaries :: [PeerId]
adversaries = Map.keys $ others gtSchedule
adversaries = fmap AdversarialPeer $ Map.keys $ adversarialPeers gtSchedule

adversariesCount = fromIntegral $ length adversaries

adversariesExceptions :: [(PeerId, SomeException)]
adversariesExceptions = mapMaybe
(\PeerSimulatorResult{psePeerId, pseResult} -> case psePeerId of
HonestPeer -> Nothing
pid -> (pid,) <$> pscrToException pseResult
HonestPeer _ -> Nothing
pid -> (pid,) <$> pscrToException pseResult
)
svPeerSimulatorResults

@@ -252,18 +251,18 @@ scheduleClassifiers GenesisTest{gtSchedule = schedule} =
rollbacks :: Peers Bool
rollbacks = hasRollback <$> schedule

adversaryRollback = any value $ others rollbacks
adversaryRollback = any id $ adversarialPeers rollbacks

honestRollback = value $ honest rollbacks
honestRollback = honestPeers rollbacks Map.! 1

allAdversariesEmpty = all value $ others $ null <$> schedule
allAdversariesEmpty = all id $ adversarialPeers $ null <$> schedule

isTrivial :: PeerSchedule TestBlock -> Bool
isTrivial = \case
[] -> True
(t0, _):points -> all ((== t0) . fst) points

allAdversariesTrivial = all value $ others $ isTrivial <$> schedule
allAdversariesTrivial = all id $ adversarialPeers $ isTrivial <$> schedule

simpleHash ::
HeaderHash block ~ TestHash =>
@@ -7,6 +7,7 @@ import Control.Monad (replicateM)
import Data.Containers.ListUtils (nubOrd)
import Data.Functor (($>))
import Data.List (nub)
import qualified Data.Map.Strict as Map
import Data.Maybe (mapMaybe)
import Ouroboros.Consensus.Block (blockSlot, succWithOrigin)
import Ouroboros.Consensus.MiniProtocol.ChainSync.Client
@@ -22,8 +23,7 @@ import Test.Consensus.PeerSimulator.Run (SchedulerConfig (..),
import Test.Consensus.PeerSimulator.StateView (StateView (..))
import Test.Consensus.PeerSimulator.Trace (TraceEvent (..))
import Test.Consensus.PointSchedule
import Test.Consensus.PointSchedule.Peers (Peer (..), Peers (..),
mkPeers)
import Test.Consensus.PointSchedule.Peers (Peers (..), peers')
import Test.Tasty
import Test.Tasty.QuickCheck
import Test.Util.Orphans.IOLike ()
@@ -70,7 +70,7 @@ prop_happyPath synchronized =
otherHonests <- if synchronized
then pure $ replicate numOthers honest
else replicateM numOthers (genHonestSchedule gt)
pure $ gt $> mkPeers honest otherHonests
pure $ gt $> peers' [honest] otherHonests
)
( defaultSchedulerConfig
{ scEnableCSJ = True
@@ -122,7 +122,7 @@ prop_happyPath synchronized =
genHonestSchedule :: GenesisTest TestBlock () -> Gen (PeerSchedule TestBlock)
genHonestSchedule gt = do
ps <- genUniformSchedulePoints gt
pure $ value $ honest ps
pure $ honestPeers ps Map.! 1

isNewerThanJumpSizeFromTip :: GenesisTestFull TestBlock -> Header TestBlock -> Bool
isNewerThanJumpSizeFromTip gt hdr =
@@ -119,7 +119,7 @@ staticCandidates GenesisTest {gtSecurityParam, gtGenesisWindow, gtBlockTree} =
tips = branchTip <$> candidates

candidates :: Map PeerId (AnchoredFragment TestBlock)
candidates = Map.fromList (zip (HonestPeer : enumerateAdversaries) chains)
candidates = Map.fromList (zip (HonestPeer 1 : enumerateAdversaries) chains)

chains = btTrunk gtBlockTree : (btbFull <$> branches)

@@ -134,7 +134,7 @@ prop_densityDisconnectStatic =
counterexample "it should disconnect some node" (not (null disconnect))
.&&.
counterexample "it should not disconnect the honest peer"
(HonestPeer `notElem` disconnect)
(HonestPeer 1 `notElem` disconnect)
where
mkState :: AnchoredFragment (Header TestBlock) -> ChainSyncState TestBlock
mkState frag =
@@ -192,7 +192,7 @@ initCandidates GenesisTest {gtSecurityParam, gtGenesisWindow, gtBlockTree} =
fullTree = gtBlockTree
}
where
peers = mkPeers (peer trunk (AF.Empty (AF.headAnchor trunk)) (btTrunk gtBlockTree)) (branchPeer <$> branches)
peers = peers' [peer trunk (AF.Empty (AF.headAnchor trunk)) (btTrunk gtBlockTree)] (branchPeer <$> branches)

branchPeer branch = peer (btbPrefix branch) (btbSuffix branch) (btbFull branch)

@@ -229,8 +229,8 @@ data UpdateEvent = UpdateEvent {
}

snapshotTree :: Peers EvolvingPeer -> BlockTree (Header TestBlock)
snapshotTree Peers {honest, others} =
foldr addBranch' (mkTrunk (candidate (value honest))) (candidate . value <$> others)
snapshotTree Peers {honestPeers, adversarialPeers} =
foldr addBranch' (mkTrunk (candidate (honestPeers Map.! 1))) (candidate <$> adversarialPeers)

prettyUpdateEvent :: UpdateEvent -> [String]
prettyUpdateEvent UpdateEvent {target, added, killed, bounds, tree, loeFrag, curChain} =
@@ -273,7 +273,7 @@ updatePeers ::
UpdateEvent ->
Either (MonotonicityResult, Peers EvolvingPeer) Evolution
updatePeers (GenesisWindow sgen) peers killedBefore event@UpdateEvent {target, killed = killedNow}
| HonestPeer `Set.member` killedNow
| HonestPeer 1 `Set.member` killedNow
= Left (HonestKilled, peers)
| not (null violations)
= Left (Nonmonotonic event, peers)
@@ -286,12 +286,12 @@ updatePeers (GenesisWindow sgen) peers killedBefore event@UpdateEvent {target, k
violations = killedBefore \\ killedNow

-- The new state if no violations were detected
evo@Evolution {peers = Peers {others = remaining}}
evo@Evolution {peers = Peers {adversarialPeers = remaining}}
| targetExhausted
-- If the target is done, reset the set of killed peers, since other peers
-- may have lost only against the target.
-- Remove the target from the active peers.
= Evolution {peers = peers {others = Map.delete target (others peers)}, killed = mempty}
= Evolution {peers = deletePeer target peers, killed = mempty}
| otherwise
-- Otherwise replace the killed peers with the current set
= Evolution {peers, killed = killedNow}
@@ -311,11 +311,11 @@ updatePeers (GenesisWindow sgen) peers killedBefore event@UpdateEvent {target, k
-- The selection will then be computed by taking up to k blocks after the immutable tip
-- on this peer's candidate fragment.
firstBranch :: Peers EvolvingPeer -> Peer EvolvingPeer
firstBranch Peers {honest, others} =
firstBranch peers =
fromMaybe newest $
minimumBy (compare `on` forkAnchor) <$> nonEmpty (filter hasForked (toList others))
minimumBy (compare `on` forkAnchor) <$> nonEmpty (filter hasForked (toList (adversarialPeers'' peers)))
where
newest = maximumBy (compare `on` (AF.headSlot . candidate . value)) (honest : toList others)
newest = maximumBy (compare `on` (AF.headSlot . candidate . value)) (toList (honestPeers'' peers) ++ toList (adversarialPeers'' peers))
forkAnchor = fromWithOrigin 0 . AF.anchorToSlotNo . AF.anchor . forkSuffix . value
hasForked Peer {value = EvolvingPeer {candidate, forkSlot}} =
AF.headSlot candidate >= forkSlot
@@ -324,7 +324,7 @@ firstBranch Peers {honest, others} =
-- for all peers, and then taking the earliest among the results.
immutableTip :: Peers EvolvingPeer -> AF.Point (Header TestBlock)
immutableTip peers =
minimum (lastHonest <$> toList (others peers))
minimum (lastHonest <$> toList (adversarialPeers'' peers))
where
lastHonest Peer {value = EvolvingPeer {candidate, forkSlot = NotOrigin forkSlot}} =
AF.headPoint $
@@ -469,7 +469,7 @@ prop_densityDisconnectTriggersChainSel =

( \GenesisTest {gtBlockTree, gtSchedule} stateView@StateView {svTipBlock} ->
let
othersCount = Map.size (others gtSchedule)
othersCount = Map.size (adversarialPeers gtSchedule)
exnCorrect = case exceptionsByComponent ChainSyncClient stateView of
[fromException -> Just DensityTooLow] -> True
[] | othersCount == 0 -> True
@@ -508,13 +508,13 @@ prop_densityDisconnectTriggersChainSel =
advTip = case btbFull branch of
(AF.Empty _) -> error "alternate branch must have at least one block"
(_ AF.:> tipBlock) -> tipBlock
in mkPeers
in peers'
-- Eagerly serve the honest tree, but after the adversary has
-- advertised its chain up to the intersection.
[ (Time 0, scheduleTipPoint trunkTip),
[[(Time 0, scheduleTipPoint trunkTip),
(Time 0.5, scheduleHeaderPoint trunkTip),
(Time 0.5, scheduleBlockPoint trunkTip)
]
]]
-- Advertise the alternate branch early, but wait for the honest
-- node to have served its chain before disclosing the alternate
-- branch is not dense enough.
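One pattern worth noting before the next two files (it also explains the doubled brackets just above): mkPeers took a single honest schedule, while peers' appears to take a list of them, so every call site wraps its honest schedule in one more layer of brackets. In the schedule above that turns '[' / ']' into '[[' / ']]'; in the two files below it turns the surrounding '(' / ')' into '[' / ']'. Schematically, under the assumed signatures from the sketch at the top:

--   old: mkPeers honestSchedule adversarySchedules
--   new: peers' [honestSchedule] adversarySchedules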
@@ -20,7 +20,7 @@ import Test.Consensus.PeerSimulator.Run (SchedulerConfig (..),
defaultSchedulerConfig)
import Test.Consensus.PeerSimulator.StateView
import Test.Consensus.PointSchedule
import Test.Consensus.PointSchedule.Peers (Peers, mkPeers)
import Test.Consensus.PointSchedule.Peers (Peers, peers')
import Test.Consensus.PointSchedule.Shrinking (shrinkPeerSchedules)
import Test.Consensus.PointSchedule.SinglePeer (scheduleBlockPoint,
scheduleHeaderPoint, scheduleTipPoint)
@@ -94,10 +94,10 @@ prop_adversaryHitsTimeouts timeoutsEnabled =
branchTip = case btbFull branch of
(AF.Empty _) -> error "alternate branch must have at least one block"
(_ AF.:> tipBlock) -> tipBlock
in mkPeers
in peers'
-- Eagerly serve the honest tree, but after the adversary has
-- advertised its chain.
( (Time 0, scheduleTipPoint trunkTip) : case intersectM of
[ (Time 0, scheduleTipPoint trunkTip) : case intersectM of
Nothing ->
[ (Time 0.5, scheduleHeaderPoint trunkTip),
(Time 0.5, scheduleBlockPoint trunkTip)
@@ -108,7 +108,7 @@ prop_adversaryHitsTimeouts timeoutsEnabled =
(Time 5, scheduleHeaderPoint trunkTip),
(Time 5, scheduleBlockPoint trunkTip)
]
)
]
-- The one adversarial peer advertises and serves up to the
-- intersection early, then waits more than the short wait timeout.
[ (Time 0, scheduleTipPoint branchTip) : case intersectM of
@@ -23,7 +23,7 @@ import Test.Consensus.PeerSimulator.Run (SchedulerConfig (..),
defaultSchedulerConfig)
import Test.Consensus.PeerSimulator.StateView
import Test.Consensus.PointSchedule
import Test.Consensus.PointSchedule.Peers (Peers, mkPeers,
import Test.Consensus.PointSchedule.Peers (Peers, peers',
peersOnlyHonest)
import Test.Consensus.PointSchedule.Shrinking (shrinkPeerSchedules)
import Test.Consensus.PointSchedule.SinglePeer (scheduleBlockPoint,
@@ -230,10 +230,10 @@ prop_delayAttack lopEnabled =
branchTip = case btbFull branch of
(AF.Empty _) -> error "alternate branch must have at least one block"
(_ AF.:> tipBlock) -> tipBlock
in mkPeers
in peers'
-- Eagerly serve the honest tree, but after the adversary has
-- advertised its chain.
( (Time 0, scheduleTipPoint trunkTip) : case intersectM of
[ (Time 0, scheduleTipPoint trunkTip) : case intersectM of
Nothing ->
[ (Time 0.5, scheduleHeaderPoint trunkTip),
(Time 0.5, scheduleBlockPoint trunkTip)
@@ -244,7 +244,7 @@ prop_delayAttack lopEnabled =
(Time 5, scheduleHeaderPoint trunkTip),
(Time 5, scheduleBlockPoint trunkTip)
]
)
]
-- Advertise the alternate branch early, but don't serve it
-- past the intersection, and wait for LoP bucket.
[ (Time 0, scheduleTipPoint branchTip) : case intersectM of
@@ -40,8 +40,7 @@ import Test.Consensus.PeerSimulator.Run (SchedulerConfig (..),
defaultSchedulerConfig)
import Test.Consensus.PeerSimulator.StateView
import Test.Consensus.PointSchedule
import Test.Consensus.PointSchedule.Peers (PeerId (..), Peers (..),
value)
import Test.Consensus.PointSchedule.Peers (PeerId (..), Peers (..))
import Test.Consensus.PointSchedule.Shrinking
(shrinkByRemovingAdversaries, shrinkPeerSchedules)
import Test.Consensus.PointSchedule.SinglePeer
@@ -92,13 +91,13 @@ theProperty genesisTest stateView@StateView{svSelectedChain} =
-- to the governor that the density is too low.
longerThanGenesisWindow ==>
conjoin [
counterexample "The honest peer was disconnected" (HonestPeer `notElem` disconnected),
counterexample "The honest peer was disconnected" (HonestPeer 1 `notElem` disconnected),
counterexample ("The immutable tip is not honest: " ++ show immutableTip) $
property (isHonest immutableTipHash),
immutableTipIsRecent
]
where
advCount = Map.size (others (gtSchedule genesisTest))
advCount = Map.size (adversarialPeers (gtSchedule genesisTest))

immutableTipIsRecent =
counterexample ("Age of the immutable tip: " ++ show immutableTipAge) $
@@ -132,7 +131,7 @@ theProperty genesisTest stateView@StateView{svSelectedChain} =
[] -> "No peers were disconnected"
peers -> "Some peers were disconnected: " ++ intercalate ", " (condense <$> peers)

honestTipSlot = At $ blockSlot $ snd $ last $ mapMaybe fromBlockPoint $ value $ honest $ gtSchedule genesisTest
honestTipSlot = At $ blockSlot $ snd $ last $ mapMaybe fromBlockPoint $ (Map.! 1) $ honestPeers $ gtSchedule genesisTest

GenesisTest {gtBlockTree, gtGenesisWindow = GenesisWindow s, gtDelay = Delta d} = genesisTest

@@ -215,7 +214,7 @@ prop_leashingAttackStalling =
genLeashingSchedule :: GenesisTest TestBlock () -> QC.Gen (PeersSchedule TestBlock)
genLeashingSchedule genesisTest = do
Peers honest advs0 <- ensureScheduleDuration genesisTest <$> genUniformSchedulePoints genesisTest
advs <- mapM (mapM dropRandomPoints) advs0
advs <- mapM dropRandomPoints advs0
pure $ Peers honest advs

disableBoringTimeouts gt =
@@ -273,9 +272,9 @@ prop_leashingAttackTimeLimited =
let timeLimit = estimateTimeBound
(gtChainSyncTimeouts genesisTest)
(gtLoPBucketParams genesisTest)
(value honest)
(map value $ Map.elems advs0)
advs = fmap (fmap (takePointsUntil timeLimit)) advs0
(honest Map.! 1)
(Map.elems advs0)
advs = fmap (takePointsUntil timeLimit) advs0
extendedHonest = extendScheduleUntil timeLimit <$> honest
pure $ Peers extendedHonest advs

@@ -18,6 +18,7 @@ import Control.Monad.Class.MonadTimer.SI (MonadTimer)
import Control.Tracer (Tracer (..), nullTracer, traceWith)
import Data.Foldable (for_)
import Data.Functor (void)
import qualified Data.List.NonEmpty as NonEmpty
import Data.Map.Strict (Map)
import qualified Data.Map.Strict as Map
import Ouroboros.Consensus.Block
Expand Down Expand Up @@ -470,7 +471,7 @@ runPointSchedule ::
m (StateView TestBlock)
runPointSchedule schedulerConfig genesisTest tracer0 =
withRegistry $ \registry -> do
peerSim <- makePeerSimulatorResources tracer gtBlockTree (getPeerIds gtSchedule)
peerSim <- makePeerSimulatorResources tracer gtBlockTree (NonEmpty.fromList $ getPeerIds gtSchedule)
lifecycle <- nodeLifecycle schedulerConfig genesisTest tracer registry peerSim
(chainDb, stateViewTracers) <- runScheduler
(Tracer $ traceWith tracer . TraceSchedulerEvent)
@@ -598,11 +598,6 @@ instance Condense RenderCell where
CellEllipsis -> " .. "
RenderCell _ cell -> condense cell

renderPeerId :: PeerId -> String
renderPeerId = \case
HonestPeer -> "honest"
PeerId p -> p

slotWidth :: NonEmpty Cell -> SlotWidth
slotWidth =
maximum . fmap cellWidth
@@ -612,7 +607,7 @@ slotWidth =
CellPeers peerIds -> SlotWidth (sum (labelWidth <$> peerIds))
_ -> 1

labelWidth pid = 2 + length (renderPeerId pid)
labelWidth pid = 2 + length (show pid)

sortWidth = \case
CellHere as -> sum (pointWidth <$> as)
@@ -773,7 +768,7 @@ renderSlotNo config width num =

renderPeers :: [PeerId] -> Col
renderPeers peers =
ColCat [ColAspect (pure (Candidate p)) (ColString (" " ++ renderPeerId p)) | p <- peers]
ColCat [ColAspect (pure (Candidate p)) (ColString (" " ++ show p)) | p <- peers]

renderCell :: RenderConfig -> RenderCell -> Col
renderCell config@RenderConfig {ellipsis} = \case
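To close, a minimal usage sketch of the new accessors, assuming the reconstructed Peers and PeerId definitions from the sketch at the top of this page (illustrative only, not code from the repository):

import qualified Data.Map.Strict as Map

-- The single honest value, as the updated call sites fetch it with
-- 'honestPeers ... Map.! 1' (replacing the old 'value $ honest ...').
theHonestValue :: Peers a -> a
theHonestValue ps = honestPeers ps Map.! 1

-- Counting and enumerating adversaries, replacing the old 'others' accessor.
adversaryCount :: Peers a -> Int
adversaryCount = Map.size . adversarialPeers

adversaryIds :: Peers a -> [PeerId]
adversaryIds = fmap AdversarialPeer . Map.keys . adversarialPeers

The remaining changes appear to follow from the same refactoring: getPeerIds now seems to return a plain list (hence the NonEmpty.fromList wrapper at its call site), and renderPeerId is dropped in favour of show on the new PeerId.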
