{-# LANGUAGE BangPatterns        #-}
{-# LANGUAGE DeriveFunctor       #-}
{-# LANGUAGE FlexibleContexts    #-}
{-# LANGUAGE NamedFieldPuns      #-}
{-# LANGUAGE RecordWildCards     #-}
{-# LANGUAGE ScopedTypeVariables #-}


-- | This subsystem manages the discovery and selection of /upstream/ peers.
--
module Ouroboros.Network.PeerSelection.Governor
  ( -- * Design overview
    -- $overview
    -- * Peer selection governor
    -- $peer-selection-governor
    PeerSelectionPolicy (..)
  , PeerSelectionTargets (..)
  , PeerSelectionActions (..)
  , PeerStateActions (..)
  , TracePeerSelection (..)
  , DebugPeerSelection (..)
  , peerSelectionGovernor
    -- * Peer churn governor
    -- $peer-churn-governor
  , peerChurnGovernor
    -- * Internals exported for testing
  , assertPeerSelectionState
  , sanePeerSelectionTargets
  , establishedPeersStatus
  , PeerSelectionState (..)
  , PeerSelectionCounters (..)
  , nullPeerSelectionTargets
  , emptyPeerSelectionState
  , ChurnMode (..)
  ) where

import           Data.Cache
import           Data.Semigroup (Min (..))
import           Data.Void (Void)

import           Control.Applicative (Alternative ((<|>)))
import           Control.Concurrent.JobPool (JobPool)
import qualified Control.Concurrent.JobPool as JobPool
import           Control.Monad.Class.MonadAsync
import           Control.Monad.Class.MonadSTM.Strict
import           Control.Monad.Class.MonadThrow
import           Control.Monad.Class.MonadTime
import           Control.Monad.Class.MonadTimer
import           Control.Tracer (Tracer (..), traceWith)
import           System.Random

import           Ouroboros.Network.BlockFetch (FetchMode (..))
import           Ouroboros.Network.Diffusion.Policies (closeConnectionTimeout)
import qualified Ouroboros.Network.PeerSelection.EstablishedPeers as EstablishedPeers
import qualified Ouroboros.Network.PeerSelection.Governor.ActivePeers as ActivePeers
import qualified Ouroboros.Network.PeerSelection.Governor.EstablishedPeers as EstablishedPeers
import qualified Ouroboros.Network.PeerSelection.Governor.KnownPeers as KnownPeers
import qualified Ouroboros.Network.PeerSelection.Governor.Monitor as Monitor
import qualified Ouroboros.Network.PeerSelection.Governor.RootPeers as RootPeers
import           Ouroboros.Network.PeerSelection.Governor.Types
import qualified Ouroboros.Network.PeerSelection.KnownPeers as KnownPeers
import           Ouroboros.Network.PeerSelection.PeerMetric

{- $overview

We have a number of requirements for constructing our connectivity graphs:

 * We must do it in a decentralised way, using only local information;
 * It should avoid and recover from accidental or deliberate partitions or
   eclipse attacks;
 * The graph should give us good performance for block diffusion. This means
   we need the combination of low hop counts, and minimising the hop lengths.
   We want one slot leader to be able to send to the next within the deadline
   in at least 95% of cases.

[\"Small world\" graph theory](https://press.princeton.edu/books/paperback/9780691117041/small-worlds)
tells us that we can use random graph construction to make graphs with a low
characteristic path length (i.e. hop count). We can build random graphs with
random gossip techniques. This deals with our requirement for decentralisation
and our goal of low hop counts.

The remaining significant issues are:

 * the goal of short hop lengths, and
 * avoiding and recovering from partitions and eclipse attacks.

Our design is to augment random gossip with two /governors/ (control loops) to
address these two issues. The design is relatively simple, and has the virtue
that the policy for the governors can be adjusted with relatively few
compatibility impacts. This should enable the policy to be optimised based on
real-world feedback, and feedback from simulations of scale or scenarios that
are hard (or undesirable) to test in a real deployment.

Each node maintains three sets of known peer nodes:

 [cold peers]: are peers that are known of but where there is no established
               network connection;

 [warm peers]: are peers where a bearer connection is established but it is used
               only for network measurements and is not used for any application
               level consensus protocols;

 [hot peers]: are peers where the bearer connection is actively used for the
              application level consensus protocols.

Limited information is maintained for these peers, based on previous direct
interactions. For cold nodes this will often be absent as there may have been
no previous direct interactions. This information is comparable with
\"reputation\" in other systems, but it should be emphasised that it is purely
local and not shared with any other node. It is not shared because it is not
necessary and because establishing trust in such information is difficult and
would add additional complexity. The information about peers is kept
persistently across node restarts, but it is always safe to re-bootstrap – as
new nodes must do.

For an individual node to join the network, the bootstrapping phase starts by
contacting root nodes and requesting sets of other peers. Newly discovered
peers are added to the cold peer set. It proceeds iteratively by randomly
selecting other peers to contact to request more known peers. This gossip
process is controlled by a governor that has a target to find and maintain a
certain number of cold peers. Bootstrapping is not a special mode; rather it is
just the phase the governor goes through when it starts with a cold peer set
consisting only of the root nodes. This gossiping aspect is closely analogous
to the first stage of Kademlia, but with random selection rather than selection
directed towards finding peers in an artificial metric space.

The root nodes used in the bootstrapping phase are the stakepool relays
published in the blockchain as part of the stakepool registration process.
See the [Shelley delegation design specification, Sections 3.4.4 and 4.2](https://hydra.iohk.io/job/Cardano/cardano-ledger-specs/delegationDesignSpec/latest/download-by-type/doc-pdf/delegation_design_spec).
As with Bitcoin, a recent snapshot of this root set must be distributed with
the software.

The peer selection governor engages in the following activities:

 * the random gossip used to discover more cold peers;
 * promotion of cold peers to be warm peers;
 * demotion of warm peers to cold peers;
 * promotion of warm peers to hot peers; and
 * demotion of hot peers to warm peers.

The peer selection governor has these goals to establish and maintain:

 * a target number of cold peers (e.g. 1000)
 * a target number of hot peers (e.g. order of 2–20)
 * a target number of warm peers (e.g. order of 10–50)
 * a set of warm peers that are sufficiently diverse in terms of hop distance
 * a target churn frequency for hot\/warm changes
 * a target churn frequency for warm\/cold changes
 * a target churn frequency for cold\/unknown changes

The target churn values are adjusted by the /peer churn governor/, which we
will discuss below.
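
The numeric targets above map onto the fields of 'PeerSelectionTargets'; the
churn frequencies are realised by the peer churn governor varying those fields
over time rather than by separate fields. A sketch with illustrative values
only (the root peer target is an assumption, it is not discussed above):

@
exampleTargets :: PeerSelectionTargets
exampleTargets = PeerSelectionTargets {
      targetNumberOfRootPeers        = 100,   -- a subset of the known peers
      targetNumberOfKnownPeers       = 1000,  -- cold + warm + hot
      targetNumberOfEstablishedPeers = 50,    -- warm + hot
      targetNumberOfActivePeers      = 20     -- hot
    }
@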

Local static configuration can also be used to specify that certain known nodes
should be selected as hot or warm peers. This allows for fixed relationships
between nodes controlled by a single organisation, such as a stake pool with
several relays. It also enables private peering relationships between stake
pool operators and other likely deployment scenarios.

Using 5–20 hot peers is not as expensive as it might sound. Keep in mind that
only block headers are sent for each peer. The block body is typically only
requested once. It is also worth noting that the block body will tend to follow
the shortest paths through the connectivity graph formed by the hot peer links.
This is because nodes will typically request the block body from the first node
that sends the block header.

While the purpose of cold and hot peers is clear, the purpose of warm peers
requires further explanation. The primary purpose is to address the challenge
of avoiding too many long hops in the graph. The random gossip is oblivious to
hop distance. By actually connecting to a selection of peers and measuring the
round trip delays we can start to establish which peers are near or far. The
policy for selecting which warm peers to promote to hot peers will take into
account this network hop distance. The purpose of a degree of churn between
cold and warm peers is, in part, to discover the network distance for more
peers and enable further optimisation or adjust to changing conditions. The
purpose of a degree of churn between warm and hot peers is to allow potentially
better warm peers to take over from existing hot peers.

The purpose in maintaining a diversity in hop distances is to assist in
recovery from network events that may disrupt established short paths, such as
internet routing changes, partial loss of connectivity, or accidental formation
of cliques. For example, when a physical infrastructure failure causes the
short paths to a clique of nodes to be lost, if some or all of the nodes in
that clique maintain other longer distance warm links then they can quickly
promote them to hot links and recover. The time to promote from warm to hot
need be no more than one network round trip.

Overall, this approach follows a common pattern for probabilistic search or
optimisation that uses a balance of local optimisation with some elements of
higher order disruption to avoid becoming trapped in some poor local optimum.

The local peer reputation information is also updated when peer connections
fail. The implementation classifies the exceptions that cause connections to
fail into three classes:

 * internal node exceptions e.g. local disk corruption;
 * network failures e.g. dropped TCP connections; and
 * adversarial behaviour, e.g. a protocol violation detected by the
   typed-protocols layer or by the consensus layer.

In the case of adversarial behaviour the peer can be immediately demoted out of
the hot, warm and cold sets. We choose not to maintain negative peer
information for extended periods of time: partly to bound resources, and partly
because such information is of limited value given how cheaply an adversary can
reappear under fresh identities (a Sybil attack).
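
As an illustration only (this module defines no such type), the classification
could be captured as a simple sum type:

@
-- Hypothetical classification of connection failures.
data ConnectionFailureClass =
    LocalFailure        -- internal node exceptions, e.g. local disk corruption
  | NetworkFailure      -- e.g. dropped TCP connections
  | AdversarialFailure  -- protocol violation detected by the typed-protocols
                        -- or consensus layers
@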

The peer churn governor deals with the problem of partition and eclipse –
whether malicious or accidental. It adjusts the behaviour of the peer
selection governor over longer time scales. The outer peer churn governor's
actions are:

 * to adjust the target churn frequencies of the peer selection governor for
   promotion\/demotion between the cold\/warm\/hot states
 * partial or total re-bootstrapping under certain circumstances

The peer churn governor monitors the chain growth quality, comparing it with
the stake distribution. The probability of being in a disconnected clique or
being eclipsed is calculated. As this rises the governor increases the target
frequencies for the churn between the hot, warm, cold, and unknown states. In
the worst case it can re-bootstrap the peer discovery entirely by resetting
the set of known peers.
-}

{-

TODO: need to think about managing established connections with upstream/downstream peers in a more symmetric way.

Can we separate that connection management from policy of upstream/downstream selection?

Upstream peers are ones where we choose to talk to them, and we follow their
chain and submit transactions to them. There is a separate subsystem to manage
/downstream/ peers that initiate connections to us.

There is a distinction between which peer chooses to talk to which, and which
peer actually initiates the TCP connection. This is due to the fact that we
reuse TCP connections to run mini-protocols in both directions. So we can
choose to talk to another peer and find that they already initiated a TCP
connection to us, and so we reuse that. For example we can have cases like this:

 1. They initiate the connection to have our node as one of their upstream peers
 2. We decide to reuse the connection to have them as one of our upstream peers
 3. They decide to stop using us as an upstream peer

This is now more or less equivalent to our node having initiated the connection
in the first place because we chose to have them as an upstream peer.


-}


{- $peer-selection-governor

![A 19th century steam governor](https://upload.wikimedia.org/wikipedia/commons/c/c3/Centrifugal_governor_and_balanced_steam_valve_%28New_Catechism_of_the_Steam_Engine%2C_1904%29.jpg)

The 'peerSelectionGovernor' manages the discovery and selection of /upstream/
peers.

We classify (potential or actual) upstream peers in three nested categories:

@
                                                      ▲
                                               forget │
  ┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┿━┿━━━━━━━━━━━━┓
  ┃                                                     │ discover   ┃
  ┃  Known peers: the set of all known peers.           ▼            ┃
  ┃  Consists of cold, warm and hot peers.                           ┃
  ┃  Expect ~1000                              demote ▲              ┃
  ┃                                            to cold│              ┃
  ┃ ┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┿━┿━━━━━━━━━━┓ ┃
  ┃ ┃                                                   │ promote  ┃ ┃
  ┃ ┃  Established peers: with established bearer.      ▼ to warm  ┃ ┃
  ┃ ┃  Consists of warm and hot peers.                             ┃ ┃
  ┃ ┃  Expect ~10-50                           demote ▲            ┃ ┃
  ┃ ┃                                          to warm│            ┃ ┃
  ┃ ┃ ┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┿━┿━━━━━━━━┓ ┃ ┃
  ┃ ┃ ┃                                                 │ promote┃ ┃ ┃
  ┃ ┃ ┃  Active peers: running consensus protocols.     ▼ to hot ┃ ┃ ┃
  ┃ ┃ ┃  Consists of hot peers.                                  ┃ ┃ ┃
  ┃ ┃ ┃  Expect ~2-20                                            ┃ ┃ ┃
  ┃ ┃ ┃                                                          ┃ ┃ ┃
  ┃ ┃ ┗━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┛ ┃ ┃
  ┃ ┗━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┛ ┃
  ┗━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┛
@

We define the terms /known/, /established/ and /active/ to be nested sets.
We define the terms /cold/, /warm/ and /hot/ to be disjoint sets. Both
collections of terms are useful. For example there is information we wish to
track for all known peers, irrespective of whether they are cold, warm or hot.
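
As a small sketch of the nesting property (assuming the three categories are
projected to plain sets of addresses; the real invariants are checked by
'assertPeerSelectionState'):

@
import qualified Data.Set as Set

nestedSetsInvariant :: Ord peeraddr
                    => Set.Set peeraddr  -- active (hot)
                    -> Set.Set peeraddr  -- established (warm and hot)
                    -> Set.Set peeraddr  -- known (cold, warm and hot)
                    -> Bool
nestedSetsInvariant active established known =
       active      `Set.isSubsetOf` established
    && established `Set.isSubsetOf` known
@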

So we have six transitions to consider:

 * discover a cold peer
 * promote a cold peer to warm
 * promote a warm peer to hot
 * demote a hot peer to warm
 * demote a warm peer to cold
 * forget a cold peer

We want a design that separates the policy from the mechanism. We must
consider what kinds of policy we might like to express and make sure that
information that the policy needs can be made available.

We will consider each case.

== Discovering cold peers

There are two main mechanisms by which we discover cold peers:

 * Externally supplied peer root set
 * Peer gossip

=== Externally supplied peer root set

There are a few potential sources for root sets:

 * Simulation environment
 * IP address lists from static or dynamic configuration
 * DNS names from static or dynamic configuration
 * IP addresses or DNS names for stake pools registered in the blockchain

Note that none of these sources are fully static except for IP addresses from
static configuration. DNS name to IP address mappings are potentially dynamic.
DNS names can refer to both IPv4 and IPv6 addresses, and to pools of addresses.

In some cases we wish to advertise these root peers to others, and sometimes
we want to keep them private. In particular the deployment for stake pools may
involve keeping the stake pool node itself private, and only advertising
relays.

For an externally supplied peer root set, we divide the problem in two with an
interface where a root set provider is responsible for managing a time-varying
set of addresses, and the peer selection governor observes the time-varying
value. This allows multiple implementations of the root set provider, which
deal with the various sources.
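
A minimal sketch of such an interface (hypothetical names; in the
implementation this role is played by fields of 'PeerSelectionActions'):

@
-- The provider keeps the set up to date; the governor re-reads it in STM and
-- is woken up whenever the value changes.
newtype RootPeerSetProvider m peeraddr = RootPeerSetProvider {
      readRootPeers :: STM m (Set.Set peeraddr)
    }
@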

=== Peer gossip

We can ask peers to give us a sample of their set of known peers.

For cold peers we can establish a one-shot connection to ask. For warm peers
we can also ask. We should not ask from the same peer too often. Peers are
expected to return the same set of answers over quite long periods of time.
(This helps peers to distinguish abusive behaviour and reduce the speed with
which the whole network can be mapped.)

So factors we might wish to base our decision on:

 * if we are below the target number of known peers
 * if there are any known peers we have not asked (or attempted to ask)
 * how long since we last asked (so we do not ask too frequently)
 * the known distance of the peer from the root set

This last factor is interesting. Consider what happens if we do a bootstrap
from one root peer. We'll ask it for some more peers and it will give us a
selection. Suppose we pick one of these to get more peers from and it gives us
a similar number of replies. If we now pick the next one randomly from our
combined set we'll have a roughly 50:50 chance of picking from either set.
This approach could quickly lead us into a mostly-depth first exploration of
the graph. But we probably want a more balanced approach between breadth first
and depth first. The traditional way to do a breadth-first or depth-first
traversal is to keep a queue or a stack of nodes that have not yet been asked.

Here's another danger: suppose we ask several nodes in parallel but suppose
one gets back to us quicker than all the others. If we are too quick to choose
again then we are giving an advantage to fast peers, and adversaries could
dedicate resources to exploit this to their advantage to get nodes to pick up
more peers from the set supplied by the adversary.

So this suggests that we should not give undue advantage to peers that respond
very quickly, and we should go mostly breadth first, but with a degree of
randomisation.
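
A sketch of a selection with those properties (a hypothetical helper, assuming
we track which known peers have not yet been asked, and a qualified import of
"Data.Set" as Set):

@
-- Prefer peers that have never been asked (roughly breadth first), but choose
-- among them uniformly at random so that fast responders gain no advantage.
pickPeersToGossip :: Ord peeraddr
                  => StdGen
                  -> Int               -- how many peers to ask this round
                  -> Set.Set peeraddr  -- known peers not yet asked
                  -> (Set.Set peeraddr, StdGen)
pickPeersToGossip rng n notYetAsked = go rng n notYetAsked Set.empty
  where
    go g 0 _         acc = (acc, g)
    go g k available acc
      | Set.null available = (acc, g)
      | otherwise          =
          let (i, g') = randomR (0, Set.size available - 1) g
              p       = Set.elemAt i available
          in  go g' (k - 1) (Set.delete p available) (Set.insert p acc)
@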


== Promoting a cold peer to warm

Promoting a cold peer to warm involves establishing a bearer connection. This
is initiated asynchronously and it is either successful or fails after a
timeout.

Once established, we track the connection for the established peer. The
established connection is used later to promote to hot, or to demote back to
cold. It is also used to be notified if the connection fails for any reason.
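
A sketch of the shape of such a promotion (a hypothetical helper; the real
promotion jobs are built from the 'PeerSelectionActions' record):

@
-- Try to establish a bearer, giving up after a fixed timeout; either outcome
-- is reported back to the governor.
promoteColdToWarm :: MonadTimer m
                  => DiffTime                  -- connect timeout
                  -> (peeraddr -> m peerconn)  -- establish a bearer connection
                  -> peeraddr
                  -> m (Maybe peerconn)        -- Nothing: timed out, stays cold
promoteColdToWarm connectTimeout establish peer =
    timeout connectTimeout (establish peer)
@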

== Promoting a warm peer to hot

Promoting a warm peer to hot involves sending messages on the established
bearer to switch mode from the network protocol used with warm peers, to the
full set of consensus protocols used for hot peers.

== Demoting a hot peer to warm

If we have more hot peers than our target number (or target range) then we
want to pick one to demote. One policy is to pick randomly. It is likely to be
better to pick the peer that is in some sense least useful.

One plausible measure of a peer being least useful is based on the following:
for blocks we adopt into our chain, look at which peer(s) received that header
first. A peer that is never first (or very rarely) is one that is likely to be
downstream from us and hence not useful as a choice of upstream peer. A peer
that is normally behind all others, but sometimes (even rarely) is first is
still useful, since it shows it's an upstream connection to some part of the
network where there are active block producers. Consider the case of a relay
in Europe with one connection to Australia: sometimes blocks will be produced
in Australia and so that connection may be first in those cases.

Tracking the necessary information for this policy would require a separate
component that observes the current chain and the peer candidate chains. Using
this information would need access to that shared state. So we should conclude
that the policy should not be pure as it may need access to such changing state.
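
A sketch of such a scoring function (hypothetical; it assumes a separate
component already counts, per peer, how often that peer was the first to
deliver the header of a block we adopted, qualified imports of "Data.Map" and
"Data.Set", and minimumBy and comparing from "Data.List" and "Data.Ord"):

@
leastUsefulHotPeer :: Ord peeraddr
                   => Map.Map peeraddr Int  -- times each peer was first
                   -> Set.Set peeraddr      -- current hot peers
                   -> Maybe peeraddr        -- candidate for demotion
leastUsefulHotPeer firstCounts hotPeers
  | Set.null hotPeers = Nothing
  | otherwise         =
      Just $ minimumBy (comparing (flip (Map.findWithDefault 0) firstCounts))
                       (Set.toList hotPeers)
@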

== Demoting a warm peer to cold


== Forgetting cold peers

We will always forget known peers when the connection is terminated due to
detected adversarial behaviour. The remaining policy decision is which peers
to forget when we have more than our target number of known peers. We will
only select from the known peers that are cold. Warm or hot known peers have
to first be demoted to cold before we consider them to be forgotten.

We want to pick the least useful cold peers to forget. Factors we may wish to
base our decision on include:

 * Number of unsuccessful connection attempts since last successful connection
 * Pseudo-random selection: some degree of randomness can help mitigate
   accidental systematic correlations or some degree of adversarial behaviour.
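
A sketch of such a selection (a hypothetical helper; sortBy is from
"Data.List", comparing and Down from "Data.Ord", and randomR from
"System.Random"):

@
-- Prefer to forget the cold peers with the most failed connection attempts
-- since the last successful one, adding a small random fuzz so that the
-- ordering is not fully deterministic.
pickColdPeersToForget :: StdGen
                      -> Int                -- how many peers to forget
                      -> [(peeraddr, Int)]  -- cold peers with failure counts
                      -> ([peeraddr], StdGen)
pickColdPeersToForget rng n coldPeers =
    (map fst (take n (sortBy (comparing (Down . snd)) fuzzed)), rng')
  where
    -- each peer's failure count plus a small random fuzz
    fuzzed = zipWith addFuzz coldPeers fuzzes
    addFuzz (p, fails) fuzz = (p, fromIntegral fails + fuzz)

    (fuzzes, rng') = go (length coldPeers) rng
    go 0 g = ([], g)
    go k g = let (x, g')   = randomR (0, 0.5 :: Double) g
                 (xs, g'') = go (k - 1) g'
             in  (x : xs, g'')
@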

-}


-- | The peer selection governor.  It creates a 'JobPool' used to run
-- asynchronous promotion and demotion jobs and runs
-- 'peerSelectionGovernorLoop' over it, starting from
-- 'emptyPeerSelectionState'.
--
peerSelectionGovernor :: (MonadAsync m, MonadLabelledSTM m, MonadMask m,
                          MonadTime m, MonadTimer m, Ord peeraddr)
                      => Tracer m (TracePeerSelection peeraddr)
                      -> Tracer m (DebugPeerSelection peeraddr peerconn)
                      -> Tracer m PeerSelectionCounters
                      -> StdGen
                      -> PeerSelectionActions peeraddr peerconn m
                      -> PeerSelectionPolicy  peeraddr m
                      -> m Void
peerSelectionGovernor tracer debugTracer countersTracer fuzzRng actions policy =
    JobPool.withJobPool $ \jobPool ->
      peerSelectionGovernorLoop
        tracer
        debugTracer
        countersTracer
        actions
        policy
        jobPool
        (emptyPeerSelectionState fuzzRng)

-- | Our pattern here is a loop with two sets of guarded actions:
--
-- * Actions guarded on predicates on the current immutable state, e.g.
--   * below known peer targets & below in-progress limit
--
-- * Actions guarded by blocking and waiting for state changes, e.g.
--   * root peer set changed
--   * churn timeout
--   * async action completed
--   * established connection failed
--
-- We check the internal actions first, and otherwise the blocking actions.
-- In each case we trace the action, update the state and execute the
-- action asynchronously.
--
peerSelectionGovernorLoop :: forall m peeraddr peerconn.
                             (MonadAsync m, MonadMask m,
                              MonadTime m, MonadTimer m,
                              Ord peeraddr)
                          => Tracer m (TracePeerSelection peeraddr)
                          -> Tracer m (DebugPeerSelection peeraddr peerconn)
                          -> Tracer m PeerSelectionCounters
                          -> PeerSelectionActions peeraddr peerconn m
                          -> PeerSelectionPolicy  peeraddr m
                          -> JobPool () m (Completion m peeraddr peerconn)
                          -> PeerSelectionState peeraddr peerconn
                          -> m Void
peerSelectionGovernorLoop tracer
                          debugTracer
                          countersTracer
                          actions
                          policy
                          jobPool =
    loop
  where
    loop :: PeerSelectionState peeraddr peerconn -> m Void
    loop !st = assertPeerSelectionState st $ do
      blockedAt <- getMonotonicTime
      let knownPeers'       = KnownPeers.setCurrentTime blockedAt (knownPeers st)
          establishedPeers' = EstablishedPeers.setCurrentTime blockedAt
                                                              (establishedPeers st)
          st'               = st { knownPeers       = knownPeers',
                                   establishedPeers = establishedPeers' }

      timedDecision <- evalGuardedDecisions blockedAt st'

      -- get the current time after the governor returned from the blocking
      -- 'evalGuardedDecisions' call.
      now <- getMonotonicTime
      let Decision { decisionTrace, decisionJobs, decisionState } =
            timedDecision now
          newCounters = peerStateToCounters decisionState
      traceWith tracer decisionTrace
      traceWithCache countersTracer
                     (countersCache decisionState)
                     newCounters

      mapM_ (JobPool.forkJob jobPool) decisionJobs
      loop (decisionState { countersCache = Cache newCounters })

    evalGuardedDecisions :: Time
                         -> PeerSelectionState peeraddr peerconn
                         -> m (TimedDecision m peeraddr peerconn)
    evalGuardedDecisions blockedAt st =
      case guardedDecisions blockedAt st of
        GuardedSkip _ ->
          -- impossible since guardedDecisions always has something to wait for
          error "peerSelectionGovernorLoop: impossible: nothing to do"

        Guarded Nothing decisionAction -> do
          traceWith debugTracer (TraceGovernorState blockedAt Nothing st)
          atomically decisionAction

        Guarded (Just (Min wakeupAt)) decisionAction -> do
          let wakeupIn = diffTime wakeupAt blockedAt
          traceWith debugTracer (TraceGovernorState blockedAt (Just wakeupIn) st)
          wakupTimeout <- newTimeout wakeupIn
          let wakeup    = awaitTimeout wakupTimeout >> pure (wakeupDecision st)
          timedDecision <- atomically (decisionAction <|> wakeup)
          cancelTimeout wakupTimeout
          return timedDecision

    guardedDecisions :: Time
                     -> PeerSelectionState peeraddr peerconn
                     -> Guarded (STM m) (TimedDecision m peeraddr peerconn)
    guardedDecisions blockedAt st =
      -- All the alternative potentially-blocking decisions.
         Monitor.connections          actions st
      <> Monitor.jobs                 jobPool st
      <> Monitor.targetPeers          actions st
      <> Monitor.localRoots           actions st

      -- All the alternative non-blocking internal decisions.
      <> RootPeers.belowTarget        actions blockedAt  st
      <> KnownPeers.belowTarget       actions     policy st
      <> KnownPeers.aboveTarget                   policy st
      <> EstablishedPeers.belowTarget actions     policy st
      <> EstablishedPeers.aboveTarget actions     policy st
      <> ActivePeers.belowTarget      actions     policy st
      <> ActivePeers.aboveTarget      actions     policy st

      -- There is no rootPeersAboveTarget since the roots target is one sided.

      -- The changedTargets needs to come before the changedLocalRootPeers in
      -- the list of alternates above because our invariant requires that
      -- the number of root nodes be less than our target for known peers,
      -- but at startup our initial targets are 0, so we need to read and set
      -- the targets before we set the root peer set. Otherwise we violate our
      -- invariant (and if we ignored that, we'd try to immediately forget
      root peers because we'd be above target for known peers).


wakeupDecision :: PeerSelectionState peeraddr peerconn
               -> TimedDecision m peeraddr peerconn
wakeupDecision st _now =
  Decision {
    decisionTrace = TraceGovernorWakeup,
    decisionState = st,
    decisionJobs  = []
  }


------------------------
-- Peer churn governor
--

{- $peer-churn-governor

The 'peerChurnGovernor' adjusts the behaviour of the peer selection governor
over longer time scales, varying its targets so that the node keeps trialling
new peers and does not get stuck with a poor, or adversarially chosen,
selection of peers.
-}

-- | The peer churn governor.  It periodically adjusts the
-- 'PeerSelectionTargets' held in the given 'StrictTVar' so that the peer
-- selection governor churns through active and known peers; the interval
-- between adjustments depends on the current 'FetchMode'.
--
peerChurnGovernor :: forall m peeraddr.
                     ( MonadSTM m
                     , MonadMonotonicTime m
                     , MonadDelay m
                     )
                  => Tracer m (TracePeerSelection peeraddr)
                  -> PeerMetrics m peeraddr
                  -> StrictTVar m ChurnMode
                  -> StdGen
                  -> STM m FetchMode
                  -> PeerSelectionTargets
                  -> StrictTVar m PeerSelectionTargets
                  -> m Void
peerChurnGovernor tracer _metrics churnModeVar inRng getFetchMode base peerSelectionVar = do
  -- Wait a while so that not only the closest peers have had the time
  -- to become warm.
  startTs0 <- getMonotonicTime
  -- TODO: revisit the policy once we have local root peers in the governor.
  -- The intention is to give local root peers a head start and avoid
  -- giving an advantage to hostile and quick root peers.
  threadDelay 3
  mode <- atomically updateChurnMode
  atomically $ increaseActivePeers mode
  endTs0 <- getMonotonicTime
  fuzzyDelay inRng (endTs0 `diffTime` startTs0) >>= go

  where

    updateChurnMode :: STM m ChurnMode
    updateChurnMode = do
        fm <- getFetchMode
        let mode = case fm of
                        FetchModeDeadline -> ChurnModeNormal
                        FetchModeBulkSync -> ChurnModeBulkSync
        writeTVar churnModeVar mode
        return mode

    -- TODO: #3396 revisit the policy for genesis
    increaseActivePeers :: ChurnMode -> STM m ()
    increaseActivePeers mode = do
        modifyTVar peerSelectionVar (\targets -> targets {
          targetNumberOfActivePeers =
              case mode of
                   ChurnModeNormal   ->
                       targetNumberOfActivePeers base
                   ChurnModeBulkSync ->
                       min 2 (targetNumberOfActivePeers base)
        })

    decreaseActivePeers :: ChurnMode -> STM m ()
    decreaseActivePeers mode = do
        modifyTVar peerSelectionVar (\targets -> targets {
          targetNumberOfActivePeers =
              case mode of
                   ChurnModeNormal   ->
                       decrease $ targetNumberOfActivePeers base
                   ChurnModeBulkSync ->
                       min 1 (targetNumberOfActivePeers base - 1)
        })


    go :: StdGen -> m Void
    go !rng = do
      startTs <- getMonotonicTime

      churnMode <- atomically updateChurnMode
      traceWith tracer $ TraceChurnMode churnMode

      -- Purge the worst active peer(s).
      atomically $ decreaseActivePeers churnMode

      -- Short delay, we may have no active peers right now
      threadDelay 1

      -- Pick new active peer(s) based on the best performing established
      -- peers.
      atomically $ increaseActivePeers churnMode

      -- Give the promotion process time to start
      threadDelay 1

      -- Forget the worst performing non-active peers.
      atomically $ modifyTVar peerSelectionVar (\targets -> targets {
          targetNumberOfRootPeers  = decrease (targetNumberOfRootPeers base)
        , targetNumberOfKnownPeers = decrease (targetNumberOfKnownPeers base)
        , targetNumberOfEstablishedPeers =
              decrease (targetNumberOfEstablishedPeers base)
        })

      -- Give the governor time to properly demote them.
      threadDelay $ 1 + closeConnectionTimeout

      -- Pick new non-active peers
      atomically $ modifyTVar peerSelectionVar (\targets -> targets {
          targetNumberOfRootPeers        = targetNumberOfRootPeers base
        , targetNumberOfKnownPeers       = targetNumberOfKnownPeers base
        , targetNumberOfEstablishedPeers = targetNumberOfEstablishedPeers base
        })
      endTs <- getMonotonicTime

      fuzzyDelay rng (endTs `diffTime` startTs) >>= go

    -- Randomly delay between churnInterval and churnInterval + maxFuzz seconds.
    fuzzyDelay :: StdGen -> DiffTime -> m StdGen
    fuzzyDelay rng execTime = do
      mode <- atomically getFetchMode
      case mode of
           FetchModeDeadline -> longDelay rng execTime
           FetchModeBulkSync -> shortDelay rng execTime

    fuzzyDelay' :: DiffTime -> Double -> StdGen -> DiffTime -> m StdGen
    fuzzyDelay' baseDelay maxFuzz rng execTime = do
      let (fuzz, rng') = randomR (0, maxFuzz) rng
          delay = realToFrac fuzz + baseDelay - execTime
      traceWith tracer $ TraceChurnWait delay
      threadDelay delay
      return rng'

    longDelay :: StdGen -> DiffTime -> m StdGen
    longDelay = fuzzyDelay' churnInterval 600

    shortDelay :: StdGen -> DiffTime -> m StdGen
    shortDelay = fuzzyDelay' churnIntervalBulk 60

    -- The min time between running the churn governor.
    churnInterval :: DiffTime
    churnInterval = 3300

    churnIntervalBulk :: DiffTime
    churnIntervalBulk = 300

    -- Replace 20% or at least one peer every churnInterval.
    decrease :: Int -> Int
    decrease v = v - max 1 (v `div` 5)
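    -- For example: decrease 100 == 80, decrease 10 == 8, decrease 3 == 2 and
    -- decrease 1 == 0.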