-
Notifications
You must be signed in to change notification settings - Fork 156
/
Sync.hs
535 lines (499 loc) · 21.2 KB
/
Sync.hs
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
{-# LANGUAGE AllowAmbiguousTypes #-}
{-# LANGUAGE DataKinds #-}
{-# LANGUAGE FlexibleContexts #-}
{-# LANGUAGE NumericUnderscores #-}
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE RankNTypes #-}
{-# LANGUAGE ScopedTypeVariables #-}
{-# LANGUAGE TypeApplications #-}
{-# LANGUAGE TypeFamilies #-}
{-# LANGUAGE NoImplicitPrelude #-}
module Cardano.DbSync.Sync (
ConfigFile (..),
SyncCommand (..),
SyncNodeParams (..),
GenesisFile (..),
LedgerStateDir (..),
NetworkName (..),
SocketPath (..),
MetricSetters (..),
nullMetricSetters,
SyncEnv (..),
configureLogging,
runSyncNode,
) where
import Cardano.BM.Data.Tracer (ToLogObject (..), ToObject)
import Cardano.BM.Trace (Trace, appendName, logInfo, logWarning)
import qualified Cardano.BM.Trace as Logging
import Cardano.Client.Subscription (subscribe)
import qualified Cardano.Crypto as Crypto
import Cardano.Db (runDbIohkLogging)
import qualified Cardano.Db as Db
import Cardano.DbSync.Api
import Cardano.DbSync.Config
import Cardano.DbSync.Database
import Cardano.DbSync.DbAction
import Cardano.DbSync.Epoch
import Cardano.DbSync.Era
import Cardano.DbSync.Error
import Cardano.DbSync.Fix.PlutusDataBytes
import Cardano.DbSync.LocalStateQuery
import Cardano.DbSync.Metrics
import Cardano.DbSync.Tracing.ToObjectOrphans ()
import Cardano.DbSync.Types
import Cardano.DbSync.Util
import Cardano.Prelude hiding (Meta, Nat, option, (%))
import Cardano.Slotting.Slot (EpochNo (..), WithOrigin (..))
import qualified Codec.CBOR.Term as CBOR
import Control.Monad.Trans.Except.Exit (orDie)
import Control.Tracer (Tracer)
import qualified Data.ByteString.Lazy as BSL
import Data.Functor.Contravariant (contramap)
import qualified Data.List as List
import qualified Data.Text as Text
import Database.Persist.Postgresql (ConnectionString, SqlBackend, withPostgresqlConn)
import Network.Mux (MuxTrace, WithMuxBearer)
import Network.Mux.Types (MuxMode (..))
import Network.TypedProtocol.Pipelined (N (..), Nat (Succ, Zero))
import Ouroboros.Consensus.Block.Abstract (CodecConfig)
import Ouroboros.Consensus.Byron.Node ()
import Ouroboros.Consensus.Cardano.Node ()
import Ouroboros.Consensus.Config (configCodec)
import qualified Ouroboros.Consensus.HardFork.Simple as HardFork
import Ouroboros.Consensus.Network.NodeToClient (
ClientCodecs,
Codecs' (..),
cChainSyncCodec,
cStateQueryCodec,
cTxSubmissionCodec,
)
import Ouroboros.Consensus.Node.ErrorPolicy (consensusErrorPolicy)
import qualified Ouroboros.Consensus.Node.ProtocolInfo as Consensus
import Ouroboros.Network.Block (
BlockNo (..),
Point (..),
Tip (..),
blockNo,
genesisPoint,
getTipBlockNo,
)
import Ouroboros.Network.Driver (runPeer)
import Ouroboros.Network.Driver.Simple (runPipelinedPeer)
import Ouroboros.Network.Mux (MuxPeer (..), RunMiniProtocol (..))
import Ouroboros.Network.NodeToClient (
ClientSubscriptionParams (..),
ConnectionId,
ErrorPolicyTrace (..),
Handshake,
IOManager,
LocalAddress,
NetworkSubscriptionTracers (..),
NodeToClientProtocols (..),
TraceSendRecv,
WithAddr (..),
localSnocket,
localStateQueryPeerNull,
localTxMonitorPeerNull,
localTxSubmissionPeerNull,
networkErrorPolicies,
)
import qualified Ouroboros.Network.NodeToClient.Version as Network
import Ouroboros.Network.Protocol.ChainSync.Client (ChainSyncClient)
import qualified Ouroboros.Network.Protocol.ChainSync.Client as Client
import Ouroboros.Network.Protocol.ChainSync.ClientPipelined (
ChainSyncClientPipelined (..),
ClientPipelinedStIdle (..),
ClientPipelinedStIntersect (..),
ClientStNext (..),
chainSyncClientPeerPipelined,
recvMsgIntersectFound,
recvMsgIntersectNotFound,
recvMsgRollBackward,
recvMsgRollForward,
)
import Ouroboros.Network.Protocol.ChainSync.PipelineDecision (
MkPipelineDecision,
PipelineDecision (..),
pipelineDecisionLowHighMark,
runPipelineDecision,
)
import Ouroboros.Network.Protocol.ChainSync.Type (ChainSync)
import Ouroboros.Network.Protocol.LocalStateQuery.Client (localStateQueryClientPeer)
import qualified Ouroboros.Network.Snocket as Snocket
import Ouroboros.Network.Subscription (SubscriptionTrace)
import System.Directory (createDirectoryIfMissing)
-- | Run a db-sync node: read the node configuration and genesis files,
-- build the 'SyncEnv', insert or validate the genesis distribution in the
-- database, and finally start the chain-sync client against the local node.
runSyncNode ::
  MetricSetters ->
  Trace IO Text ->
  IOManager ->
  -- | @aop@ — forwarded unchanged into 'SyncOptions' (presumably
  -- abort-on-panic; confirm against 'SyncOptions').
  Bool ->
  -- | @snEveryFollowing@ — forwarded into 'SyncOptions'.
  Word64 ->
  -- | @snEveryLagging@ — forwarded into 'SyncOptions'.
  Word64 ->
  -- | Postgres connection string, used both for 'mkSyncEnvFromConfig'
  -- and for the genesis insertion/validation connection below.
  ConnectionString ->
  -- | @ranAll@ — passed to 'mkSyncEnvFromConfig'.
  Bool ->
  -- | Database migration action, passed to 'mkSyncEnvFromConfig'.
  RunMigration ->
  SyncNodeParams ->
  IO ()
runSyncNode metricsSetters trce iomgr aop snEveryFollowing snEveryLagging dbConnString ranAll runMigration enp = do
  let configFile = enpConfigFile enp
  enc <- readSyncNodeConfig configFile
  -- Make sure the ledger state directory exists before anything tries to
  -- write into it.
  createDirectoryIfMissing True (unLedgerStateDir $ enpLedgerStateDir enp)
  logInfo trce $ "Using byron genesis file from: " <> (show . unGenesisFile $ dncByronGenesisFile enc)
  logInfo trce $ "Using shelley genesis file from: " <> (show . unGenesisFile $ dncShelleyGenesisFile enc)
  logInfo trce $ "Using alonzo genesis file from: " <> (show . unGenesisFile $ dncAlonzoGenesisFile enc)
  -- Any 'SyncNodeError' raised below is rendered and terminates the program.
  orDie renderSyncNodeError $ do
    genCfg <- readCardanoGenesisConfig enc
    logProtocolMagicId trce $ genesisProtocolMagicId genCfg
    syncEnv <-
      ExceptT $
        mkSyncEnvFromConfig
          trce
          dbConnString
          syncOptions
          (enpLedgerStateDir enp)
          genCfg
          ranAll
          (enpForceIndexes enp)
          runMigration
    -- If the DB is empty it will be inserted, otherwise it will be validated (to make
    -- sure we are on the right chain).
    lift $ Db.runIohkLogging trce $ withPostgresqlConn dbConnString $ \backend -> do
      liftIO $ unless (enpHasLedger enp) $ do
        logInfo trce "Migrating to a no ledger schema"
        Db.noLedgerMigrations backend trce
      lift $ orDie renderSyncNodeError $ insertValidateGenesisDist trce backend (dncNetworkName enc) genCfg (useShelleyInit enc)
      liftIO $ epochStartup (enpExtended enp) trce backend
      -- Only the Cardano flavour of the genesis config is handled here.
      case genCfg of
        GenesisCardano {} -> do
          liftIO $ runSyncNodeClient metricsSetters syncEnv iomgr trce (enpSocketPath enp)
  where
    -- Shelley state is initialised directly from genesis only when the
    -- Shelley hard fork is configured to trigger at epoch 0 (i.e. there is
    -- no preceding Byron era to hand state over from).
    useShelleyInit :: SyncNodeConfig -> Bool
    useShelleyInit cfg =
      case dncShelleyHardFork cfg of
        HardFork.TriggerHardForkAtEpoch (EpochNo 0) -> True
        _ -> False

    insertOptions = defaultInsertOptions

    -- Bundle the command-line / parameter flags into the options record
    -- consumed by 'mkSyncEnvFromConfig'.
    syncOptions =
      SyncOptions (enpExtended enp) aop (enpHasCache enp) (enpHasLedger enp) (enpSkipFix enp) (enpOnlyFix enp) insertOptions snEveryFollowing snEveryLagging
-- | Connect to the local node over its Unix-domain socket and subscribe
-- with the db-sync mini-protocol handlers ('dbSyncProtocols').  The result
-- of 'subscribe' is discarded.
runSyncNodeClient ::
  MetricSetters ->
  SyncEnv ->
  IOManager ->
  Trace IO Text ->
  SocketPath ->
  IO ()
runSyncNodeClient metricsSetters env iomgr trce (SocketPath socketPath) = do
  logInfo trce $ "localInitiatorNetworkApplication: connecting to node via " <> textShow socketPath
  void $
    subscribe
      (localSnocket iomgr)
      codecConfig
      (envNetworkMagic env)
      networkSubscriptionTracers
      clientSubscriptionParams
      (dbSyncProtocols trce env metricsSetters)
  where
    -- Codec configuration derived from the consensus protocol info held in
    -- the ledger environment.
    codecConfig :: CodecConfig CardanoBlock
    codecConfig = configCodec $ Consensus.pInfoConfig (leProtocolInfo $ envLedger env)

    clientSubscriptionParams =
      ClientSubscriptionParams
        { cspAddress = Snocket.localAddressFromPath socketPath
        , cspConnectionAttemptDelay = Nothing
        , cspErrorPolicies = networkErrorPolicies <> consensusErrorPolicy (Proxy @CardanoBlock)
        }

    -- Each tracer logs under its own appended namespace.
    networkSubscriptionTracers =
      NetworkSubscriptionTracers
        { nsMuxTracer = muxTracer
        , nsHandshakeTracer = handshakeTracer
        , nsErrorPolicyTracer = errorPolicyTracer
        , nsSubscriptionTracer = subscriptionTracer
        }

    errorPolicyTracer :: Tracer IO (WithAddr LocalAddress ErrorPolicyTrace)
    errorPolicyTracer = toLogObject $ appendName "ErrorPolicy" trce

    muxTracer :: (Show peer, ToObject peer) => Tracer IO (WithMuxBearer peer MuxTrace)
    muxTracer = toLogObject $ appendName "Mux" trce

    subscriptionTracer :: Tracer IO (Identity (SubscriptionTrace LocalAddress))
    subscriptionTracer = toLogObject $ appendName "Subscription" trce

    handshakeTracer ::
      Tracer
        IO
        ( WithMuxBearer
            (ConnectionId LocalAddress)
            (TraceSendRecv (Handshake Network.NodeToClientVersion CBOR.Term))
        )
    handshakeTracer = toLogObject $ appendName "Handshake" trce
-- | Build the node-to-client mini-protocol handlers used by db-sync:
-- the pipelined chain-sync client (where the real work happens), a local
-- state query client (real only when running without a ledger state),
-- and null handlers for tx-submission and tx-monitor.
dbSyncProtocols ::
  Trace IO Text ->
  SyncEnv ->
  MetricSetters ->
  Network.NodeToClientVersion ->
  ClientCodecs CardanoBlock IO ->
  ConnectionId LocalAddress ->
  NodeToClientProtocols 'InitiatorMode BSL.ByteString IO () Void
dbSyncProtocols trce env metricsSetters _version codecs _connectionId =
  NodeToClientProtocols
    { localChainSyncProtocol = localChainSyncPtcl
    , localTxSubmissionProtocol = dummylocalTxSubmit
    , localStateQueryProtocol = localStateQuery
    , localTxMonitorProtocol =
        InitiatorProtocolOnly $ MuxPeer Logging.nullTracer (cTxMonitorCodec codecs) localTxMonitorPeerNull
    }
  where
    localChainSyncTracer :: Tracer IO (TraceSendRecv (ChainSync CardanoBlock (Point CardanoBlock) (Tip CardanoBlock)))
    localChainSyncTracer = toLogObject $ appendName "ChainSync" trce

    localChainSyncPtcl :: RunMiniProtocol 'InitiatorMode BSL.ByteString IO () Void
    localChainSyncPtcl = InitiatorProtocolOnly $ MuxPeerRaw $ \channel ->
      liftIO . logException trce "ChainSyncWithBlocksPtcl: " $ do
        Db.runIohkLogging trce $ withPostgresqlConn (envConnString env) $ \backend -> liftIO $ do
          -- Install this connection into the environment; consistency level
          -- starts as 'Unchecked' until the client verifies it.
          replaceConnection env backend
          setConsistentLevel env Unchecked
          isFixed <- getIsSyncFixed env
          let skipFix = soptSkipFix $ envOptions env
          let onlyFix = soptOnlyFix $ envOptions env
          -- Run the Plutus-data fix-up pass first when it is requested
          -- explicitly ('onlyFix') or has not yet run and is not skipped.
          if onlyFix || (not isFixed && not skipFix)
            then do
              fd <- runDbIohkLogging backend (getTrace env) $ getWrongPlutusData (getTrace env)
              unless (nullData fd) $
                void $
                  runPeer
                    localChainSyncTracer
                    (cChainSyncCodec codecs)
                    channel
                    ( Client.chainSyncClientPeer $
                        chainSyncClientFix backend (getTrace env) fd
                    )
              setIsFixedAndMigrate env
              -- 'panic' is (ab)used to terminate the whole process when the
              -- user only asked for the fix-up pass.
              when onlyFix $ panic "All Good! This error is only thrown to exit db-sync." -- TODO fix.
            else do
              when skipFix $ setIsFixedAndMigrate env
              -- The Db thread is not forked at this point, so we can use
              -- the connection here. A connection cannot be used concurrently by many
              -- threads
              logInfo trce "Starting chainSyncClient"
              latestPoints <- getLatestPoints env
              -- Each point is paired with a Bool; the leading prefix where
              -- it is True is logged as the "from memory" points, the rest
              -- as "from disk". The split is for logging only.
              let (inMemory, onDisk) = List.span snd latestPoints
              logInfo trce $
                mconcat
                  [ "Suggesting intersection points from memory: "
                  , textShow (fst <$> inMemory)
                  , " and from disk: "
                  , textShow (fst <$> onDisk)
                  ]
              currentTip <- getCurrentTipBlockNo env
              logDbState env
              -- communication channel between datalayer thread and chainsync-client thread
              actionQueue <- newDbActionQueue
              -- Run the DB writer, the offline fetch thread and the
              -- chain-sync peer concurrently; whichever finishes (or fails)
              -- first tears the others down.
              race_
                ( race
                    (runDbThread env metricsSetters actionQueue)
                    (runOfflineFetchThread trce env)
                )
                ( runPipelinedPeer
                    localChainSyncTracer
                    (cChainSyncCodec codecs)
                    channel
                    ( chainSyncClientPeerPipelined $
                        chainSyncClient metricsSetters trce (fst <$> latestPoints) currentTip actionQueue
                    )
                )
              atomically $ writeDbActionQueue actionQueue DbFinish
              -- We should return leftover bytes returned by 'runPipelinedPeer', but
              -- client applications do not care about them (it's only important if one
              -- would like to restart a protocol on the same mux and thus bearer).
              pure ()
          pure ((), Nothing)

    -- Tx submission is not supported: respond with the null peer.
    dummylocalTxSubmit :: RunMiniProtocol 'InitiatorMode BSL.ByteString IO () Void
    dummylocalTxSubmit =
      InitiatorProtocolOnly $
        MuxPeer
          Logging.nullTracer
          (cTxSubmissionCodec codecs)
          localTxSubmissionPeerNull

    -- With a ledger state the local state query is unused (null peer,
    -- no tracing); without one, queries are served by
    -- 'localStateQueryHandler' and traced under "local-state-query".
    localStateQuery :: RunMiniProtocol 'InitiatorMode BSL.ByteString IO () Void
    localStateQuery =
      InitiatorProtocolOnly $
        MuxPeer
          (if hasLedgerState env then Logging.nullTracer else contramap (Text.pack . show) . toLogObject $ appendName "local-state-query" trce)
          (cStateQueryCodec codecs)
          (if hasLedgerState env then localStateQueryPeerNull else localStateQueryClientPeer (localStateQueryHandler (envNoLedgerEnv env)))
-- | 'ChainSyncClient' which traces received blocks and ignores when it
-- receives a request to roll backward. A real wallet client should:
--
-- * at startup send the list of points of the chain to help synchronise with
-- the node;
-- * update its state when the client receives next block or is requested to
-- rollback, see 'clientStNext' below.
--
-- When an intersect with the node is found, we are sure that the next message
-- will trigger the 'recvMsgRollBackward', so this is where we actually handle
-- any necessary rollback. This means that at this point, the 'currentTip' may not
-- be correct. This is not an issue, because we only use it for performance reasons
-- in the pipeline policy.
chainSyncClient ::
  MetricSetters ->
  Trace IO Text ->
  [Point CardanoBlock] ->
  WithOrigin BlockNo ->
  DbActionQueue ->
  ChainSyncClientPipelined CardanoBlock (Point CardanoBlock) (Tip CardanoBlock) IO ()
chainSyncClient metricsSetters trce latestPoints currentTip actionQueue = do
  ChainSyncClientPipelined $ pure $ clientPipelinedStIdle currentTip latestPoints
  where
    clientPipelinedStIdle ::
      WithOrigin BlockNo ->
      [CardanoPoint] ->
      ClientPipelinedStIdle 'Z CardanoBlock (Point CardanoBlock) (Tip CardanoBlock) IO ()
    clientPipelinedStIdle clientTip points =
      -- Notify the core node about our latest points at which we are
      -- synchronised. This client is not persistent and thus it just
      -- synchronises from the genesis block. A real implementation should send
      -- a list of points up to a point which is k blocks deep.
      SendMsgFindIntersect
        (if null points then [genesisPoint] else points)
        ClientPipelinedStIntersect
          { recvMsgIntersectFound = \_hdr tip -> pure $ goTip policy Zero clientTip tip Nothing
          , recvMsgIntersectNotFound = \tip -> pure $ goTip policy Zero clientTip tip Nothing
          }

    -- Pipeline with a low mark of 1 and a high mark of 50 outstanding
    -- requests.
    policy :: MkPipelineDecision
    policy = pipelineDecisionLowHighMark 1 50

    -- Convenience wrapper: extract the server's block number from the tip
    -- before consulting the pipeline decision in 'go'.
    goTip ::
      MkPipelineDecision ->
      Nat n ->
      WithOrigin BlockNo ->
      Tip CardanoBlock ->
      Maybe [CardanoPoint] ->
      ClientPipelinedStIdle n CardanoBlock (Point CardanoBlock) (Tip CardanoBlock) IO ()
    goTip mkPipelineDecision n clientTip serverTip =
      go mkPipelineDecision n clientTip (getTipBlockNo serverTip)

    go ::
      MkPipelineDecision ->
      Nat n ->
      WithOrigin BlockNo ->
      WithOrigin BlockNo ->
      Maybe [CardanoPoint] ->
      ClientPipelinedStIdle n CardanoBlock (Point CardanoBlock) (Tip CardanoBlock) IO ()
    go mkPipelineDecision n clientTip serverTip mPoint =
      case (mPoint, n, runPipelineDecision mkPipelineDecision n clientTip serverTip) of
        -- New intersection points were handed back (from a rollback):
        -- drain all outstanding pipelined responses, then re-find an
        -- intersection at those points.
        (Just points, _, _) -> drainThePipe n $ clientPipelinedStIdle clientTip points
        -- Nothing pipelined: issue a plain (blocking) request.
        (_, _Zero, (Request, mkPipelineDecision')) ->
          SendMsgRequestNext clientStNext (pure clientStNext)
          where
            clientStNext = mkClientStNext $ goTip mkPipelineDecision' n
        -- Pipeline another request without collecting yet.
        (_, _, (Pipeline, mkPipelineDecision')) ->
          SendMsgRequestNextPipelined
            (go mkPipelineDecision' (Succ n) clientTip serverTip Nothing)
        -- Either pipeline one more request or collect a response,
        -- whichever becomes possible first.
        (_, Succ n', (CollectOrPipeline, mkPipelineDecision')) ->
          CollectResponse
            (Just . pure $ SendMsgRequestNextPipelined $ go mkPipelineDecision' (Succ n) clientTip serverTip Nothing)
            (mkClientStNext $ goTip mkPipelineDecision' n')
        -- Must collect: block until a pipelined response arrives.
        (_, Succ n', (Collect, mkPipelineDecision')) ->
          CollectResponse
            Nothing
            (mkClientStNext $ goTip mkPipelineDecision' n')

    -- Handle the next server message: forward blocks onto the DB action
    -- queue (updating metrics), and on rollback wait for the DB thread to
    -- process it before continuing.
    mkClientStNext ::
      ( WithOrigin BlockNo ->
        Tip CardanoBlock ->
        Maybe [CardanoPoint] ->
        ClientPipelinedStIdle n CardanoBlock (Point CardanoBlock) (Tip CardanoBlock) IO ()
      ) ->
      ClientStNext n CardanoBlock (Point CardanoBlock) (Tip CardanoBlock) IO ()
    mkClientStNext finish =
      ClientStNext
        { recvMsgRollForward = \blk tip ->
            logException trce "recvMsgRollForward: " $ do
              setNodeBlockHeight metricsSetters (getTipBlockNo tip)
              -- Enqueue the block and read the new queue length in one
              -- transaction so the metric matches the enqueue.
              newSize <- atomically $ do
                writeDbActionQueue actionQueue $ mkDbApply blk
                lengthDbActionQueue actionQueue
              setDbQueueLength metricsSetters newSize
              pure $ finish (At (blockNo blk)) tip Nothing
        , recvMsgRollBackward = \point tip ->
            logException trce "recvMsgRollBackward: " $ do
              -- This will get the current tip rather than what we roll back to
              -- but will only be incorrect for a short time span.
              (mPoints, newTip) <- waitRollback actionQueue point tip
              pure $ finish newTip tip mPoints
        }
-- | Log the network protocol magic id at info level.
logProtocolMagicId :: Trace IO Text -> Crypto.ProtocolMagicId -> ExceptT SyncNodeError IO ()
logProtocolMagicId tracer pm =
  liftIO $ logInfo tracer ("NetworkMagic: " <> textShow (Crypto.unProtocolMagicId pm))
-- | Collect and discard every outstanding pipelined response, then hand
-- control back to the supplied non-pipelined idle client.
drainThePipe ::
  Nat n ->
  ClientPipelinedStIdle 'Z CardanoBlock (Point CardanoBlock) (Tip CardanoBlock) IO () ->
  ClientPipelinedStIdle n CardanoBlock (Point CardanoBlock) (Tip CardanoBlock) IO ()
drainThePipe Zero client = client
drainThePipe (Succ remaining) client =
  -- Block on the next pipelined response and drop it regardless of
  -- whether it is a roll forward or a roll backward.
  CollectResponse Nothing $
    ClientStNext
      { recvMsgRollForward = \_hdr _tip -> pure $ drainThePipe remaining client
      , recvMsgRollBackward = \_pt _tip -> pure $ drainThePipe remaining client
      }
-- | A non-pipelined chain-sync client that revisits only the blocks named
-- in 'FixData', rewriting Plutus data bytes in the @datum@ and
-- @redeemer_data@ tables (see the startup log message below).
chainSyncClientFix ::
  SqlBackend -> Trace IO Text -> FixData -> ChainSyncClient CardanoBlock (Point CardanoBlock) (Tip CardanoBlock) IO ()
chainSyncClientFix backend tracer fixData = Client.ChainSyncClient $ do
  liftIO $ logInfo tracer "Starting chainsync to fix Plutus Data. This will update database values in tables datum and redeemer_data."
  clientStIdle True (sizeFixData fixData) fixData
  where
    -- Log progress whenever at least 200k entries have been fixed since
    -- the last report; returns the size to measure the next report against.
    updateSizeAndLog :: Int -> Int -> IO Int
    updateSizeAndLog lastSize currentSize = do
      let diffSize = lastSize - currentSize
      if lastSize >= currentSize && diffSize >= 200_000
        then do
          liftIO $ logInfo tracer $ mconcat ["Fixed ", textShow (sizeFixData fixData - currentSize), " Plutus Data"]
          pure currentSize
        else pure lastSize

    -- Find the next point that still needs fixing and ask the node for an
    -- intersection there. If the node does not know the point (it may be
    -- behind), sleep 3 minutes and retry the same intersection.
    clientStIdle :: Bool -> Int -> FixData -> IO (Client.ClientStIdle CardanoBlock (Point CardanoBlock) (Tip CardanoBlock) IO ())
    clientStIdle shouldLog lastSize fds = do
      case spanOnNextPoint fds of
        Nothing -> do
          -- No points left: the fix pass is complete.
          liftIO $ logInfo tracer "Finished chainsync to fix Plutus Data."
          pure $ Client.SendMsgDone ()
        Just (point, fdOnPoint, fdRest) -> do
          when shouldLog $
            liftIO $
              logInfo tracer $
                mconcat ["Starting fixing Plutus Data ", textShow point]
          newLastSize <- liftIO $ updateSizeAndLog lastSize (sizeFixData fds)
          let clientStIntersect =
                Client.ClientStIntersect
                  { Client.recvMsgIntersectFound = \_pnt _tip ->
                      Client.ChainSyncClient $
                        pure $
                          Client.SendMsgRequestNext (clientStNext newLastSize fdOnPoint fdRest) (pure $ clientStNext newLastSize fdOnPoint fdRest)
                  , Client.recvMsgIntersectNotFound = \tip -> Client.ChainSyncClient $ do
                      liftIO $
                        logWarning tracer $
                          mconcat
                            [ "Node can't find block "
                            , textShow point
                            , ". It's probably behind, at "
                            , textShow tip
                            , ". Sleeping for 3 mins and retrying.."
                            ]
                      -- 180 s in microseconds.
                      threadDelay $ 180 * 1_000_000
                      -- Retry the same intersection (recursive reference).
                      pure $ Client.SendMsgFindIntersect [point] clientStIntersect
                  }
          pure $ Client.SendMsgFindIntersect [point] clientStIntersect

    -- On roll forward, fix the Plutus data entries for this block and
    -- continue with the remaining fix data. A rollback simply re-requests
    -- the next block with the same state.
    clientStNext :: Int -> FixData -> FixData -> Client.ClientStNext CardanoBlock (Point CardanoBlock) (Tip CardanoBlock) IO ()
    clientStNext lastSize fdOnPoint fdRest =
      Client.ClientStNext
        { Client.recvMsgRollForward = \blk _tip -> Client.ChainSyncClient $ do
            runDbIohkLogging backend tracer $ fixPlutusData tracer blk fdOnPoint
            clientStIdle False lastSize fdRest
        , Client.recvMsgRollBackward = \_point _tip ->
            Client.ChainSyncClient $
              pure $
                Client.SendMsgRequestNext (clientStNext lastSize fdOnPoint fdRest) (pure $ clientStNext lastSize fdOnPoint fdRest)
        }