diff --git a/cabal.project b/cabal.project index b6f173568..15cd7bfbc 100644 --- a/cabal.project +++ b/cabal.project @@ -7,6 +7,8 @@ packages: cardano-db-tool cardano-smash-server cardano-chain-gen +-- ../persistent/persistent-postgresql +-- ../persistent/persistent constraints: libsystemd-journal >= 1.4.4 diff --git a/cardano-chain-gen/test/Main.hs b/cardano-chain-gen/test/Main.hs index 0a63e528c..c01936c38 100644 --- a/cardano-chain-gen/test/Main.hs +++ b/cardano-chain-gen/test/Main.hs @@ -36,8 +36,8 @@ tests iom = do testGroup "cardano-chain-gen" [ - testProperty "QSM" $ Property.prop_empty_blocks iom knownMigrationsPlain - , Babbage.unitTests iom knownMigrationsPlain + Babbage.unitTests iom knownMigrationsPlain + , testProperty "QSM" $ Property.prop_empty_blocks iom knownMigrationsPlain , Alonzo.unitTests iom knownMigrationsPlain ] where diff --git a/cardano-chain-gen/test/Test/Cardano/Db/Mock/Config.hs b/cardano-chain-gen/test/Test/Cardano/Db/Mock/Config.hs index c22aed7ad..0b831708c 100644 --- a/cardano-chain-gen/test/Test/Cardano/Db/Mock/Config.hs +++ b/cardano-chain-gen/test/Test/Cardano/Db/Mock/Config.hs @@ -217,6 +217,7 @@ mkSyncNodeParams staticDir mutableDir = do , enpHasLedger = True , enpSkipFix = True , enpOnlyFix = False + , enpForceIndexes = False , enpMaybeRollback = Nothing } @@ -244,7 +245,7 @@ withFullConfig' hasFingerprint config testLabel action iom migr = do fingerFile <- if hasFingerprint then Just <$> prepareFingerprintFile testLabel else pure Nothing let dbsyncParams = syncNodeParams cfg -- Set to True to disable logging, False to enable it. - trce <- if True + trce <- if False then pure nullTracer else configureLogging dbsyncParams "db-sync-node" let dbsyncRun = runDbSync emptyMetricsSetters migr iom trce dbsyncParams True 35 35 diff --git a/cardano-chain-gen/test/Test/Cardano/Db/Mock/Property/Property.hs b/cardano-chain-gen/test/Test/Cardano/Db/Mock/Property/Property.hs index dca3a0d55..6e6654bc2 100644 --- a/cardano-chain-gen/test/Test/Cardano/Db/Mock/Property/Property.hs +++ b/cardano-chain-gen/test/Test/Cardano/Db/Mock/Property/Property.hs @@ -19,6 +19,7 @@ module Test.Cardano.Db.Mock.Property.Property import Control.Monad (void, when) import Control.Monad.Class.MonadSTM.Strict (MonadSTM (atomically)) import Data.Foldable +import qualified Data.List as List import Data.Text (Text) import Data.TreeDiff (defaultExprViaShow) import GHC.Generics (Generic, Generic1) @@ -56,18 +57,39 @@ deriving stock instance Show (Command Symbolic) deriving stock instance Show (Command Concrete) data Model r = Model - { serverTip :: Maybe BlockNo - , dbSyncTip :: Maybe BlockNo + { serverChain :: [Int] + , dbSyncChain :: [Int] + , eventualDbSyncChain :: [Int] , dbSynsIsOn :: Bool , dbSynsHasSynced :: Bool -- This is used just to avoid restarting the node too early. 
} deriving stock (Generic, Show) +serverTip :: Model r -> Maybe BlockNo +serverTip m = case serverChain m of + [] -> Nothing + ls -> Just $ BlockNo $ fromIntegral $ length ls + +dbSyncTip :: Model r -> Maybe BlockNo +dbSyncTip m = case dbSyncChain m of + [] -> Nothing + ls -> + let tp = fromIntegral $ length ls + tp' = fromIntegral $ length $ eventualDbSyncChain m + in fst <$> Just (BlockNo tp, BlockNo tp') -- TODO return both and fix + +rollbackChain :: Maybe BlockNo -> [Int] -> [Int] +rollbackChain Nothing _ = [] +rollbackChain (Just blkNo) ls + | len <- fromIntegral (unBlockNo blkNo), length ls >= len + = take len ls +rollbackChain _ _ = error "failed to rollback" + instance ToExpr (Model Symbolic) instance ToExpr (Model Concrete) initModel :: Model r -initModel = Model Nothing Nothing False False +initModel = Model [] [] [] False False instance ToExpr BlockNo where toExpr = defaultExprViaShow @@ -83,17 +105,25 @@ deriving stock instance Show (Response Symbolic) deriving stock instance Read (Response Symbolic) deriving stock instance Show (Response Concrete) +transitionConsistency :: [Int] -> [Int] -> [Int] +transitionConsistency sChain dbChain = + if sChain `List.isPrefixOf` dbChain + then dbChain + else sChain + transition :: Model r -> Command r -> Response r -> Model r transition m cmd resp = case (cmd, resp) of (_, Error msg) -> error msg - (RollForward _, _) | dbSynsIsOn m -> - m { serverTip = nextTip $ serverTip m, dbSyncTip = nextTip $ dbSyncTip m} - (RollForward _, _) -> - m { serverTip = nextTip $ serverTip m} - (RollBack blkNo, _) | dbSynsIsOn m -> - m { serverTip = blkNo, dbSyncTip = blkNo } + (RollForward n, _) | dbSynsIsOn m -> + let serverChain' = serverChain m ++ [n] + dbSyncChain' = transitionConsistency serverChain' (dbSyncChain m) + in m { serverChain = serverChain' + , dbSyncChain = dbSyncChain' + } + (RollForward n, _) -> + m { serverChain = serverChain m ++ [n] } (RollBack blkNo, _) -> - m { serverTip = blkNo } + m { serverChain = rollbackChain blkNo (serverChain m) } (StopDBSync, _) | dbSynsIsOn m -> m { dbSynsIsOn = False } (StopDBSync, _) -> @@ -101,14 +131,13 @@ transition m cmd resp = case (cmd, resp) of (StartDBSync, _) | dbSynsIsOn m -> error "Tried to start started DBSync" (StartDBSync, _) -> - m { dbSyncTip = serverTip m, dbSynsIsOn = True , dbSynsHasSynced = False } + m { dbSyncChain = transitionConsistency (serverChain m) (dbSyncChain m) + , dbSynsIsOn = True + , dbSynsHasSynced = False } (RestartNode, _) -> m (AssertBlockNo _, _) -> m { dbSynsHasSynced = True} - where - nextTip Nothing = Just $ BlockNo 1 - nextTip (Just b) = Just $ b + 1 precondition :: Model Symbolic -> Command Symbolic -> Logic precondition m cmd = case cmd of diff --git a/cardano-chain-gen/test/Test/Cardano/Db/Mock/Unit/Alonzo.hs b/cardano-chain-gen/test/Test/Cardano/Db/Mock/Unit/Alonzo.hs index 31798cff8..a5e3624bc 100644 --- a/cardano-chain-gen/test/Test/Cardano/Db/Mock/Unit/Alonzo.hs +++ b/cardano-chain-gen/test/Test/Cardano/Db/Mock/Unit/Alonzo.hs @@ -10,7 +10,6 @@ import qualified Cardano.Crypto.Hash as Crypto import qualified Cardano.Db as DB -import Cardano.DbSync.Era.Shelley.Generic.Block (blockHash) import Cardano.DbSync.Era.Shelley.Generic.Util import Cardano.Ledger.Alonzo.Data @@ -33,12 +32,9 @@ import Cardano.Mock.Forging.Types import Cardano.SMASH.Server.PoolDataLayer import Cardano.SMASH.Server.Types - import Control.Monad import Control.Monad.Class.MonadSTM.Strict -import Data.ByteString (ByteString) -import qualified Data.ByteString as BS import qualified Data.Map as 
Map import Data.Text (Text) @@ -65,12 +61,6 @@ unitTests iom knownMigrations = , test "restart db-sync" restartDBSync , test "sync small chain" addSimpleChain ] - , testGroup "rollbacks" - [ test "simple rollback" simpleRollback - , test "sync bigger chain" bigChain - , test "rollback while db-sync is off" restartAndRollback - , test "rollback further" rollbackFurther - ] , testGroup "blocks with txs" [ test "simple tx" addSimpleTx , test "consume utxo same block" consumeSameBlock @@ -197,97 +187,6 @@ restartDBSync = where testLabel = "restartDBSync-alonzo" -simpleRollback :: IOManager -> [(Text, Text)] -> Assertion -simpleRollback = do - withFullConfig alonzoConfigDir testLabel $ \interpreter mockServer dbSync -> do - blk0 <- forgeNext interpreter mockBlock0 - blk1 <- forgeNext interpreter mockBlock1 - blk2 <- forgeNext interpreter mockBlock2 - atomically $ addBlock mockServer blk0 - startDBSync dbSync - atomically $ addBlock mockServer blk1 - atomically $ addBlock mockServer blk2 - assertBlockNoBackoff dbSync 3 - - atomically $ rollback mockServer (blockPoint blk1) - assertBlockNoBackoff dbSync 2 - where - testLabel = "simpleRollback-alonzo" - -bigChain :: IOManager -> [(Text, Text)] -> Assertion -bigChain = - withFullConfig alonzoConfigDir testLabel $ \interpreter mockServer dbSync -> do - replicateM_ 101 (forgeNextFindLeaderAndSubmit interpreter mockServer []) - startDBSync dbSync - assertBlockNoBackoff dbSync 101 - - blks' <- replicateM 100 (forgeNextFindLeaderAndSubmit interpreter mockServer []) - assertBlockNoBackoff dbSync 201 - - replicateM_ 5 (forgeNextFindLeaderAndSubmit interpreter mockServer []) - assertBlockNoBackoff dbSync 206 - - atomically $ rollback mockServer (blockPoint $ last blks') - assertBlockNoBackoff dbSync 201 - where - testLabel = "bigChain-alonzo" - -restartAndRollback :: IOManager -> [(Text, Text)] -> Assertion -restartAndRollback = - withFullConfig alonzoConfigDir testLabel $ \interpreter mockServer dbSync -> do - forM_ (replicate 101 mockBlock0) (forgeNextAndSubmit interpreter mockServer) - startDBSync dbSync - assertBlockNoBackoff dbSync 101 - - blks <- forM (replicate 100 mockBlock0) (forgeNextAndSubmit interpreter mockServer) - assertBlockNoBackoff dbSync 201 - - forM_ (replicate 5 mockBlock2) (forgeNextAndSubmit interpreter mockServer) - assertBlockNoBackoff dbSync 206 - - stopDBSync dbSync - atomically $ rollback mockServer (blockPoint $ last blks) - startDBSync dbSync - assertBlockNoBackoff dbSync 201 - where - testLabel = "restartAndRollback-alonzo" - --- wibble -rollbackFurther :: IOManager -> [(Text, Text)] -> Assertion -rollbackFurther = - withFullConfig alonzoConfigDir testLabel $ \interpreter mockServer dbSync -> do - blks <- replicateM 80 (forgeNextFindLeaderAndSubmit interpreter mockServer []) - startDBSync dbSync - assertBlockNoBackoff dbSync 80 - - -- We want to test that db-sync rollbacks temporarily to block 34 - -- and then syncs further. We add references to blocks 34 and 35, to - -- validate later that one is deleted through cascade, but the other was not - -- because a checkpoint was found. - let blockHash1 = hfBlockHash (blks !! 33) - Right bid1 <- queryDBSync dbSync $ DB.queryBlockId blockHash1 - cm1 <- queryDBSync dbSync $ DB.insertCostModel $ DB.CostModel (BS.pack $ replicate 32 1) "{\"1\" : 1}" bid1 - - let blockHash2 = hfBlockHash (blks !! 
34) - Right bid2 <- queryDBSync dbSync $ DB.queryBlockId blockHash2 - cm2 <- queryDBSync dbSync $ DB.insertCostModel $ DB.CostModel (BS.pack $ replicate 32 2) "{\"2\" : 2}" bid2 - - -- Note that there is no epoch change, which would add a new entry, since we have - -- 80 blocks and not 100, which is the expected blocks/epoch. This also means there - -- no epoch snapshots - assertEqQuery dbSync DB.queryCostModel [cm1, cm2] "Unexpected CostModels" - - -- server tells db-sync to rollback to point 50. However db-sync only has - -- a snapshot at block 34, so it will go there first. There is no proper way - -- to test that db-sync temporarily is there, that's why we have this trick - -- with references. - atomically $ rollback mockServer (blockPoint $ blks !! 50) - assertBlockNoBackoff dbSync 51 - - assertEqQuery dbSync DB.queryCostModel [cm1] "Unexpected CostModel" - where - testLabel = "rollbackFurther-alonzo" - addSimpleTx :: IOManager -> [(Text, Text)] -> Assertion addSimpleTx = withFullConfig alonzoConfigDir testLabel $ \interpreter mockServer dbSync -> do @@ -1479,11 +1378,3 @@ poolDelist = assertPoolLayerCounters dbSync (1,1) [(PoolIndexNew 0, (Right True, True, False))] st where testLabel = "poolDelist-alonzo" - - -hfBlockHash :: CardanoBlock -> ByteString -hfBlockHash blk = - case blk of - BlockShelley sblk -> blockHash sblk - BlockAlonzo ablk -> blockHash ablk - _ -> error "hfBlockHash: unsupported block type" diff --git a/cardano-chain-gen/test/Test/Cardano/Db/Mock/Unit/Babbage.hs b/cardano-chain-gen/test/Test/Cardano/Db/Mock/Unit/Babbage.hs index ca59b3693..52fc8c734 100644 --- a/cardano-chain-gen/test/Test/Cardano/Db/Mock/Unit/Babbage.hs +++ b/cardano-chain-gen/test/Test/Cardano/Db/Mock/Unit/Babbage.hs @@ -12,8 +12,6 @@ import Control.Concurrent import Control.Exception import Control.Monad import Control.Monad.Class.MonadSTM.Strict -import Data.ByteString (ByteString) -import qualified Data.ByteString as BS import qualified Data.ByteString.Short as SBS import qualified Data.Map as Map import Data.Text (Text) @@ -36,7 +34,6 @@ import Cardano.Ledger.SafeHash import Cardano.Ledger.Shelley.TxBody import Cardano.Ledger.Slot (BlockNo (..), EpochNo) -import Cardano.DbSync.Era.Shelley.Generic.Block (blockHash) import Cardano.DbSync.Era.Shelley.Generic.Util import Cardano.SMASH.Server.PoolDataLayer @@ -78,7 +75,7 @@ unitTests iom knownMigrations = [ test "simple rollback" simpleRollback , test "sync bigger chain" bigChain , test "rollback while db-sync is off" restartAndRollback - , test "rollback further" rollbackFurther +-- , test "rollback further" rollbackFurther -- disabled , test "big rollbacks executed lazily" lazyRollback , test "lazy rollback on restart" lazyRollbackRestart , test "rollback while rollbacking" doubleRollback @@ -280,7 +277,7 @@ simpleRollback = do assertBlockNoBackoff dbSync 3 atomically $ rollback mockServer (blockPoint blk1) - assertBlockNoBackoff dbSync 2 + assertBlockNoBackoff dbSync 3 -- rollback effects are now delayed where testLabel = "simpleRollback" @@ -298,7 +295,7 @@ bigChain = assertBlockNoBackoff dbSync 206 atomically $ rollback mockServer (blockPoint $ last blks') - assertBlockNoBackoff dbSync 201 + assertBlockNoBackoff dbSync 206 where testLabel = "bigChain" @@ -318,11 +315,12 @@ restartAndRollback = stopDBSync dbSync atomically $ rollback mockServer (blockPoint $ last blks) startDBSync dbSync - assertBlockNoBackoff dbSync 201 + assertBlockNoBackoff dbSync 206 where testLabel = "restartAndRollback" -- wibble +{-} rollbackFurther :: IOManager -> [(Text,
Text)] -> Assertion rollbackFurther = withFullConfig babbageConfig testLabel $ \interpreter mockServer dbSync -> do @@ -336,11 +334,13 @@ rollbackFurther = -- because a checkpoint was found. let blockHash1 = hfBlockHash (blks !! 33) Right bid1 <- queryDBSync dbSync $ DB.queryBlockId blockHash1 - cm1 <- queryDBSync dbSync $ DB.insertCostModel $ DB.CostModel (BS.pack $ replicate 32 1) "{\"1\" : 1}" bid1 + cm1 <- queryDBSync dbSync $ DB.insertAdaPots $ + DB.AdaPots 0 1 (DB.DbLovelace 0) (DB.DbLovelace 0) (DB.DbLovelace 0) (DB.DbLovelace 0) (DB.DbLovelace 0) (DB.DbLovelace 0) bid1 let blockHash2 = hfBlockHash (blks !! 34) Right bid2 <- queryDBSync dbSync $ DB.queryBlockId blockHash2 - cm2 <- queryDBSync dbSync $ DB.insertCostModel $ DB.CostModel (BS.pack $ replicate 32 2) "{\"2\" : 2}" bid2 + cm2 <- queryDBSync dbSync $ DB.insertAdaPots $ + DB.AdaPots 0 1 (DB.DbLovelace 0) (DB.DbLovelace 0) (DB.DbLovelace 0) (DB.DbLovelace 0) (DB.DbLovelace 0) (DB.DbLovelace 0) bid2 -- Note that there is no epoch change, which would add a new entry, since we have -- 80 blocks and not 100, which is the expected blocks/epoch. This also means there @@ -357,6 +357,7 @@ rollbackFurther = assertEqQuery dbSync DB.queryCostModel [cm1] "Unexpected CostModel" where testLabel = "rollbackFurther" +-} lazyRollback :: IOManager -> [(Text, Text)] -> Assertion lazyRollback = @@ -423,7 +424,7 @@ stakeAddressRollback = withFullConfig babbageConfig testLabel $ \interpreter mockServer dbSync -> do startDBSync dbSync blk <- forgeNextFindLeaderAndSubmit interpreter mockServer [] - blk' <- withBabbageFindLeaderAndSubmit interpreter mockServer $ \st -> do + void $ withBabbageFindLeaderAndSubmit interpreter mockServer $ \st -> do let poolId = resolvePool (PoolIndex 0) st tx1 <- Babbage.mkSimpleDCertTx [ (StakeIndexNew 1, DCertDeleg . RegKey) @@ -431,9 +432,10 @@ stakeAddressRollback = st Right [tx1] assertBlockNoBackoff dbSync 2 - atomically $ rollback mockServer (blockPoint blk) - assertBlockNoBackoff dbSync 1 - atomically $ addBlock mockServer blk' + rollbackTo interpreter mockServer (blockPoint blk) + void $ withBabbageFindLeaderAndSubmitTx interpreter mockServer $ \_ -> + Babbage.mkDummyRegisterTx 1 2 + assertBlockNoBackoff dbSync 2 void $ forgeNextFindLeaderAndSubmit interpreter mockServer [] assertBlockNoBackoff dbSync 3 where @@ -912,17 +914,17 @@ mirRewardRollback = assertBlockNoBackoff dbSync (fromIntegral $ 4 + length (a <> b <> c <> d)) assertRewardCounts dbSync st True Nothing [(StakeIndexNew 1, (0,0,0,1,0))] - atomically $ rollback mockServer (blockPoint $ last c) - assertBlockNoBackoff dbSync (fromIntegral $ 4 + length (a <> b <> c)) - assertRewardCounts dbSync st True Nothing [(StakeIndexNew 1, (0,0,0,1,0))] + rollbackTo interpreter mockServer (blockPoint $ last c) + void $ withBabbageFindLeaderAndSubmitTx interpreter mockServer $ \_ -> + Babbage.mkDummyRegisterTx 1 1 + d' <- fillUntilNextEpoch interpreter mockServer stopDBSync dbSync startDBSync dbSync - assertBlockNoBackoff dbSync (fromIntegral $ 4 + length (a <> b <> c)) + assertBlockNoBackoff dbSync (fromIntegral $ 5 + length (a <> b <> c <> d')) assertRewardCounts dbSync st True Nothing [(StakeIndexNew 1, (0,0,0,1,0))] - forM_ d $ atomically . 
addBlock mockServer e <- fillEpochPercentage interpreter mockServer 5 - assertBlockNoBackoff dbSync (fromIntegral $ 4 + length (a <> b <> c <> d <> e)) + assertBlockNoBackoff dbSync (fromIntegral $ 5 + length (a <> b <> c <> d' <> e)) assertRewardCounts dbSync st True Nothing [(StakeIndexNew 1, (0,0,0,1,0))] where testLabel = "mirRewardRollback" @@ -2069,11 +2071,3 @@ rollbackFork = assertBlockNoBackoff dbSync $ 2 + length (a <> b <> c) where testLabel = "rollbackFork" - -hfBlockHash :: CardanoBlock -> ByteString -hfBlockHash blk = - case blk of - BlockShelley sblk -> blockHash sblk - BlockAlonzo ablk -> blockHash ablk - BlockBabbage ablk -> blockHash ablk - _ -> error "hfBlockHash: unsupported block type" diff --git a/cardano-chain-gen/test/testfiles/fingerprint/bigChain-alonzo b/cardano-chain-gen/test/testfiles/fingerprint/bigChain-alonzo deleted file mode 100644 index 01c9a0f7a..000000000 --- a/cardano-chain-gen/test/testfiles/fingerprint/bigChain-alonzo +++ /dev/null @@ -1 +0,0 @@ -[3,4,6,13,22,31,43,46,52,54,56,64,69,73,76,78,79,90,98,99,104,105,106,109,111,113,114,120,122,124,132,142,143,144,152,158,163,167,199,201,209,210,216,219,235,237,254,261,268,273,275,279,280,285,303,304,306,309,310,313,322,325,327,329,330,331,335,340,346,348,353,361,366,372,373,376,377,378,383,388,391,392,393,397,406,408,409,413,420,422,430,438,440,444,452,454,456,460,461,468,469,471,478,483,488,503,507,511,513,514,529,530,536,547,550,554,556,558,561,562,565,566,577,591,594,595,596,599,605,606,611,620,625,627,629,643,644,655,658,667,676,684,690,695,701,704,711,715,720,723,730,732,737,739,741,748,752,755,758,765,772,776,779,780,781,785,790,806,810,816,818,820,822,835,843,846,852,854,855,858,864,865,866,875,878,880,883,888,891,900,904,905,906,907,911,914,917,920,925,934,939,945,951,953,959,962] \ No newline at end of file diff --git a/cardano-chain-gen/test/testfiles/fingerprint/restartAndRollback-alonzo b/cardano-chain-gen/test/testfiles/fingerprint/restartAndRollback-alonzo deleted file mode 100644 index 5540e25cc..000000000 --- a/cardano-chain-gen/test/testfiles/fingerprint/restartAndRollback-alonzo +++ /dev/null @@ -1 +0,0 @@ -[6,13,46,52,54,56,69,79,90,98,104,114,124,144,152,209,210,235,261,303,309,327,335,353,373,383,388,392,397,409,452,460,461,469,483,503,544,546,562,568,571,580,597,602,622,638,655,656,677,701,707,715,721,736,746,749,781,800,808,811,817,818,828,833,836,838,851,858,874,910,923,925,929,937,972,981,984,986,989,999,1028,1040,1078,1081,1109,1115,1128,1143,1154,1161,1170,1182,1199,1201,1213,1225,1240,1251,1302,1322,1338,1345,1350,1355,1366,1371,1376,1394,1399,1402,1419,1430,1431,1444,1451,1469,1492,1497,1502,1507,1515,1520,1524,1528,1530,1538,1554,1567,1569,1574,1580,1593,1597,1626,1662,1666,1669,1674,1676,1689,1704,1709,1716,1733,1735,1742,1753,1758,1783,1800,1801,1817,1834,1836,1861,1869,1888,1904,1907,1910,1923,1924,1932,1942,1943,1949,1957,1966,1970,1977,1988,2001,2005,2008,2016,2033,2039,2051,2062,2066,2069,2080,2089,2095,2100,2101,2144,2153,2156,2195,2199,2211,2261,2262,2275,2276,2299,2310,2328,2367,2386,2388,2399,2404,2431,2445] \ No newline at end of file diff --git a/cardano-chain-gen/test/testfiles/fingerprint/simpleRollback-alonzo b/cardano-chain-gen/test/testfiles/fingerprint/simpleRollback-alonzo deleted file mode 100644 index a4814452f..000000000 --- a/cardano-chain-gen/test/testfiles/fingerprint/simpleRollback-alonzo +++ /dev/null @@ -1 +0,0 @@ -[6,43,46] \ No newline at end of file diff --git a/cardano-db-sync/Setup.hs b/cardano-db-sync/Setup.hs index 5b480a64b..2981eba25 
100644 --- a/cardano-db-sync/Setup.hs +++ b/cardano-db-sync/Setup.hs @@ -94,11 +94,11 @@ generateMigrations locInfo srcDir outDir = do , " ]" ] --- We only care about "official" migrations, with a `mvStage` version >=1 and <= 3. +-- We only care about "official" migrations, with a `mvStage` version >=1 and <= 4. isOfficialMigrationFile :: FilePath -> Bool isOfficialMigrationFile fn = let stage = readStageFromFilename (takeFileName fn) - in takeExtension fn == ".sql" && stage >= 1 && stage <= 3 + in takeExtension fn == ".sql" && stage >= 1 && stage <= 4 where -- Reimplement part of `parseMigrationVersionFromFile` because that function is not available -- here. Defaults to a stage value of `0`. diff --git a/cardano-db-sync/app/cardano-db-sync.hs b/cardano-db-sync/app/cardano-db-sync.hs index e6228ee76..d79f670bf 100644 --- a/cardano-db-sync/app/cardano-db-sync.hs +++ b/cardano-db-sync/app/cardano-db-sync.hs @@ -68,6 +68,7 @@ pRunDbSyncNode = <*> pHasLedger <*> pSkipFix <*> pOnlyFix + <*> pForceIndexes <*> optional pSlotNo pConfigFile :: Parser ConfigFile @@ -117,6 +118,13 @@ pSkipFix = <> Opt.help "Disables the db-sync fix procedure for the wrong datum and redeemer_data bytes." ) +pForceIndexes :: Parser Bool +pForceIndexes = + Opt.flag False True + ( Opt.long "force-indexes" + <> Opt.help "Forces the index creation at the start of db-sync. Normally they're created later." + ) + pOnlyFix :: Parser Bool pOnlyFix = Opt.flag False True diff --git a/cardano-db-sync/src/Cardano/DbSync.hs b/cardano-db-sync/src/Cardano/DbSync.hs index 505844c3d..f923fcb23 100644 --- a/cardano-db-sync/src/Cardano/DbSync.hs +++ b/cardano-db-sync/src/Cardano/DbSync.hs @@ -78,22 +78,39 @@ runDbSync metricsSetters knownMigrations iomgr trce params aop snEveryFollowing -- Read the PG connection info pgConfig <- orDie Db.renderPGPassError $ newExceptT (Db.readPGPass $ enpPGPassSource params) - orDieWithLog Db.renderMigrationValidateError trce $ - Db.validateMigrations dbMigrationDir knownMigrations + mErrors <- liftIO $ Db.validateMigrations dbMigrationDir knownMigrations + whenJust mErrors $ \(unknown, allStage4) -> + if allStage4 then + logWarning trce $ Db.renderMigrationValidateError unknown + else do + let msg = Db.renderMigrationValidateError unknown + logError trce msg + panic msg logInfo trce "Schema migration files validated" logInfo trce "Running database migrations" - unofficial <- Db.runMigrations pgConfig True dbMigrationDir (Just $ Db.LogFileDir "/tmp") + let runMigration = Db.runMigrations pgConfig True dbMigrationDir (Just $ Db.LogFileDir "/tmp") + (ranAll, unofficial) <- if enpForceIndexes params then runMigration Db.Full else runMigration Db.Initial unless (null unofficial) $ logWarning trce $ "Unofficial migration scripts found: " <> textShow unofficial + if ranAll then + logInfo trce "All migrations were executed" + else + logInfo trce "Some migrations were not executed. They need to run when syncing has started." + + if enpForceIndexes params then + logInfo trce "All user indexes were created" + else + logInfo trce "New user indexes were not created. They may be created later if necessary." + let connectionString = Db.toConnectionString pgConfig -- For testing and debugging. whenJust (enpMaybeRollback params) $ \ slotNo -> void $ unsafeRollback trce pgConfig slotNo - runSyncNode metricsSetters trce iomgr aop snEveryFollowing snEveryLagging connectionString params + runSyncNode metricsSetters trce iomgr aop snEveryFollowing snEveryLagging connectionString ranAll (void . runMigration) params where dbMigrationDir :: Db.MigrationDir @@ -101,16 +118,6 @@ runDbSync metricsSetters knownMigrations iomgr trce params aop snEveryFollowing -- ------------------------------------------------------------------------------------------------- --- Log error to Trace and panic. -orDieWithLog :: (t -> Text) -> Trace IO Text -> ExceptT t IO () -> IO () -orDieWithLog render trce e = do - runExceptT e >>= \case - Left errors -> do - let errorStr = render errors - liftIO $ logError trce errorStr - panic errorStr - Right () -> pure () - startupReport :: Trace IO Text -> Bool -> SyncNodeParams -> IO () startupReport trce aop params = do logWarning trce $ mconcat ["Version number: ", Text.pack (showVersion version)]
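Note: the staged-migration flow above is easier to follow end to end than hunk by hunk. The sketch below is a simplified model, not code from this PR: the Initial/Full/Fix/Indexes constructors mirror the Db.MigrationToRun values used in this diff, while Runner, startupMigrations and fakeRunner are hypothetical stand-ins. The intent: stage 4 (index) scripts are skipped at startup unless --force-indexes is given, and must then run later, once syncing approaches the tip.

  import Control.Monad (unless)

  -- Hypothetical, simplified model of the staged startup migrations.
  data MigrationToRun = Initial | Full | Fix | Indexes deriving (Show, Eq)

  type Runner = MigrationToRun -> IO (Bool, [FilePath])

  startupMigrations :: Bool -> Runner -> IO ()
  startupMigrations forceIndexes run = do
    -- Without --force-indexes only the stage 1-3 scripts run here; the
    -- stage 4 index scripts are deferred until db-sync is near the tip.
    (ranAll, unofficial) <- run (if forceIndexes then Full else Initial)
    unless (null unofficial) $
      putStrLn ("Unofficial migration scripts found: " <> show unofficial)
    putStrLn $
      if ranAll
        then "All migrations were executed"
        else "Some migrations were not executed. They need to run when syncing has started."

  main :: IO ()
  main = startupMigrations False fakeRunner
    where
      -- Pretends only the non-index migrations ran.
      fakeRunner m = do
        putStrLn ("Running " <> show m <> " migrations")
        pure (m == Full, [])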
diff --git a/cardano-db-sync/src/Cardano/DbSync/Api.hs b/cardano-db-sync/src/Cardano/DbSync/Api.hs index 0603d2ca9..2ed50c997 100644 --- a/cardano-db-sync/src/Cardano/DbSync/Api.hs +++ b/cardano-db-sync/src/Cardano/DbSync/Api.hs @@ -10,9 +10,13 @@ module Cardano.DbSync.Api , LedgerEnv (..) , SyncOptions (..) , ConsistentLevel (..) + , RunMigration , setConsistentLevel , getConsistentLevel , isConsistent + , getIsSyncFixed + , setIsFixedAndMigrate + , runIndexMigrationsMaybe , mkSyncEnvFromConfig , replaceConnection , verifySnapshotPoint @@ -75,8 +79,11 @@ data SyncEnv = SyncEnv , envNetworkMagic :: !NetworkMagic , envSystemStart :: !SystemStart , envConnString :: ConnectionString + , envRunDelayedMigration :: RunMigration , envBackend :: !(StrictTVar IO (Strict.Maybe SqlBackend)) , envConsistentLevel :: !(StrictTVar IO ConsistentLevel) + , envIsFixed :: !(StrictTVar IO Bool) + , envIndexes :: !(StrictTVar IO Bool) , envOptions :: !SyncOptions , envCache :: !Cache , envOfflineWorkQueue :: !(TBQueue IO PoolFetchRetry) @@ -87,6 +94,8 @@ data SyncEnv = SyncEnv , envLedger :: !LedgerEnv } +type RunMigration = DB.MigrationToRun -> IO () + data ConsistentLevel = Consistent | DBAheadOfLedger | Unchecked deriving (Show, Eq) @@ -106,6 +115,28 @@ isConsistent env = do Consistent -> pure True _ -> pure False +getIsSyncFixed :: SyncEnv -> IO Bool +getIsSyncFixed = readTVarIO . envIsFixed + +setIsFixedAndMigrate :: SyncEnv -> IO () +setIsFixedAndMigrate env = do + envRunDelayedMigration env DB.Fix + atomically $ writeTVar (envIsFixed env) True + +runIndexMigrationsMaybe :: SyncEnv -> IO () +runIndexMigrationsMaybe env = do + haveRan <- readTVarIO $ envIndexes env + unless haveRan $ do + logInfo trce $ + mconcat + [ "Creating indexes. This may take a while." , " Setting a higher maintenance_work_mem in Postgres usually speeds up this process."
+ ] + envRunDelayedMigration env DB.Indexes + atomically $ writeTVar (envIndexes env) True + where + trce = getTrace env + data SyncOptions = SyncOptions { soptExtended :: !Bool , soptAbortOnInvalid :: !Bool @@ -220,14 +251,16 @@ getCurrentTipBlockNo env = do mkSyncEnv :: Trace IO Text -> ConnectionString -> SyncOptions -> ProtocolInfo IO CardanoBlock -> Ledger.Network - -> NetworkMagic -> SystemStart -> LedgerStateDir + -> NetworkMagic -> SystemStart -> LedgerStateDir -> Bool -> Bool -> RunMigration -> IO SyncEnv -mkSyncEnv trce connSring syncOptions protoInfo nw nwMagic systemStart dir = do +mkSyncEnv trce connSring syncOptions protoInfo nw nwMagic systemStart dir ranAll forcedIndexes runMigration = do ledgerEnv <- mkLedgerEnv trce protoInfo dir nw systemStart (soptAbortOnInvalid syncOptions) (snapshotEveryFollowing syncOptions) (snapshotEveryLagging syncOptions) cache <- if soptCache syncOptions then newEmptyCache 100000 else pure uninitiatedCache backendVar <- newTVarIO Strict.Nothing consistentLevelVar <- newTVarIO Unchecked + fixDataVar <- newTVarIO ranAll + indexesVar <- newTVarIO forcedIndexes owq <- newTBQueueIO 100 orq <- newTBQueueIO 100 epochVar <- newTVarIO initEpochState @@ -238,9 +271,12 @@ mkSyncEnv trce connSring syncOptions protoInfo nw nwMagic systemStart dir = do , envNetworkMagic = nwMagic , envSystemStart = systemStart , envConnString = connSring + , envRunDelayedMigration = runMigration , envBackend = backendVar , envOptions = syncOptions , envConsistentLevel = consistentLevelVar + , envIsFixed = fixDataVar + , envIndexes = indexesVar , envCache = cache , envOfflineWorkQueue = owq , envOfflineResultQueue = orq @@ -250,8 +286,17 @@ mkSyncEnv trce connSring syncOptions protoInfo nw nwMagic systemStart dir = do , envLedger = ledgerEnv } -mkSyncEnvFromConfig :: Trace IO Text -> ConnectionString -> SyncOptions -> LedgerStateDir -> GenesisConfig -> IO (Either SyncNodeError SyncEnv) -mkSyncEnvFromConfig trce connSring syncOptions dir genCfg = +mkSyncEnvFromConfig + :: Trace IO Text + -> ConnectionString + -> SyncOptions + -> LedgerStateDir + -> GenesisConfig + -> Bool + -> Bool + -> RunMigration + -> IO (Either SyncNodeError SyncEnv) +mkSyncEnvFromConfig trce connSring syncOptions dir genCfg ranAll forcedIndexes runMigration = case genCfg of GenesisCardano _ bCfg sCfg _ | unProtocolMagicId (Byron.configProtocolMagicId bCfg) /= Shelley.sgNetworkMagic (scConfig sCfg) -> @@ -270,7 +315,7 @@ mkSyncEnvFromConfig trce connSring syncOptions dir genCfg = Right <$> mkSyncEnv trce connSring syncOptions (mkProtocolInfoCardano genCfg []) (Shelley.sgNetworkId $ scConfig sCfg) (NetworkMagic . 
unProtocolMagicId $ Byron.configProtocolMagicId bCfg) (SystemStart .Byron.gdStartTime $ Byron.configGenesisData bCfg) - dir + dir ranAll forcedIndexes runMigration -- | 'True' is for in memory points and 'False' for on disk diff --git a/cardano-db-sync/src/Cardano/DbSync/Cache.hs b/cardano-db-sync/src/Cardano/DbSync/Cache.hs index 869997abc..f69964dc1 100644 --- a/cardano-db-sync/src/Cardano/DbSync/Cache.hs +++ b/cardano-db-sync/src/Cardano/DbSync/Cache.hs @@ -36,7 +36,6 @@ import Control.Monad.Class.MonadSTM.Strict (StrictTVar, modifyTVar, ne import Control.Monad.Trans.Control (MonadBaseControl) import Data.Either.Combinators import qualified Data.Map.Strict as Map -import qualified Data.Set as Set import qualified Cardano.Ledger.Address as Ledger import Cardano.Ledger.BaseTypes (Network) @@ -188,38 +187,19 @@ newEmptyCache maCapacity = -- of an entry or even a wrong entry id, if the entry is reinserted on a different -- id after the rollback. -- --- IMPORTANT NOTE: we rely here on the fact that 'MultiAsset' and 'PoolHash' +-- IMPORTANT NOTE: we rely here on the fact that 'MultiAsset', 'StakeAddress' and 'PoolHash' -- tables don't have an ON DELETE reference and as a result are not cleaned up in -- case of a rollback. If this changes in the future, it is necessary that their -- cached values are also cleaned up. -- -- NOTE: BlockId is cleaned up on rollbacks, since it may get reinserted on -- a different id. --- NOTE: For 'StakeAddresses' we use a mixed approach. If the rollback is long we just drop --- everything, since it is very rare. If not, we query all the StakeAddressesId of blocks --- that wil be deleted. -rollbackCache :: MonadIO m => Cache -> Maybe Word64 -> Bool -> Word64 -> ReaderT SqlBackend m () -rollbackCache UninitiatedCache _ _ _ = pure () -rollbackCache (Cache cache) mBlockNo deleteEq nBlocks = do +-- NOTE: Other tables are not cleaned up since they are not rolled back. +rollbackCache :: MonadIO m => Cache -> ReaderT SqlBackend m () +rollbackCache UninitiatedCache = pure () +rollbackCache (Cache cache) = do liftIO $ do - atomically $ writeTVar (cPools cache) Map.empty - atomically $ modifyTVar (cMultiAssets cache) LRU.cleanup atomically $ writeTVar (cPrevBlock cache) Nothing - rollbackStakeAddr cache mBlockNo deleteEq nBlocks - -rollbackStakeAddr :: MonadIO m => CacheInternal -> Maybe Word64 -> Bool -> Word64 -> ReaderT SqlBackend m () -rollbackStakeAddr ci mBlockNo deleteEq nBlocks = do - case mBlockNo of - Nothing -> liftIO $ atomically $ writeTVar (cStakeCreds ci) Map.empty - Just blockNo -> - if nBlocks > 600 - then liftIO $ atomically $ writeTVar (cStakeCreds ci) Map.empty - else do - initMp <- liftIO $ readTVarIO (cStakeCreds ci) - stakeAddrIdsToDelete <- DB.queryStakeAddressIdsAfter blockNo deleteEq - let stakeAddrIdsSetToDelete = Set.fromList stakeAddrIdsToDelete - let !mp = Map.filter (`Set.notMember` stakeAddrIdsSetToDelete) initMp - liftIO $ atomically $ writeTVar (cStakeCreds ci) mp queryRewardAccountWithCache :: forall m.
MonadIO m => Cache -> CacheNew -> Ledger.RewardAcnt StandardCrypto diff --git a/cardano-db-sync/src/Cardano/DbSync/Config/Types.hs b/cardano-db-sync/src/Cardano/DbSync/Config/Types.hs index 3a7cd84eb..f452ab19f 100644 --- a/cardano-db-sync/src/Cardano/DbSync/Config/Types.hs +++ b/cardano-db-sync/src/Cardano/DbSync/Config/Types.hs @@ -71,6 +71,7 @@ data SyncNodeParams = SyncNodeParams , enpHasLedger :: !Bool , enpSkipFix :: !Bool , enpOnlyFix :: !Bool + , enpForceIndexes :: !Bool , enpMaybeRollback :: !(Maybe SlotNo) } diff --git a/cardano-db-sync/src/Cardano/DbSync/Database.hs b/cardano-db-sync/src/Cardano/DbSync/Database.hs index 76f58be68..88c25f3d7 100644 --- a/cardano-db-sync/src/Cardano/DbSync/Database.hs +++ b/cardano-db-sync/src/Cardano/DbSync/Database.hs @@ -27,6 +27,7 @@ import Cardano.DbSync.Default import Cardano.DbSync.Error import Cardano.DbSync.LedgerState import Cardano.DbSync.Metrics +import Cardano.DbSync.Rollback import Cardano.DbSync.Types import Cardano.DbSync.Util @@ -87,7 +88,7 @@ runActions env actions = do ([], DbFinish:_) -> do pure Done ([], DbRollBackToPoint chainSyncPoint serverTip resultVar : ys) -> do - deletedAllBlocks <- newExceptT $ rollbackToPoint env chainSyncPoint serverTip + deletedAllBlocks <- newExceptT $ prepareRollback env chainSyncPoint serverTip points <- if hasLedgerState env then lift $ rollbackLedger env chainSyncPoint else pure Nothing diff --git a/cardano-db-sync/src/Cardano/DbSync/Default.hs b/cardano-db-sync/src/Cardano/DbSync/Default.hs index 996c737e8..99d3744eb 100644 --- a/cardano-db-sync/src/Cardano/DbSync/Default.hs +++ b/cardano-db-sync/src/Cardano/DbSync/Default.hs @@ -5,7 +5,6 @@ {-# LANGUAGE OverloadedStrings #-} module Cardano.DbSync.Default ( insertListBlocks - , rollbackToPoint ) where @@ -45,6 +44,7 @@ import qualified Data.ByteString.Short as SBS import qualified Data.Map.Strict as Map import qualified Data.Set as Set import qualified Data.Strict.Maybe as Strict +-- import qualified Data.Text as Text import Database.Persist.SqlBackend.Internal import Database.Persist.SqlBackend.Internal.StatementCache @@ -98,13 +98,14 @@ applyAndInsertBlockMaybe env cblk = do insertBlock :: SyncEnv -> CardanoBlock -> ApplyResult -> Bool -> ExceptT SyncNodeError (ReaderT SqlBackend (LoggingT IO)) () -insertBlock env cblk applyRes logBlock = do +insertBlock env cblk applyRes firstAfterRollback = do !epochEvents <- liftIO $ atomically $ generateNewEpochEvents env (apSlotDetails applyRes) let !applyResult = applyRes { apEvents = sort $ epochEvents <> apEvents applyRes} let !details = apSlotDetails applyResult + liftIO $ migrateIndexesMaybe env details insertLedgerEvents env (sdEpochNo details) (apEvents applyResult) insertEpoch details - let shouldLog = hasEpochStartEvent (apEvents applyResult) || logBlock + let shouldLog = hasEpochStartEvent (apEvents applyResult) || firstAfterRollback let isMember poolId = Set.member poolId (apPoolsRegistered applyResult) case cblk of BlockByron blk -> @@ -170,6 +171,10 @@ insertLedgerEvents env currentEpochNo@(EpochNo curEpoch) = sqlBackend <- lift ask persistantCacheSize <- liftIO $ statementCacheSize $ connStmtMap sqlBackend liftIO . 
logInfo tracer $ "Persistant SQL Statement Cache size is " <> textShow persistantCacheSize +-- persistantTimes <- liftIO $ statementCacheGetTimes $ connStmtMap sqlBackend +-- liftIO $ writeFile ("/home/kostas/bench/" <> show (unEpochNo en)) $ +-- Text.unlines $ fmap (\(q,st) -> textShow st <> " " <> q) $ +-- sortOn (\(_, (tm, _)) -> tm) $ Map.toList persistantTimes stats <- liftIO $ textShowStats cache liftIO . logInfo tracer $ stats liftIO . logInfo tracer $ "Starting epoch " <> textShow (unEpochNo en) @@ -207,3 +212,9 @@ hasEpochStartEvent = any isNewEpoch LedgerNewEpoch {} -> True LedgerStartAtEpoch {} -> True _otherwise -> False + +migrateIndexesMaybe :: SyncEnv -> SlotDetails -> IO () +migrateIndexesMaybe env sd = when isWithinHalfHour $ do + runIndexMigrationsMaybe env + where + isWithinHalfHour = isSyncedWithinSeconds sd 1800 == SyncFollowing diff --git a/cardano-db-sync/src/Cardano/DbSync/Era/Shelley/Insert.hs b/cardano-db-sync/src/Cardano/DbSync/Era/Shelley/Insert.hs index 5e08aa25a..3eb79ce5a 100644 --- a/cardano-db-sync/src/Cardano/DbSync/Era/Shelley/Insert.hs +++ b/cardano-db-sync/src/Cardano/DbSync/Era/Shelley/Insert.hs @@ -117,7 +117,8 @@ insertShelleyBlock env shouldLog blk details isMember mNewEpoch stakeSlice = do let zippedTx = zip [0 .. ] (Generic.blkTxs blk) let txInserter = insertTx tracer cache (leNetwork lenv) isMember blkId (sdEpochNo details) (Generic.blkSlotNo blk) grouped <- foldM (\grouped (idx, tx) -> txInserter idx tx grouped) mempty zippedTx - insertBlockGroupedData tracer grouped + minIds <- insertBlockGroupedData tracer grouped + insertReverseIndex blkId minIds liftIO $ do let epoch = unEpochNo (sdEpochNo details) @@ -497,13 +498,12 @@ insertStakeAddress :: (MonadBaseControl IO m, MonadIO m) => DB.TxId -> Shelley.RewardAcnt StandardCrypto -> Maybe ByteString -> ReaderT SqlBackend m DB.StakeAddressId -insertStakeAddress txId rewardAddr stakeCredBs = +insertStakeAddress _txId rewardAddr stakeCredBs = DB.insertStakeAddress $ DB.StakeAddress { DB.stakeAddressHashRaw = addrBs , DB.stakeAddressView = Generic.renderRewardAcnt rewardAddr , DB.stakeAddressScriptHash = Generic.getCredentialScriptHash $ Ledger.getRwdCred rewardAddr - , DB.stakeAddressTxId = txId } where addrBs = fromMaybe (Ledger.serialiseRewardAcnt rewardAddr) stakeCredBs @@ -844,12 +844,11 @@ insertCostModel :: (MonadBaseControl IO m, MonadIO m) => DB.BlockId -> Map Language Ledger.CostModel -> ExceptT SyncNodeError (ReaderT SqlBackend m) DB.CostModelId -insertCostModel blkId cms = +insertCostModel _blkId cms = lift . DB.insertCostModel $ DB.CostModel { DB.costModelHash = Crypto.abstractHashToBytes $ Crypto.serializeCborHash cms , DB.costModelCosts = Text.decodeUtf8 $ LBS.toStrict $ Aeson.encode cms - , DB.costModelBlockId = blkId } insertEpochParam diff --git a/cardano-db-sync/src/Cardano/DbSync/Era/Shelley/Insert/Grouped.hs b/cardano-db-sync/src/Cardano/DbSync/Era/Shelley/Insert/Grouped.hs index 378f108c7..473a4fe20 100644 --- a/cardano-db-sync/src/Cardano/DbSync/Era/Shelley/Insert/Grouped.hs +++ b/cardano-db-sync/src/Cardano/DbSync/Era/Shelley/Insert/Grouped.hs @@ -7,6 +7,7 @@ module Cardano.DbSync.Era.Shelley.Insert.Grouped , MissingMaTxOut (..) , ExtendedTxOut (..) 
, insertBlockGroupedData + , insertReverseIndex , resolveTxInputs , resolveScriptHash ) where @@ -19,7 +20,7 @@ import qualified Data.List as List import Cardano.BM.Trace (Trace) -import Cardano.Db (DbLovelace (..), textShow) +import Cardano.Db (DbLovelace (..), minIdsToText, textShow) import qualified Cardano.Db as DB import qualified Cardano.DbSync.Era.Shelley.Generic as Generic @@ -66,17 +67,18 @@ instance Monoid BlockGroupedData where instance Semigroup BlockGroupedData where tgd1 <> tgd2 = BlockGroupedData (groupedTxIn tgd1 <> groupedTxIn tgd2) - (groupedTxOut tgd1 <> groupedTxOut tgd2) + (groupedTxOut tgd1 <> groupedTxOut tgd2) insertBlockGroupedData :: (MonadBaseControl IO m, MonadIO m) => Trace IO Text -> BlockGroupedData - -> ExceptT SyncNodeError (ReaderT SqlBackend m) () + -> ExceptT SyncNodeError (ReaderT SqlBackend m) DB.MinIds insertBlockGroupedData _tracer grouped = do txOutIds <- lift . DB.insertManyTxOut $ etoTxOut. fst <$> groupedTxOut grouped let maTxOuts = concatMap mkmaTxOuts $ zip txOutIds (snd <$> groupedTxOut grouped) - lift $ DB.insertManyMaTxOut maTxOuts - lift . DB.insertManyTxIn $ groupedTxIn grouped + maTxOutIds <- lift $ DB.insertManyMaTxOut maTxOuts + txInId <- lift . DB.insertManyTxIn $ groupedTxIn grouped + pure $ DB.MinIds (minimumMaybe txInId) (minimumMaybe txOutIds) (minimumMaybe maTxOutIds) where mkmaTxOuts :: (DB.TxOutId, [MissingMaTxOut]) -> [DB.MaTxOut] mkmaTxOuts (txOutId, mmtos) = mkmaTxOut txOutId <$> mmtos @@ -89,6 +91,16 @@ insertBlockGroupedData _tracer grouped = do , DB.maTxOutTxOutId = txOutId } +insertReverseIndex + :: (MonadBaseControl IO m, MonadIO m) + => DB.BlockId -> DB.MinIds -> ExceptT SyncNodeError (ReaderT SqlBackend m) () +insertReverseIndex blockId minIds = + void . lift . DB.insertReverseIndex $ + DB.ReverseIndex + { DB.reverseIndexBlockId = blockId + , DB.reverseIndexMinIds = minIdsToText minIds + } + -- | If we can't resolve from the db, we fall back to the provided outputs -- This happens when the input consumes an output introduced in the same block. resolveTxInputs @@ -129,3 +141,8 @@ resolveInMemory txIn = List.find matches matches eutxo = Generic.txInHash txIn == etoTxHash eutxo && Generic.txInIndex txIn == DB.txOutIndex (etoTxOut eutxo) + +minimumMaybe :: (Ord a, Foldable f) => f a -> Maybe a +minimumMaybe xs | null xs = Nothing | otherwise = Just $ minimum xs
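Note: DB.MinIds plus the reverse_index row written by insertReverseIndex are the core of the new rollback strategy: each block records the smallest tx_in, tx_out and ma_tx_out ids it inserted, so rolling a block back can delete by id range instead of relying on ON DELETE CASCADE references (which other parts of this PR remove). A minimal sketch of the idea, with ids simplified to Int and an assumed text encoding (the diff only shows that minIdsToText exists, not its format):

  import Data.List (intercalate)

  -- Hypothetical, simplified MinIds; the real type wraps typed database ids.
  data MinIds = MinIds
    { minTxInId :: Maybe Int
    , minTxOutId :: Maybe Int
    , minMaTxOutId :: Maybe Int
    }

  -- Assumed encoding for the reverse_index text column.
  minIdsToText :: MinIds -> String
  minIdsToText (MinIds a b c) =
    intercalate ":" (map (maybe "" show) [a, b, c])

  -- Rolling back a block then reduces to range deletes in the style of
  --   DELETE FROM tx_out WHERE id >= <recorded minTxOutId>
  -- which needs no ON DELETE reference on the table.
  main :: IO ()
  main = putStrLn (minIdsToText (MinIds (Just 7) (Just 120) Nothing))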
diff --git a/cardano-db-sync/src/Cardano/DbSync/Era/Shelley/Query.hs b/cardano-db-sync/src/Cardano/DbSync/Era/Shelley/Query.hs index e85ddd21f..6b2d6ba3b 100644 --- a/cardano-db-sync/src/Cardano/DbSync/Era/Shelley/Query.hs +++ b/cardano-db-sync/src/Cardano/DbSync/Era/Shelley/Query.hs @@ -9,10 +9,8 @@ module Cardano.DbSync.Era.Shelley.Query ( queryPoolHashId , queryStakeAddress , queryStakeRefPtr - , queryStakeDelegation , queryResolveInput , queryResolveInputCredentials - , queryPoolUpdateByBlock ) where @@ -52,28 +50,6 @@ queryStakeAddress addr = do pure (saddr ^. StakeAddressId) pure $ maybeToEither (DbLookupMessage $ "StakeAddress " <> renderByteArray addr) unValue (listToMaybe res) -queryStakeDelegation - :: MonadIO m - => Ptr - -> ReaderT SqlBackend m (Maybe StakeAddressId) -queryStakeDelegation (Ptr (SlotNo slot) (TxIx txIx) (CertIx certIx)) = do - res <- select $ do - (dlg :& tx :& blk) <- - from $ table @Delegation - `innerJoin` table @Tx - `on` (\(dlg :& tx) -> tx ^. TxId ==. dlg ^. DelegationTxId) - `innerJoin` table @Block - `on` (\(_dlg :& tx :& blk) -> blk ^. BlockId ==. tx ^. TxBlockId) - where_ (blk ^. BlockSlotNo ==. just (val slot)) - where_ (tx ^. TxBlockIndex ==. val (fromIntegral txIx)) - where_ (dlg ^. DelegationCertIndex ==. val (fromIntegral certIx)) - -- Need to order by BlockSlotNo descending for correct behavior when there are two - -- or more delegation certificates in a single epoch. - orderBy [desc (blk ^. BlockSlotNo)] - limit 1 - pure (dlg ^. DelegationAddrId) - pure $ unValue <$> listToMaybe res - queryResolveInput :: MonadIO m => Generic.TxIn -> ReaderT SqlBackend m (Either LookupFail (TxId, DbLovelace)) queryResolveInput txIn = queryTxOutValue (Generic.txInHash txIn, fromIntegral (Generic.txInIndex txIn)) diff --git a/cardano-db-sync/src/Cardano/DbSync/Fix/PlutusDataBytes.hs b/cardano-db-sync/src/Cardano/DbSync/Fix/PlutusDataBytes.hs index b16abe66d..f12b87e43 100644 --- a/cardano-db-sync/src/Cardano/DbSync/Fix/PlutusDataBytes.hs +++ b/cardano-db-sync/src/Cardano/DbSync/Fix/PlutusDataBytes.hs @@ -36,7 +36,8 @@ import qualified Cardano.Ledger.Babbage.TxBody as Babbage import qualified Cardano.Ledger.Core as Ledger import qualified Cardano.Ledger.Era as Ledger -import qualified Cardano.Db as DB +import Cardano.Db (textShow) +import qualified Cardano.Db.Old.V13_0 as DB_V_13_0 import Cardano.BM.Trace (Trace, logInfo, logWarning) @@ -88,25 +89,27 @@ getNextPointList fds = case fds of [] -> Nothing fd : _ -> Just $ fpdPrevPoint fd -getWrongPlutusData :: SyncEnv -> IO FixData -getWrongPlutusData env = do - dbBackend <- getBackend env - liftIO $ logInfo (getTrace env) $ mconcat +getWrongPlutusData :: + (MonadBaseControl IO m, MonadIO m) + => Trace IO Text + -> ReaderT SqlBackend m FixData +getWrongPlutusData tracer = do + liftIO $ logInfo tracer $ mconcat [ "Starting the fixing Plutus Data bytes procedure. This may take a couple of hours on mainnet if there are wrong values." , " You can skip it using --skip-plutus-data-fix." , " It will fix Datum and RedeemerData with wrong bytes. See more in Issue #1214 and #1278." , " This procedure makes resyncing unnecessary." ] - datumList <- DB.runDbIohkNoLogging dbBackend $ + datumList <- findWrongPlutusData - (getTrace env) "Datum" - DB.queryDatumCount DB.queryDatumPage (fmap f . DB.querydatumInfo . entityKey) - (DB.datumHash . entityVal) (DB.datumBytes . entityVal) - redeemerDataList <- DB.runDbIohkNoLogging dbBackend $ + tracer "Datum" + DB_V_13_0.queryDatumCount DB_V_13_0.queryDatumPage (fmap f . DB_V_13_0.querydatumInfo . entityKey) + (DB_V_13_0.datumHash . entityVal) (DB_V_13_0.datumBytes . entityVal) + redeemerDataList <- findWrongPlutusData - (getTrace env) "RedeemerData" - DB.queryRedeemerDataCount DB.queryRedeemerDataPage (fmap f . DB.queryRedeemerDataInfo . entityKey) - (DB.redeemerDataHash . entityVal) (DB.redeemerDataBytes . entityVal) + tracer "RedeemerData" + DB_V_13_0.queryRedeemerDataCount DB_V_13_0.queryRedeemerDataPage (fmap f . DB_V_13_0.queryRedeemerDataInfo . entityKey) + (DB_V_13_0.redeemerDataHash . entityVal) (DB_V_13_0.redeemerDataBytes .
entityVal) pure $ FixData datumList redeemerDataList where f queryRes = do @@ -122,7 +125,7 @@ findWrongPlutusData :: -> Text -> m Word64 -- query count -> (Int64 -> Int64 -> m [a]) -- query a page - -> (a -> m (Maybe CardanoPoint)) -- get point and previous block point + -> (a -> m (Maybe CardanoPoint)) -- get previous block point -> (a -> ByteString) -- get the hash -> (a -> ByteString) -- get the stored bytes -> m [FixPlutusData] @@ -131,11 +134,11 @@ findWrongPlutusData tracer tableName qCount qPage qGetInfo getHash getBytes = do ["Trying to find ", tableName, " with wrong bytes"] count <- qCount liftIO $ logInfo tracer $ mconcat - ["There are ", DB.textShow count, " ", tableName, ". Need to scan them all."] + ["There are ", textShow count, " ", tableName, ". Need to scan them all."] datums <- findRec False 0 [] liftIO $ logInfo tracer $ Text.concat - [ "Found ", DB.textShow (length datums), " ", tableName + [ "Found ", textShow (length datums), " ", tableName , " with mismatch between bytes and hash." ] pure datums @@ -143,14 +146,14 @@ findWrongPlutusData tracer tableName qCount qPage qGetInfo getHash getBytes = do findRec :: Bool -> Int64 -> [[FixPlutusData]] -> m [FixPlutusData] findRec printedSome offset acc = do when (mod offset (10 * limit) == 0 && offset > 0) $ - liftIO $ logInfo tracer $ mconcat ["Checked ", DB.textShow offset, " ", tableName] + liftIO $ logInfo tracer $ mconcat ["Checked ", textShow offset, " ", tableName] ls <- qPage offset limit ls' <- filterM checkValidBytes ls ls'' <- mapMaybeM convertToFixPlutusData ls' newPrintedSome <- if null ls' || printedSome then pure printedSome else do liftIO $ logInfo tracer $ Text.concat [ "Found some wrong values already. The oldest ones are (hash, bytes): " - , DB.textShow $ (\a -> (bsBase16Encode $ getHash a, bsBase16Encode $ getBytes a)) <$> take 5 ls' + , textShow $ (\a -> (bsBase16Encode $ getHash a, bsBase16Encode $ getBytes a)) <$> take 5 ls' ] pure True let !newAcc = ls'' : acc @@ -162,7 +165,7 @@ findWrongPlutusData tracer tableName qCount qPage qGetInfo getHash getBytes = do checkValidBytes a = case mHashedBytes of Left msg -> do liftIO $ logWarning tracer $ - Text.concat ["Invalid Binary Data for hash ", DB.textShow actualHash , ": ", Text.pack msg] + Text.concat ["Invalid Binary Data for hash ", textShow actualHash , ": ", Text.pack msg] pure False Right hashedBytes -> pure $ hashedBytes /= actualHash where @@ -183,35 +186,32 @@ findWrongPlutusData tracer tableName qCount qPage qGetInfo getHash getBytes = do limit = 100_000 -fixPlutusData :: SyncEnv -> CardanoBlock -> FixData -> IO () -fixPlutusData env cblk fds = do - dbBackend <- liftIO $ getBackend env - DB.runDbIohkNoLogging dbBackend $ do - mapM_ (fixData True) $ fdDatum fds - mapM_ (fixData False) $ fdRedeemerData fds +fixPlutusData :: MonadIO m => Trace IO Text -> CardanoBlock -> FixData -> ReaderT SqlBackend m () +fixPlutusData tracer cblk fds = do + mapM_ (fixData True) $ fdDatum fds + mapM_ (fixData False) $ fdRedeemerData fds where fixData :: MonadIO m => Bool -> FixPlutusData -> ReaderT SqlBackend m () fixData isDatum fd = do case Map.lookup (fpdHash fd) correctBytesMap of Nothing -> pure () Just correctBytes | isDatum -> do - mDatumId <- DB.queryDatum $ fpdHash fd + mDatumId <- DB_V_13_0.queryDatum $ fpdHash fd case mDatumId of Just datumId -> - DB.upateDatumBytes datumId correctBytes + DB_V_13_0.upateDatumBytes datumId correctBytes Nothing -> liftIO $ logWarning tracer $ mconcat ["Datum", " not found in block"] Just correctBytes -> do - 
mRedeemerDataId <- DB.queryRedeemerData $ fpdHash fd + mRedeemerDataId <- DB_V_13_0.queryRedeemerData $ fpdHash fd case mRedeemerDataId of Just redeemerDataId -> - DB.upateRedeemerDataBytes redeemerDataId correctBytes + DB_V_13_0.upateRedeemerDataBytes redeemerDataId correctBytes Nothing -> liftIO $ logWarning tracer $ mconcat ["RedeemerData", " not found in block"] - tracer = getTrace env correctBytesMap = Map.union (scrapDatumsBlock cblk) (scrapRedeemerDataBlock cblk) scrapDatumsBlock :: CardanoBlock -> Map ByteString ByteString diff --git a/cardano-db-sync/src/Cardano/DbSync/Rollback.hs b/cardano-db-sync/src/Cardano/DbSync/Rollback.hs index 291d14ec9..fbb00a70f 100644 --- a/cardano-db-sync/src/Cardano/DbSync/Rollback.hs +++ b/cardano-db-sync/src/Cardano/DbSync/Rollback.hs @@ -1,10 +1,9 @@ {-# LANGUAGE GADTs #-} {-# LANGUAGE NoImplicitPrelude #-} {-# LANGUAGE OverloadedStrings #-} -{-# LANGUAGE TupleSections #-} module Cardano.DbSync.Rollback - ( rollbackToPoint + ( prepareRollback , rollbackFromBlockNo , unsafeRollback ) where @@ -30,8 +29,11 @@ import Ouroboros.Consensus.HardFork.Combinator.AcrossEras (getOneEraHa import Ouroboros.Network.Block import Ouroboros.Network.Point +-- Rollbacks are done in an Era generic way based on the 'Point' we are +-- rolling back to. rollbackFromBlockNo :: MonadIO m => SyncEnv -> BlockNo -> ExceptT SyncNodeError (ReaderT SqlBackend m) () rollbackFromBlockNo env blkNo = do + lift $ rollbackCache cache nBlocks <- lift $ DB.queryBlockCountAfterBlockNo (unBlockNo blkNo) True mBlockId <- lift $ DB.queryBlockNo (unBlockNo blkNo) whenStrictJust (maybeToStrict mBlockId) $ \blockId -> do @@ -41,87 +43,49 @@ rollbackFromBlockNo env blkNo = do , " or equal to " , textShow blkNo ] - lift $ rollbackCache cache (Just $ unBlockNo blkNo) True (fromIntegral nBlocks) - deleted <- lift $ DB.deleteCascadeAfter blockId True - liftIO . logInfo trce $ - if deleted - then "Blocks deleted" - else "No blocks need to be deleted" + lift $ DB.deleteBlocksBlockId trce blockId + liftIO . logInfo trce $ "Blocks deleted" where trce = getTrace env cache = envCache env --- Rollbacks are done in an Era generic way based on the 'Point' we are --- rolling back to. -rollbackToPoint :: SyncEnv -> CardanoPoint -> Tip CardanoBlock -> IO (Either SyncNodeError Bool) -rollbackToPoint env point serverTip = do +prepareRollback :: SyncEnv -> CardanoPoint -> Tip CardanoBlock -> IO (Either SyncNodeError Bool) +prepareRollback env point serverTip = do backend <- getBackend env DB.runDbIohkNoLogging backend $ runExceptT action where trce = getTrace env - cache = envCache env - - slotsToDelete :: MonadIO m => WithOrigin SlotNo -> ReaderT SqlBackend m (Maybe SlotNo, Word64) - slotsToDelete wosl = - case wosl of - Origin -> do - mSlotNo <- DB.queryLastSlotNo - countSlotNos <- DB.queryCountSlotNo - pure (mSlotNo, countSlotNos) - At sl -> do - mSlotNo <- DB.queryLastSlotNoGreaterThan (unSlotNo sl) - countSlotNos <- DB.queryCountSlotNosGreaterThan (unSlotNo sl) - pure (mSlotNo, countSlotNos) action :: MonadIO m => ExceptT SyncNodeError (ReaderT SqlBackend m) Bool action = do - (mSlotNo, nBlocks) <- lift $ slotsToDelete (pointSlot point) - (prevId, mBlockNo) <- liftLookupFail "Rollback.rollbackToPoint" $ queryBlock point - - if nBlocks <= 50 || not (hasLedgerState env) then do - liftIO . logInfo trce $ "Rolling back to " <> renderPoint point - whenStrictJust (maybeToStrict mSlotNo) $ \slotNo -> - -- there may be more deleted blocks than slots, because ebbs don't have - -- a slot. 
We can only make an estimation here. + case getPoint point of + Origin -> do + nBlocks <- lift DB.queryCountSlotNo + if nBlocks == 0 then do + liftIO . logInfo trce $ "Starting from Genesis" + else do liftIO . logInfo trce $ - mconcat - [ "Deleting ", textShow nBlocks, " blocks up to slot " - , textShow (unSlotNo slotNo) - ] - -- We delete the block right after the point we rollback to. This delete - -- should cascade to the rest of the chain. - - -- 'length xs' here gives an approximation of the blocks deleted. An approximation - -- is good enough, since it is only used to decide on the best policy and is not - -- important for correctness. - -- We need to first cleanup the cache and then delete the blocks from db. - lift $ rollbackCache cache mBlockNo False (fromIntegral nBlocks) - deleted <- lift $ DB.deleteCascadeAfter prevId False - liftIO . logInfo trce $ - if deleted - then "Blocks deleted" - else "No blocks need to be deleted" - pure True - else do + mconcat + [ "Delaying delete of ", textShow nBlocks + , " blocks while rolling back to genesis." + , " Applying blocks until a new block is found." + , " The node is currently at " + , textShow serverTip + ] + At blk -> do + nBlocks <- lift $ DB.queryCountSlotNosGreaterThan (unSlotNo $ blockPointSlot blk) + mBlockNo <- liftLookupFail "Rollback.prepareRollback" $ + DB.queryBlockHashBlockNo (SBS.fromShort . getOneEraHash $ blockPointHash blk) liftIO . logInfo trce $ mconcat [ "Delaying delete of ", textShow nBlocks, " blocks after " , textShow mBlockNo, " while rolling back to (" , renderPoint point , "). Applying blocks until a new block is found. The node is currently at ", textShow serverTip ] - pure False - -queryBlock :: MonadIO m => Point CardanoBlock - -> ReaderT SqlBackend m (Either DB.LookupFail (DB.BlockId, Maybe Word64)) -queryBlock pnt = do - case getPoint pnt of - Origin -> - fmap (, Nothing) <$> DB.queryGenesis - At blkPoint -> - DB.queryBlockNoId (SBS.fromShort . getOneEraHash $ blockPointHash blkPoint) + pure False
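Note: prepareRollback now only counts the affected blocks and logs its intent; the deletion itself is deferred until the node serves the first block past the rollback point and rollbackFromBlockNo runs. That is why the Babbage tests above now assert the pre-rollback block counts. A toy model of the observable behaviour, with all names hypothetical:

  -- Toy model of delayed rollbacks: a rollback request only records the
  -- point; rows are removed when the next block arrives.
  data DbModel = DbModel
    { dbBlocks :: [Int] -- block numbers currently stored
    , dbPending :: Maybe Int -- rollback point waiting to be applied
    } deriving Show

  requestRollback :: Int -> DbModel -> DbModel
  requestRollback point db = db { dbPending = Just point }

  applyBlock :: Int -> DbModel -> DbModel
  applyBlock blk db =
    case dbPending db of
      Just point ->
        -- Drop everything after the rollback point, then append the new
        -- block (the real code runs rollbackFromBlockNo before inserting).
        DbModel (filter (<= point) (dbBlocks db) ++ [blk]) Nothing
      Nothing -> db { dbBlocks = dbBlocks db ++ [blk] }

  main :: IO ()
  main = do
    let db0 = DbModel [1 .. 206] Nothing
        db1 = requestRollback 201 db0
    print (length (dbBlocks db1)) -- still 206: deletion is delayed
    let db2 = applyBlock 202 db1
    print (length (dbBlocks db2)) -- 202: rollback applied with the new block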
-- For testing and debugging. unsafeRollback :: Trace IO Text -> DB.PGConfig -> SlotNo -> IO (Either SyncNodeError ()) unsafeRollback trce config slotNo = do logInfo trce $ "Forced rollback to slot " <> textShow (unSlotNo slotNo) - Right <$> DB.runDbNoLogging (DB.PGPassCached config) (void $ DB.deleteCascadeSlotNo slotNo) + Right <$> DB.runDbNoLogging (DB.PGPassCached config) (void $ DB.deleteBlocksSlotNo trce slotNo) diff --git a/cardano-db-sync/src/Cardano/DbSync/Sync.hs b/cardano-db-sync/src/Cardano/DbSync/Sync.hs index f88ff856c..ae2402358 100644 --- a/cardano-db-sync/src/Cardano/DbSync/Sync.hs +++ b/cardano-db-sync/src/Cardano/DbSync/Sync.hs @@ -65,7 +65,7 @@ import Data.Functor.Contravariant (contramap) import qualified Data.List as List import qualified Data.Text as Text -import Database.Persist.Postgresql (ConnectionString, withPostgresqlConn) +import Database.Persist.Postgresql (ConnectionString, SqlBackend, withPostgresqlConn) import Network.Mux (MuxTrace, WithMuxBearer) import Network.Mux.Types (MuxMode (..)) @@ -110,6 +110,7 @@ import Ouroboros.Network.Subscription (SubscriptionTrace) import System.Directory (createDirectoryIfMissing) +import Cardano.Db (runDbIohkLogging) runSyncNode :: MetricSetters -> Trace IO Text -> IOManager -> Bool -> Word64 -> Word64 -> ConnectionString + -> Bool + -> RunMigration -> SyncNodeParams -> IO () -runSyncNode metricsSetters trce iomgr aop snEveryFollowing snEveryLagging dbConnString enp = do +runSyncNode metricsSetters trce iomgr aop snEveryFollowing snEveryLagging dbConnString ranAll runMigration enp = do let configFile = enpConfigFile enp enc <- readSyncNodeConfig configFile @@ -135,6 +138,9 @@ runSyncNode metricsSetters trce iomgr aop snEveryFollowing snEveryLagging dbConn orDie renderSyncNodeError $ do genCfg <- readCardanoGenesisConfig enc logProtocolMagicId trce $ genesisProtocolMagicId genCfg + syncEnv <- ExceptT $ mkSyncEnvFromConfig trce dbConnString + (SyncOptions (enpExtended enp) aop (enpHasCache enp) (enpHasLedger enp) (enpSkipFix enp) (enpOnlyFix enp) snEveryFollowing snEveryLagging) + (enpLedgerStateDir enp) genCfg ranAll (enpForceIndexes enp) runMigration -- If the DB is empty it will be inserted, otherwise it will be validated (to make -- sure we are on the right chain).
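Note: both delayed jobs introduced in this PR share the same once-only guard: a TVar Bool in SyncEnv records whether the work has run; setIsFixedAndMigrate fires the Fix migrations after the Plutus-data repair below, and runIndexMigrationsMaybe fires the Indexes migrations once insertBlock observes the chain within 1800 seconds of the tip. A self-contained sketch of that guard pattern (Env and main are hypothetical simplifications):

  import Control.Concurrent.STM
  import Control.Monad (unless)

  -- Fix mirrors the post-repair migration; Indexes the near-tip index creation.
  data MigrationToRun = Fix | Indexes deriving Show

  -- Simplified stand-in for SyncEnv: just the delayed runner and its guard.
  data Env = Env
    { envRunDelayedMigration :: MigrationToRun -> IO ()
    , envIndexes :: TVar Bool -- True once the index migrations have run
    }

  -- Analogue of runIndexMigrationsMaybe: run the Indexes migrations at most
  -- once, flipping the guard afterwards.
  runIndexMigrationsMaybe :: Env -> IO ()
  runIndexMigrationsMaybe env = do
    haveRan <- readTVarIO (envIndexes env)
    unless haveRan $ do
      envRunDelayedMigration env Indexes
      atomically $ writeTVar (envIndexes env) True

  main :: IO ()
  main = do
    guardVar <- newTVarIO False
    let env = Env (\m -> putStrLn ("Running " <> show m <> " migrations")) guardVar
    runIndexMigrationsMaybe env -- runs the migrations
    runIndexMigrationsMaybe env -- no-op: guard is already set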
@@ -147,9 +153,7 @@ runSyncNode metricsSetters trce iomgr aop snEveryFollowing snEveryLagging dbConn case genCfg of GenesisCardano {} -> do - syncEnv <- ExceptT $ mkSyncEnvFromConfig trce dbConnString - (SyncOptions (enpExtended enp) aop (enpHasCache enp) (enpHasLedger enp) (enpSkipFix enp) (enpOnlyFix enp) snEveryFollowing snEveryLagging) - (enpLedgerStateDir enp) genCfg + liftIO $ runSyncNodeClient metricsSetters syncEnv iomgr trce (enpSocketPath enp) where useShelleyInit :: SyncNodeConfig -> Bool @@ -232,16 +236,25 @@ dbSyncProtocols trce env metricsSetters _version codecs _connectionId = Db.runIohkLogging trce $ withPostgresqlConn (envConnString env) $ \backend -> liftIO $ do replaceConnection env backend setConsistentLevel env Unchecked - unless (soptSkipFix $ envOptions env) $ do - fd <- getWrongPlutusData env + + isFixed <- getIsSyncFixed env + let skipFix = soptSkipFix $ envOptions env + let onlyFix = soptOnlyFix $ envOptions env + if onlyFix || (not isFixed && not skipFix) then do + when (onlyFix && isFixed) $ logInfo trce "Running once more to validate" + fd <- runDbIohkLogging backend (getTrace env) $ getWrongPlutusData (getTrace env) unless (nullData fd) $ void $ runPeer localChainSyncTracer (cChainSyncCodec codecs) channel - (Client.chainSyncClientPeer $ chainSyncClientFix env fd) + (Client.chainSyncClientPeer $ + chainSyncClientFix backend (getTrace env) fd) + setIsFixedAndMigrate env + when onlyFix exitSuccess - unless (soptOnlyFix $ envOptions env) $ do + else do + when skipFix $ setIsFixedAndMigrate env -- The Db thread is not forked at this point, so we can use -- the connection here. A connection cannot be used concurrently by many -- threads @@ -408,18 +421,17 @@ drainThePipe n0 client = go n0 , recvMsgRollBackward = \_pt _tip -> pure $ go n' } -chainSyncClientFix :: SyncEnv -> FixData -> ChainSyncClient CardanoBlock (Point CardanoBlock) (Tip CardanoBlock) IO () -chainSyncClientFix env fixData = Client.ChainSyncClient $ do +chainSyncClientFix + :: SqlBackend -> Trace IO Text -> FixData -> ChainSyncClient CardanoBlock (Point CardanoBlock) (Tip CardanoBlock) IO () +chainSyncClientFix backend tracer fixData = Client.ChainSyncClient $ do liftIO $ logInfo tracer "Starting chainsync to fix Plutus Data. This will update database values in tables datum and redeemer_data." 
clientStIdle True (sizeFixData fixData) fixData where - tracer = getTrace env - updateSizeAndLog :: Int -> Int -> IO Int updateSizeAndLog lastSize currentSize = do let diffSize = lastSize - currentSize if lastSize >= currentSize && diffSize >= 200_000 then do - logInfo tracer $ mconcat ["Fixed ", textShow (sizeFixData fixData - currentSize), " Plutus Data"] + liftIO $ logInfo tracer $ mconcat ["Fixed ", textShow (sizeFixData fixData - currentSize), " Plutus Data"] pure currentSize else pure lastSize @@ -431,8 +443,8 @@ chainSyncClientFix env fixData = Client.ChainSyncClient $ do pure $ Client.SendMsgDone () Just (point, fdOnPoint, fdRest) -> do when shouldLog $ - logInfo tracer $ mconcat ["Starting fixing Plutus Data ", textShow point] - newLastSize <- updateSizeAndLog lastSize (sizeFixData fds) + liftIO $ logInfo tracer $ mconcat ["Starting fixing Plutus Data ", textShow point] + newLastSize <- liftIO $ updateSizeAndLog lastSize (sizeFixData fds) pure $ Client.SendMsgFindIntersect [point] (Client.ClientStIntersect @@ -445,7 +457,7 @@ chainSyncClientFix env fixData = Client.ChainSyncClient $ do clientStNext lastSize fdOnPoint fdRest = Client.ClientStNext { Client.recvMsgRollForward = \blk _tip -> Client.ChainSyncClient $ do - fixPlutusData env blk fdOnPoint + runDbIohkLogging backend tracer $ fixPlutusData tracer blk fdOnPoint clientStIdle False lastSize fdRest , Client.recvMsgRollBackward = \_point _tip -> Client.ChainSyncClient $ diff --git a/cardano-db-tool/app/cardano-db-tool.hs b/cardano-db-tool/app/cardano-db-tool.hs index c4047f0a9..0d76794b4 100644 --- a/cardano-db-tool/app/cardano-db-tool.hs +++ b/cardano-db-tool/app/cardano-db-tool.hs @@ -58,7 +58,7 @@ runCommand cmd = CmdRollback slotNo -> runRollback slotNo CmdRunMigrations mdir mldir -> do pgConfig <- orDie renderPGPassError $ newExceptT (readPGPass PGPassDefaultEnv) - unofficial <- runMigrations pgConfig False mdir mldir + unofficial <- runMigrations pgConfig False mdir mldir Initial unless (null unofficial) $ putStrLn $ "Unofficial migration scripts found: " ++ show unofficial CmdUtxoSetAtBlock blkid -> utxoSetAtSlot blkid @@ -76,7 +76,7 @@ runCreateMigration mdir = do runRollback :: SlotNo -> IO () runRollback slotNo = - print =<< runDbNoLoggingEnv (deleteCascadeSlotNo slotNo) + print =<< runDbNoLoggingEnv (deleteBlocksSlotNoNoTrace slotNo) runVersionCommand :: IO () runVersionCommand = do diff --git a/cardano-db-tool/src/Cardano/DbTool/PrepareSnapshot.hs b/cardano-db-tool/src/Cardano/DbTool/PrepareSnapshot.hs index 632e30d82..0c62419db 100644 --- a/cardano-db-tool/src/Cardano/DbTool/PrepareSnapshot.hs +++ b/cardano-db-tool/src/Cardano/DbTool/PrepareSnapshot.hs @@ -18,17 +18,12 @@ import Ouroboros.Network.Block hiding (blockHash) import Paths_cardano_db_tool (version) -import System.IO (hFlush, stdout) - newtype PrepareSnapshotArgs = PrepareSnapshotArgs { unPrepareSnapshotArgs :: LedgerStateDir } runPrepareSnapshot :: PrepareSnapshotArgs -> IO () -runPrepareSnapshot = runPrepareSnapshotAux True - -runPrepareSnapshotAux :: Bool -> PrepareSnapshotArgs -> IO () -runPrepareSnapshotAux firstTry args = do +runPrepareSnapshot args = do ledgerFiles <- listLedgerStateFilesOrdered (unPrepareSnapshotArgs args) mblock <- runDbNoLoggingEnv queryLatestBlock case mblock of @@ -49,36 +44,17 @@ runPrepareSnapshotAux firstTry args = do , " (full ", show (Base16.encode bHash), ")" , " and the closest ledger state file is at " , show (lsfSlotNo file), " ", show (lsfHash file) + , ". 
DBSync no longer requires them to match and " + , "no rollback will be performed." ] - if firstTry then do - interactiveRollback $ lsfSlotNo file - runPrepareSnapshotAux False args - else - putStrLn "After a rollback the db is in sync with no ledger state file" + let bblockNo = fromMaybe 0 $ blockBlockNo block + printCreateSnapshot bblockNo (lsfFilePath file) (_, []) -> - putStrLn "No ledger state file matches the db tip. You need to run db-sync before creating a snapshot" + putStrLn "No ledger state file before the tip found. Snapshots without ledger are not supported yet." _ -> do putStrLn "The db is empty. You need to sync from genesis and then create a snapshot." where - interactiveRollback :: SlotNo -> IO () - interactiveRollback slot = do - putStr $ "Do you want to rollback the db to " ++ show slot ++ " (Y/n): " - hFlush stdout - input <- getLine - case input of - "n" -> pure () - _ -> do - putStrLn $ "Rolling back to " ++ show slot - runRollback slot - putStrLn "Rolling back done. Revalidating from scratch" - putStrLn "" - - runRollback :: SlotNo -> IO () - runRollback slot = runDbNoLoggingEnv $ do - slots <- querySlotNosGreaterThan $ unSlotNo slot - mapM_ deleteCascadeSlotNo slots - printNewerSnapshots :: [LedgerStateFile] -> IO () printNewerSnapshots newerFiles = do unless (null newerFiles) $ diff --git a/cardano-db-tool/src/Cardano/DbTool/Validate/TxAccounting.hs b/cardano-db-tool/src/Cardano/DbTool/Validate/TxAccounting.hs index 3d927eed2..da960c23d 100644 --- a/cardano-db-tool/src/Cardano/DbTool/Validate/TxAccounting.hs +++ b/cardano-db-tool/src/Cardano/DbTool/Validate/TxAccounting.hs @@ -54,8 +54,8 @@ data ValidateError = ValidateError } randomTxIds :: Int -> (Word64, Word64) -> IO [Word64] -randomTxIds c (minTxId, maxIxId) = - List.sort <$> replicateM c (Random.randomRIO (minTxId, maxIxId)) +randomTxIds c (minTxId', maxIxId) = + List.sort <$> replicateM c (Random.randomRIO (minTxId', maxIxId)) reportError :: ValidateError -> String reportError ve = diff --git a/cardano-db/cardano-db.cabal b/cardano-db/cardano-db.cabal index abb8afc51..7a2aa6f06 100644 --- a/cardano-db/cardano-db.cabal +++ b/cardano-db/cardano-db.cabal @@ -32,6 +32,7 @@ library -Wunused-packages exposed-modules: Cardano.Db + Cardano.Db.Old.V13_0 other-modules: Cardano.Db.Delete Cardano.Db.Error @@ -40,6 +41,9 @@ library Cardano.Db.Migration Cardano.Db.Migration.Haskell Cardano.Db.Migration.Version + Cardano.Db.MinId + Cardano.Db.Old.V13_0.Schema + Cardano.Db.Old.V13_0.Query Cardano.Db.Query Cardano.Db.Run Cardano.Db.RevFromGit @@ -48,7 +52,6 @@ library Cardano.Db.Schema.Orphans Cardano.Db.Text Cardano.Db.Types - Cardano.Db.Update Cardano.Db.Version build-depends: aeson @@ -59,6 +62,7 @@ library , cardano-crypto-class , cardano-ledger-core , cardano-ledger-shelley + , cardano-prelude , cardano-slotting , containers , conduit @@ -76,7 +80,6 @@ library , memory , monad-control , monad-logger - , ouroboros-network , persistent , persistent-documentation , persistent-postgresql diff --git a/cardano-db/src/Cardano/Db.hs b/cardano-db/src/Cardano/Db.hs index 986fd4143..46a0d2048 100644 --- a/cardano-db/src/Cardano/Db.hs +++ b/cardano-db/src/Cardano/Db.hs @@ -16,6 +16,7 @@ import Cardano.Db.Error as X import Cardano.Db.Insert as X import Cardano.Db.Migration as X import Cardano.Db.Migration.Version as X +import Cardano.Db.MinId as X import Cardano.Db.PGConfig as X import Cardano.Db.Query as X import Cardano.Db.Run as X @@ -23,5 +24,4 @@ import Cardano.Db.Schema as X import Cardano.Db.Schema.Types as X import 
Cardano.Db.Text as X import Cardano.Db.Types as X -import Cardano.Db.Update as X import Cardano.Db.Version (gitRev) diff --git a/cardano-db/src/Cardano/Db/Delete.hs b/cardano-db/src/Cardano/Db/Delete.hs index 178f54c12..44723851d 100644 --- a/cardano-db/src/Cardano/Db/Delete.hs +++ b/cardano-db/src/Cardano/Db/Delete.hs @@ -1,59 +1,138 @@ +{-# LANGUAGE FlexibleContexts #-} +{-# LANGUAGE GADTs #-} +{-# LANGUAGE OverloadedStrings #-} +{-# LANGUAGE RankNTypes #-} +{-# LANGUAGE ScopedTypeVariables #-} +{-# LANGUAGE TypeApplications #-} + module Cardano.Db.Delete - ( deleteCascadeBlock - , deleteCascadeAfter - , deleteCascadeBlockNo - , deleteCascadeSlotNo + ( deleteBlocksSlotNo + , deleteBlocksSlotNoNoTrace , deleteDelistedPool + , deleteBlocksBlockId + , deleteBlock ) where import Cardano.Slotting.Slot (SlotNo (..)) -import Control.Monad.IO.Class (MonadIO) +import Control.Monad.Extra (whenJust) +import Control.Monad.IO.Class (MonadIO, liftIO) import Control.Monad.Trans.Reader (ReaderT) -import Database.Persist.Sql (SqlBackend, delete, selectKeysList, (!=.), (==.)) +import Cardano.BM.Trace (Trace, logWarning, nullTracer) +import Database.Esqueleto.Experimental (PersistEntity, PersistField, persistIdField) +import Database.Persist.Sql (PersistEntityBackend, SqlBackend, delete, selectKeysList, (==.), (>=.)) +import Database.Persist.Class.PersistQuery (deleteWhere) import Data.ByteString (ByteString) +import Data.Maybe (isJust) +import Data.Text (Text) +import Cardano.Db.MinId +import Cardano.Db.Query hiding (isJust) import Cardano.Db.Schema -import Ouroboros.Network.Block (BlockNo (..)) +deleteBlocksSlotNoNoTrace :: MonadIO m => SlotNo -> ReaderT SqlBackend m Bool +deleteBlocksSlotNoNoTrace = deleteBlocksSlotNo nullTracer -- | Delete a block if it exists. Returns 'True' if it did exist and has been -- deleted and 'False' if it did not exist. -deleteCascadeBlock :: MonadIO m => Block -> ReaderT SqlBackend m Bool -deleteCascadeBlock block = do - keys <- selectKeysList [ BlockHash ==. blockHash block ] [] - mapM_ delete keys - pure $ not (null keys) +deleteBlocksSlotNo :: MonadIO m => Trace IO Text -> SlotNo -> ReaderT SqlBackend m Bool +deleteBlocksSlotNo trce (SlotNo slotNo) = do + mBlockId <- queryBlockSlotNo slotNo + case mBlockId of + Nothing -> pure False + Just blockId -> do + deleteBlocksBlockId trce blockId + pure True --- | Delete a block after the specified 'BlockId'. Returns 'True' if it did exist and has been --- deleted and 'False' if it did not exist. -deleteCascadeAfter :: MonadIO m => BlockId -> Bool -> ReaderT SqlBackend m Bool -deleteCascadeAfter bid deleteEq = do - -- Genesis artificial blocks are not deleted (Byron or Shelley) since they have null epoch - keys <- - if deleteEq - then selectKeysList [ BlockId ==. bid, BlockEpochNo !=. Nothing ] [] - else selectKeysList [ BlockPreviousId ==. Just bid, BlockEpochNo !=. Nothing ] [] - mapM_ delete keys - pure $ not (null keys) +-- | Delete starting from a 'BlockId'. +deleteBlocksBlockId :: MonadIO m => Trace IO Text -> BlockId -> ReaderT SqlBackend m () +deleteBlocksBlockId trce blockId = do + mMinIds <- fmap (textToMinId =<<) <$> queryReverseIndexBlockId blockId + (cminIds, completed) <- findMinIdsRec mMinIds mempty + mTxId <- queryMinRefId TxBlockId blockId + minIds <- if completed then pure cminIds else completeMinId mTxId cminIds + deleteTablesAfterBlockId blockId mTxId minIds --- | Delete a block if it exists. Returns 'True' if it did exist and has been --- deleted and 'False' if it did not exist. 
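The deletion above is driven by MinIds values serialised into the reverse_index table when blocks were inserted; they round-trip through minIdsToText/textToMinId from the new Cardano.Db.MinId module later in this diff. A sketch of both sides, assuming the ReverseIndex record pairs a BlockId with the encoded text as the queries in this diff suggest (wiring and imports are illustrative):

-- Write side: remember the smallest ids a block introduced. The keys returned
-- by the insertMany' variants arrive in insertion order, so the head is the minimum.
recordReverseIndex
    :: (MonadBaseControl IO m, MonadIO m)
    => BlockId -> [TxInId] -> [TxOutId] -> [MaTxOutId] -> ReaderT SqlBackend m ()
recordReverseIndex blkId txInIds txOutIds maTxOutIds = do
  let minIds = MinIds (listToMaybe txInIds) (listToMaybe txOutIds) (listToMaybe maTxOutIds)
  void . insertReverseIndex $ ReverseIndex blkId (minIdsToText minIds)

-- Read side: "7:42:" decodes to known TxIn and TxOut minima but no MaTxOut
-- minimum, which is why deleteBlocksBlockId may still fall back to completeMinId:
-- textToMinId "7:42:" == Just (MinIds (Just (toSqlKey 7)) (Just (toSqlKey 42)) Nothing)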
-deleteCascadeBlockNo :: MonadIO m => BlockNo -> ReaderT SqlBackend m Bool
-deleteCascadeBlockNo (BlockNo blockNo) = do
-  keys <- selectKeysList [ BlockBlockNo ==. Just blockNo ] []
-  mapM_ delete keys
-  pure $ not (null keys)
+  where
+    findMinIdsRec :: MonadIO m => [Maybe MinIds] -> MinIds -> ReaderT SqlBackend m (MinIds, Bool)
+    findMinIdsRec [] minIds = pure (minIds, True)
+    findMinIdsRec (mMinIds : rest) minIds =
+      case mMinIds of
+        Nothing -> do
+          liftIO $ logWarning trce
+            "Failed to find ReverseIndex. Deletion may take longer."
+          pure (minIds, False)
+        Just minIdDB -> do
+          let minIds' = minIds <> minIdDB
+          if isComplete minIds'
+            then pure (minIds', True)
+            else findMinIdsRec rest minIds'

--- | Delete a block if it exists. Returns 'True' if it did exist and has been
--- deleted and 'False' if it did not exist.
-deleteCascadeSlotNo :: MonadIO m => SlotNo -> ReaderT SqlBackend m Bool
-deleteCascadeSlotNo (SlotNo slotNo) = do
-  keys <- selectKeysList [ BlockSlotNo ==. Just slotNo ] []
-  mapM_ delete keys
-  pure $ not (null keys)
+    isComplete (MinIds m1 m2 m3) = isJust m1 && isJust m2 && isJust m3
+
+completeMinId :: MonadIO m => Maybe TxId -> MinIds -> ReaderT SqlBackend m MinIds
+completeMinId mTxId minIds = do
+  case mTxId of
+    Nothing -> pure mempty
+    Just txId -> do
+      mTxInId <- whenNothingQueryMinRefId (minTxInId minIds) TxInTxInId txId
+      mTxOutId <- whenNothingQueryMinRefId (minTxOutId minIds) TxOutTxId txId
+      mMaTxOutId <- case mTxOutId of
+        Nothing -> pure Nothing
+        Just txOutId -> whenNothingQueryMinRefId (minMaTxOutId minIds) MaTxOutTxOutId txOutId
+      pure $ MinIds mTxInId mTxOutId mMaTxOutId
+
+deleteTablesAfterBlockId :: MonadIO m => BlockId -> Maybe TxId -> MinIds -> ReaderT SqlBackend m ()
+deleteTablesAfterBlockId blkId mtxId minIds = do
+  deleteWhere [AdaPotsBlockId >=. blkId]
+  deleteWhere [ReverseIndexBlockId >=. blkId]
+  deleteWhere [EpochParamBlockId >=. blkId]
+  deleteTablesAfterTxId mtxId (minTxInId minIds) (minTxOutId minIds) (minMaTxOutId minIds)
+  deleteWhere [BlockId >=. blkId]
+
+deleteTablesAfterTxId :: MonadIO m => Maybe TxId -> Maybe TxInId -> Maybe TxOutId -> Maybe MaTxOutId -> ReaderT SqlBackend m ()
+deleteTablesAfterTxId mtxId mtxInId mtxOutId mmaTxOutId = do
+  whenJust mtxInId $ \txInId -> deleteWhere [TxInId >=. txInId]
+  whenJust mmaTxOutId $ \maTxOutId -> deleteWhere [MaTxOutId >=. maTxOutId]
+  whenJust mtxOutId $ \txOutId -> deleteWhere [TxOutId >=.
txOutId] + + whenJust mtxId $ \txId -> do + queryFirstAndDeleteAfter CollateralTxOutTxId txId + queryFirstAndDeleteAfter CollateralTxInTxInId txId + queryFirstAndDeleteAfter ReferenceTxInTxInId txId + queryFirstAndDeleteAfter PoolMetadataRefRegisteredTxId txId + queryFirstAndDeleteAfter PoolRetireAnnouncedTxId txId + queryFirstAndDeleteAfter StakeRegistrationTxId txId + queryFirstAndDeleteAfter StakeDeregistrationTxId txId + queryFirstAndDeleteAfter DelegationTxId txId + queryFirstAndDeleteAfter TxMetadataTxId txId + queryFirstAndDeleteAfter WithdrawalTxId txId + queryFirstAndDeleteAfter TreasuryTxId txId + queryFirstAndDeleteAfter ReserveTxId txId + queryFirstAndDeleteAfter PotTransferTxId txId + queryFirstAndDeleteAfter MaTxMintTxId txId + queryFirstAndDeleteAfter RedeemerTxId txId + queryFirstAndDeleteAfter ScriptTxId txId + queryFirstAndDeleteAfter DatumTxId txId + queryFirstAndDeleteAfter RedeemerDataTxId txId + queryFirstAndDeleteAfter ExtraKeyWitnessTxId txId + queryFirstAndDeleteAfter ParamProposalRegisteredTxId txId + minPoolUpdate <- queryMinRefId PoolUpdateRegisteredTxId txId + whenJust minPoolUpdate $ \puid -> do + queryFirstAndDeleteAfter PoolOwnerPoolUpdateId puid + queryFirstAndDeleteAfter PoolRelayUpdateId puid + deleteWhere [PoolUpdateId >=. puid] + deleteWhere [TxId >=. txId] + +queryFirstAndDeleteAfter + :: forall m record field. (MonadIO m, PersistEntity record, PersistField field, PersistEntityBackend record ~ SqlBackend) + => EntityField record field -> field -> ReaderT SqlBackend m () +queryFirstAndDeleteAfter txIdField txId = do + mRecordId <- queryMinRefId txIdField txId + whenJust mRecordId $ \recordId -> + deleteWhere [persistIdField @record >=. recordId] -- | Delete a delisted pool if it exists. Returns 'True' if it did exist and has been -- deleted and 'False' if it did not exist. @@ -62,3 +141,25 @@ deleteDelistedPool poolHash = do keys <- selectKeysList [ DelistedPoolHashRaw ==. poolHash ] [] mapM_ delete keys pure $ not (null keys) + +whenNothingQueryMinRefId :: + forall m record field. (MonadIO m, PersistEntity record, PersistField field) + => Maybe (Key record) + -> EntityField record field + -> field + -> ReaderT SqlBackend m (Maybe (Key record)) +whenNothingQueryMinRefId mKey efield field = do + case mKey of + Just k -> pure $ Just k + Nothing -> queryMinRefId efield field + +-- | Delete a block if it exists. Returns 'True' if it did exist and has been +-- deleted and 'False' if it did not exist. +deleteBlock :: MonadIO m => Block -> ReaderT SqlBackend m Bool +deleteBlock block = do + mBlockId <- listToMaybe <$> selectKeysList [ BlockHash ==. blockHash block ] [] + case mBlockId of + Nothing -> pure False + Just blockId -> do + deleteBlocksBlockId nullTracer blockId + pure True diff --git a/cardano-db/src/Cardano/Db/Insert.hs b/cardano-db/src/Cardano/Db/Insert.hs index 91755f8b0..d7d9c534f 100644 --- a/cardano-db/src/Cardano/Db/Insert.hs +++ b/cardano-db/src/Cardano/Db/Insert.hs @@ -19,10 +19,8 @@ module Cardano.Db.Insert , insertManyRewards , insertManyTxIn , insertMaTxMint - , insertMaTxOut , insertManyMaTxOut , insertMeta - , insertMultiAsset , insertMultiAssetUnchecked , insertParamProposal , insertPotTransfer @@ -50,6 +48,7 @@ module Cardano.Db.Insert , insertCostModel , insertDatum , insertRedeemerData + , insertReverseIndex , insertCheckPoolOfflineData , insertCheckPoolOfflineFetchError , insertReservedPoolTicker @@ -57,9 +56,6 @@ module Cardano.Db.Insert -- Export mainly for testing. 
, insertBlockChecked
-  , insertCheckUnique
-  , insertManyUncheckedUnique
-  , insertUnchecked
  ) where

@@ -75,8 +71,8 @@ import Data.Proxy (Proxy (..))
import Data.Text (Text)
import qualified Data.Text as Text

-import Database.Persist.Class (AtLeastOneUniqueKey, PersistEntityBackend, checkUnique,
-                   insert, insertBy, replaceUnique)
+import Database.Persist.Class (AtLeastOneUniqueKey, PersistEntityBackend, PersistEntity,
+                   checkUnique, insert, insertBy, replaceUnique, )
import Database.Persist.EntityDef.Internal (entityDB, entityUniques)
import Database.Persist.Sql (OnlyOneUniqueKey, PersistRecordBackend, SqlBackend, UniqueDef,
                   entityDef, insertMany, rawExecute, rawSql, toPersistFields,
@@ -103,18 +99,15 @@ import Cardano.Db.Schema

-- One alternative is to just use `insert` but that fails on some uniqueness constraints on some
-- tables (about 6 out of a total of 25+).
--
--- Instead we use `insertUnchecked` for tables where uniqueness constraints are unlikley to be hit
--- and `insertChecked` for tables where the uniqueness constraint might can be hit.
+-- Instead we use `insertUnchecked` for tables where there are no uniqueness constraints
+-- and `insertChecked` for tables where the uniqueness constraint might be hit.

insertAdaPots :: (MonadBaseControl IO m, MonadIO m) => AdaPots -> ReaderT SqlBackend m AdaPotsId
-insertAdaPots = insertCheckUnique "AdaPots"
+insertAdaPots = insertUnchecked "AdaPots"

insertBlock :: (MonadBaseControl IO m, MonadIO m) => Block -> ReaderT SqlBackend m BlockId
insertBlock = insertUnchecked "Block"

-insertBlockChecked :: (MonadBaseControl IO m, MonadIO m) => Block -> ReaderT SqlBackend m BlockId
-insertBlockChecked = insertCheckUnique "Block"
-
insertCollateralTxIn :: (MonadBaseControl IO m, MonadIO m) => CollateralTxIn -> ReaderT SqlBackend m CollateralTxInId
insertCollateralTxIn = insertUnchecked "CollateralTxIn"

@@ -122,7 +115,7 @@ insertReferenceTxIn :: (MonadBaseControl IO m, MonadIO m) => ReferenceTxIn -> Re
insertReferenceTxIn = insertUnchecked "ReferenceTxIn"

insertDelegation :: (MonadBaseControl IO m, MonadIO m) => Delegation -> ReaderT SqlBackend m DelegationId
-insertDelegation = insertCheckUnique "Delegation"
+insertDelegation = insertUnchecked "Delegation"

insertEpoch :: (MonadBaseControl IO m, MonadIO m) => Epoch -> ReaderT SqlBackend m EpochId
insertEpoch = insertUnchecked "Epoch"

@@ -134,7 +127,7 @@ insertEpochSyncTime :: (MonadBaseControl IO m, MonadIO m) => EpochSyncTime -> Re
insertEpochSyncTime = insertReplace "EpochSyncTime"

insertExtraKeyWitness :: (MonadBaseControl IO m, MonadIO m) => ExtraKeyWitness -> ReaderT SqlBackend m ExtraKeyWitnessId
-insertExtraKeyWitness = insertCheckUnique "ExtraKeyWitness"
+insertExtraKeyWitness = insertUnchecked "ExtraKeyWitness"

insertManyEpochStakes :: (MonadBaseControl IO m, MonadIO m) => [EpochStake] -> ReaderT SqlBackend m ()
insertManyEpochStakes = insertManyUncheckedUnique "Many EpochStake"

@@ -142,24 +135,18 @@ insertManyEpochStakes = insertManyUncheckedUnique "Many EpochStake"
insertManyRewards :: (MonadBaseControl IO m, MonadIO m) => [Reward] -> ReaderT SqlBackend m ()
insertManyRewards = insertManyUncheckedUnique "Many Rewards"

-insertManyTxIn :: (MonadBaseControl IO m, MonadIO m) => [TxIn] -> ReaderT SqlBackend m ()
-insertManyTxIn = insertManyUncheckedUnique "Many TxIn"
+insertManyTxIn :: (MonadBaseControl IO m, MonadIO m) => [TxIn] -> ReaderT SqlBackend m [TxInId]
+insertManyTxIn = insertMany' "Many TxIn"

insertMaTxMint :: (MonadBaseControl IO m, MonadIO m) => MaTxMint -> ReaderT SqlBackend m
MaTxMintId -insertMaTxMint = insertCheckUnique "insertMaTxMint" +insertMaTxMint = insertUnchecked "insertMaTxMint" -insertMaTxOut :: (MonadBaseControl IO m, MonadIO m) => MaTxOut -> ReaderT SqlBackend m MaTxOutId -insertMaTxOut = insertCheckUnique "insertMaTxOut" - -insertManyMaTxOut :: (MonadBaseControl IO m, MonadIO m) => [MaTxOut] -> ReaderT SqlBackend m () -insertManyMaTxOut = insertManyUncheckedUnique "Many MaTxOut" +insertManyMaTxOut :: (MonadBaseControl IO m, MonadIO m) => [MaTxOut] -> ReaderT SqlBackend m [MaTxOutId] +insertManyMaTxOut = insertMany' "Many MaTxOut" insertMeta :: (MonadBaseControl IO m, MonadIO m) => Meta -> ReaderT SqlBackend m MetaId insertMeta = insertCheckUnique "Meta" -insertMultiAsset :: (MonadBaseControl IO m, MonadIO m) => MultiAsset -> ReaderT SqlBackend m MultiAssetId -insertMultiAsset = insertCheckUnique "MultiAsset" - insertMultiAssetUnchecked :: (MonadBaseControl IO m, MonadIO m) => MultiAsset -> ReaderT SqlBackend m MultiAssetId insertMultiAssetUnchecked = insertUnchecked "MultiAsset" @@ -185,7 +172,7 @@ insertPoolRetire :: (MonadBaseControl IO m, MonadIO m) => PoolRetire -> ReaderT insertPoolRetire = insertUnchecked "PoolRetire" insertPoolUpdate :: (MonadBaseControl IO m, MonadIO m) => PoolUpdate -> ReaderT SqlBackend m PoolUpdateId -insertPoolUpdate = insertCheckUnique "PoolUpdate" +insertPoolUpdate = insertUnchecked "PoolUpdate" insertReserve :: (MonadBaseControl IO m, MonadIO m) => Reserve -> ReaderT SqlBackend m ReserveId insertReserve = insertUnchecked "Reserve" @@ -215,7 +202,7 @@ insertTxIn :: (MonadBaseControl IO m, MonadIO m) => TxIn -> ReaderT SqlBackend m insertTxIn = insertUnchecked "TxIn" insertTxMetadata :: (MonadBaseControl IO m, MonadIO m) => TxMetadata -> ReaderT SqlBackend m TxMetadataId -insertTxMetadata = insertCheckUnique "TxMetadata" +insertTxMetadata = insertUnchecked "TxMetadata" insertTxOut :: (MonadBaseControl IO m, MonadIO m) => TxOut -> ReaderT SqlBackend m TxOutId insertTxOut = insertUnchecked "TxOut" @@ -230,7 +217,7 @@ insertWithdrawal :: (MonadBaseControl IO m, MonadIO m) => Withdrawal -> ReaderT insertWithdrawal = insertUnchecked "Withdrawal" insertRedeemer :: (MonadBaseControl IO m, MonadIO m) => Redeemer -> ReaderT SqlBackend m RedeemerId -insertRedeemer = insertCheckUnique "Redeemer" +insertRedeemer = insertUnchecked "Redeemer" insertCostModel :: (MonadBaseControl IO m, MonadIO m) => CostModel -> ReaderT SqlBackend m CostModelId insertCostModel = insertCheckUnique "CostModel" @@ -241,6 +228,9 @@ insertDatum = insertCheckUnique "Datum" insertRedeemerData :: (MonadBaseControl IO m, MonadIO m) => RedeemerData -> ReaderT SqlBackend m RedeemerDataId insertRedeemerData = insertCheckUnique "RedeemerData" +insertReverseIndex :: (MonadBaseControl IO m, MonadIO m) => ReverseIndex -> ReaderT SqlBackend m ReverseIndexId +insertReverseIndex = insertUnchecked "ReverseIndex" + insertCheckPoolOfflineData :: (MonadBaseControl IO m, MonadIO m) => PoolOfflineData -> ReaderT SqlBackend m () insertCheckPoolOfflineData pod = do foundPool <- existsPoolHashId (poolOfflineDataPoolId pod) @@ -394,11 +384,10 @@ insertReplace vtype record = -- even tables with uniqueness constraints, especially block, tx and many others, where -- uniqueness is enforced by the ledger. insertUnchecked - :: ( AtLeastOneUniqueKey record - , MonadIO m + :: ( MonadIO m , MonadBaseControl IO m , PersistEntityBackend record ~ SqlBackend - ) + , PersistEntity record) => String -> record -> ReaderT SqlBackend m (Key record) insertUnchecked vtype = handle exceptHandler . 
insert @@ -424,3 +413,8 @@ onlyOneUniqueDef prxy = case entityUniques (entityDef prxy) of [uniq] -> uniq _ -> error "impossible due to OnlyOneUniqueKey constraint" + +-- Used in tests + +insertBlockChecked :: (MonadBaseControl IO m, MonadIO m) => Block -> ReaderT SqlBackend m BlockId +insertBlockChecked = insertCheckUnique "Block" diff --git a/cardano-db/src/Cardano/Db/Migration.hs b/cardano-db/src/Cardano/Db/Migration.hs index a8147312f..851cc6730 100644 --- a/cardano-db/src/Cardano/Db/Migration.hs +++ b/cardano-db/src/Cardano/Db/Migration.hs @@ -4,6 +4,7 @@ module Cardano.Db.Migration ( MigrationDir (..) , LogFileDir (..) + , MigrationToRun (..) , applyMigration , createMigration , getMigrationScripts @@ -23,7 +24,6 @@ import Control.Exception (SomeException, handle) import Control.Monad.Extra import Control.Monad.IO.Class (liftIO) import Control.Monad.Logger (NoLoggingT) -import Control.Monad.Trans.Except (ExceptT, throwE) import Control.Monad.Trans.Reader (ReaderT) import Control.Monad.Trans.Resource (runResourceT) @@ -34,7 +34,7 @@ import Data.Conduit.Process (sourceCmdWithConsumer) import Data.Either (partitionEithers) import Data.List ((\\)) import qualified Data.List as List -import Data.Maybe (fromMaybe, listToMaybe) +import Data.Maybe (fromMaybe) import Data.Text (Text) import qualified Data.Text as Text import qualified Data.Text.IO as Text @@ -49,6 +49,7 @@ import Cardano.Crypto.Hash (Blake2b_256, ByteString, Hash, hashToStrin import Cardano.Db.Migration.Haskell import Cardano.Db.Migration.Version import Cardano.Db.PGConfig +import Cardano.Db.Query import Cardano.Db.Run import Cardano.Db.Schema import Cardano.Db.Text @@ -63,6 +64,7 @@ import Text.Read (readMaybe) newtype MigrationDir = MigrationDir FilePath + deriving Show newtype LogFileDir = LogFileDir FilePath @@ -77,50 +79,93 @@ data MigrationValidateError = UnknownMigrationsFound , extraMigrations :: [MigrationValidate] } deriving (Eq, Show) +data MigrationToRun = Initial | Full | Fix | Indexes + -- | Run the migrations in the provided 'MigrationDir' and write date stamped log file -- to 'LogFileDir'. It returns a list of file names of all non-official schema migration files. -runMigrations :: PGConfig -> Bool -> MigrationDir -> Maybe LogFileDir -> IO [FilePath] -runMigrations pgconfig quiet migrationDir mLogfiledir = do - scripts <- getMigrationScripts migrationDir - case mLogfiledir of - Nothing -> do +runMigrations :: PGConfig -> Bool -> MigrationDir -> Maybe LogFileDir -> MigrationToRun -> IO (Bool, [FilePath]) +runMigrations pgconfig quiet migrationDir mLogfiledir mToRun = do + allScripts <- getMigrationScripts migrationDir + ranAll <- case (mLogfiledir, allScripts) of + (_, []) -> + error $ "Empty schema dir " ++ show migrationDir + (Nothing, schema : scripts) -> do putStrLn "Running:" - forM_ scripts $ applyMigration migrationDir quiet pgconfig Nothing stdout + applyMigration' Nothing stdout schema + (scripts', ranAll) <- filterMigrations scripts + forM_ scripts' $ applyMigration' Nothing stdout putStrLn "Success!" 
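The four MigrationToRun modes select scripts via the stage filters defined a little further down (filterFix and filterIndexes). The same rules as standalone predicates, with the hard-coded schema version written out (illustrative only, not part of this diff):

-- Stage-2 scripts newer than the stage-two version shipped with release 13.0
-- (hardCoded3_0 = 19) are the "fix" scripts; stage-4 scripts build indexes.
isFixScript, isIndexScript :: MigrationVersion -> Bool
isFixScript mv = mvStage mv == 2 && mvVersion mv > 19
isIndexScript mv = mvStage mv == 4

-- Initial: everything except index scripts (and no fix scripts on an existing 13.0 schema).
-- Full: everything, with the same fix-script exception.
-- Fix: only the fix scripts. Indexes: only the index scripts.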
- Just logfiledir -> do + pure ranAll + (Just logfiledir, schema : scripts) -> do logFilename <- genLogFilename logfiledir withFile logFilename AppendMode $ \logHandle -> do unless quiet $ putStrLn "Running:" - forM_ scripts $ applyMigration migrationDir quiet pgconfig (Just logFilename) logHandle + applyMigration' (Just logFilename) logHandle schema + (scripts', ranAll) <- filterMigrations scripts + print ranAll + forM_ scripts' $ applyMigration' (Just logFilename) logHandle unless quiet $ putStrLn "Success!" - pure $ map (takeFileName . snd) (filter isUnofficialMigration scripts) + pure ranAll + pure (ranAll, map (takeFileName . snd) (filter isUnofficialMigration allScripts)) where + isUnofficialMigration :: (MigrationVersion, FilePath) -> Bool + isUnofficialMigration (mv, _) = mvStage mv < 1 || mvStage mv > 4 + genLogFilename :: LogFileDir -> IO FilePath genLogFilename (LogFileDir logdir) = (logdir ) . formatTime defaultTimeLocale ("migrate-" ++ iso8601DateFormat (Just "%H%M%S") ++ ".log") <$> getCurrentTime - isUnofficialMigration :: (MigrationVersion, FilePath) -> Bool - isUnofficialMigration (mv, _) = mvStage mv < 1 || mvStage mv > 3 + applyMigration' = applyMigration migrationDir quiet pgconfig + + filterMigrations :: [(MigrationVersion, FilePath)] -> IO ([(MigrationVersion, FilePath)], Bool) + filterMigrations scripts = case mToRun of + Full -> do + mVersion <- runWithConnectionNoLogging (PGPassCached pgconfig) querySchemaVersion + case mVersion of + Just (SchemaVersion _ v _) | v == hardCoded3_0 -> do + pure (filter (not . filterFix) scripts, False) + _ -> pure (scripts, True) + Initial -> do + mVersion <- runWithConnectionNoLogging (PGPassCached pgconfig) querySchemaVersion + case mVersion of + Just (SchemaVersion _ v _) | v == hardCoded3_0 -> do + pure (filter (\m -> not $ filterFix m || filterIndexes m) scripts, False) + _ -> pure (filter (not . filterIndexes) scripts, True) + Fix -> pure (filter filterFix scripts, False) + Indexes -> pure (filter filterIndexes scripts, False) + + filterFix (mv, _) = mvStage mv == 2 && mvVersion mv > hardCoded3_0 + filterIndexes (mv, _) = mvStage mv == 4 + +hardCoded3_0 :: Int +hardCoded3_0 = 19 -- Build hash for each file found in a directory. -validateMigrations :: MigrationDir -> [(Text, Text)] -> ExceptT MigrationValidateError IO () +validateMigrations :: MigrationDir -> [(Text, Text)] -> IO (Maybe (MigrationValidateError, Bool)) validateMigrations migrationDir knownMigrations = do let knownMigs = uncurry MigrationValidate <$> knownMigrations scripts <- filter (isOfficialMigrationFile . Text.unpack . mvFilepath) <$> liftIO (hashMigrations migrationDir) - when (scripts /= knownMigs) $ - throwE $ UnknownMigrationsFound - { missingMigrations = knownMigs \\ scripts -- Migrations missing at runtime that were present at compilation time - , extraMigrations = scripts \\ knownMigs -- Migrations found at runtime that were missing at compilation time - } - + if scripts == knownMigs + then pure Nothing + else do + let missingMigr = knownMigs \\ scripts + let extraMigr = scripts \\ knownMigs + let unknown = UnknownMigrationsFound + { missingMigrations = missingMigr -- Migrations missing at runtime that were present at compilation time + , extraMigrations = extraMigr -- Migrations found at runtime that were missing at compilation time + } + pure $ Just (unknown, all stage4 $ missingMigr <> extraMigr) + where + stage4 = (== 4) . readStageFromFilename . Text.unpack . 
mvFilepath

applyMigration :: MigrationDir -> Bool -> PGConfig -> Maybe FilePath -> Handle -> (MigrationVersion, FilePath) -> IO ()
applyMigration (MigrationDir location) quiet pgconfig mLogFilename logHandle (version, script) = do
    -- This assumes that the credentials for 'psql' are already sorted out.
    -- One way to achieve this is via a 'PGPASSFILE' environment variable
    -- as per the PostgreSQL documentation.
+    print version
    let command =
          List.unwords
            [ "psql"
@@ -155,6 +200,16 @@ applyMigration (MigrationDir location) quiet pgconfig mLogFilename logHandle (ve
        Nothing -> pure ()
        Just logFilename ->
          putStrLn $ "\nErrors in file: " ++ logFilename ++ "\n"
      exitFailure
+{-}
+migrateIndexes :: IO ()
+migrateIndexes = pure ()
+
+migrateSyncFix :: IO ()
+migrateSyncFix = pure ()
+
+isSyncFixed :: IO Bool
+isSyncFixed = pure False
+-}

-- | Create a database migration (using functionality built into Persistent). If no
-- migration is needed return 'Nothing' otherwise return the migration as 'Text'.
@@ -275,15 +330,16 @@ renderMigrationValidateError = \case
isOfficialMigrationFile :: FilePath -> Bool
isOfficialMigrationFile fn =
-    let stage = readStageFromFilename (takeFileName fn)
-    in takeExtension fn == ".sql" && stage >= 1 && stage <= 3
+    takeExtension fn == ".sql" && stage >= 1 && stage <= 4
  where
-    -- Reimplement part of `parseMigrationVersionFromFile` because that function is not avaliable
-    -- here. Defaults to a stage value of `0`.
-    readStageFromFilename :: String -> Int
-    readStageFromFilename str =
-      case takeWhile isDigit . drop 1 $ dropWhile (/= '-') str of
-        stage -> fromMaybe 0 $ readMaybe stage
+    stage = readStageFromFilename fn
+
+-- Reimplement part of `parseMigrationVersionFromFile` because that function is not available
+-- here. Defaults to a stage value of `0`.
+readStageFromFilename :: FilePath -> Int
+readStageFromFilename fn =
+  case takeWhile isDigit .
drop 1 $ dropWhile (/= '-') (takeFileName fn) of + stage -> fromMaybe 0 $ readMaybe stage noLedgerMigrations :: SqlBackend -> Trace IO Text -> IO () noLedgerMigrations backend trce = do diff --git a/cardano-db/src/Cardano/Db/MinId.hs b/cardano-db/src/Cardano/Db/MinId.hs new file mode 100644 index 000000000..79feb04e6 --- /dev/null +++ b/cardano-db/src/Cardano/Db/MinId.hs @@ -0,0 +1,61 @@ +{-# LANGUAGE OverloadedStrings #-} +{-# LANGUAGE FlexibleContexts #-} + +module Cardano.Db.MinId where + +import Cardano.Prelude +import Cardano.Db.Schema +import Cardano.Db.Text +import qualified Data.Text as Text +import Database.Persist.Sql (SqlBackend, ToBackendKey, fromSqlKey, toSqlKey) + +data MinIds = MinIds + { minTxInId :: Maybe TxInId + , minTxOutId :: Maybe TxOutId + , minMaTxOutId :: Maybe MaTxOutId + } + +instance Monoid MinIds where + mempty = MinIds Nothing Nothing Nothing + +instance Semigroup MinIds where + mn1 <> mn2 = + MinIds + { minTxInId = minJust (minTxInId mn1) (minTxInId mn2) + , minTxOutId = minJust (minTxOutId mn1) (minTxOutId mn2) + , minMaTxOutId = minJust (minMaTxOutId mn1) (minMaTxOutId mn2) + } + +textToMinId :: Text -> Maybe MinIds +textToMinId txt = + case Text.split (== ':') txt of + [tminTxInId, tminTxOutId, tminMaTxOutId] -> + Just $ + MinIds + { minTxInId = toSqlKey <$> readKey tminTxInId + , minTxOutId = toSqlKey <$> readKey tminTxOutId + , minMaTxOutId = toSqlKey <$> readKey tminMaTxOutId + } + _ -> Nothing + where + readKey :: Text -> Maybe Int64 + readKey "" = Nothing + readKey str = readMaybe (Text.unpack str) + +minIdsToText :: MinIds -> Text +minIdsToText minIds = + Text.intercalate ":" + [ fromKey $ minTxInId minIds + , fromKey $ minTxOutId minIds + , fromKey $ minMaTxOutId minIds + ] + where + fromKey :: ToBackendKey SqlBackend record => Maybe (Key record) -> Text + fromKey Nothing = "" + fromKey (Just k) = textShow $ fromSqlKey k + + +minJust :: Ord a => Maybe a -> Maybe a -> Maybe a +minJust (Just a) (Just b) = Just $ min a b +minJust (Just a) _ = Just a +minJust _ x = x diff --git a/cardano-db/src/Cardano/Db/Old/V13_0.hs b/cardano-db/src/Cardano/Db/Old/V13_0.hs new file mode 100644 index 000000000..75754c2a1 --- /dev/null +++ b/cardano-db/src/Cardano/Db/Old/V13_0.hs @@ -0,0 +1,6 @@ +module Cardano.Db.Old.V13_0 + ( module X + ) where + +import Cardano.Db.Old.V13_0.Query as X +import Cardano.Db.Old.V13_0.Schema as X diff --git a/cardano-db/src/Cardano/Db/Old/V13_0/Query.hs b/cardano-db/src/Cardano/Db/Old/V13_0/Query.hs new file mode 100644 index 000000000..012d4f1a4 --- /dev/null +++ b/cardano-db/src/Cardano/Db/Old/V13_0/Query.hs @@ -0,0 +1,128 @@ +{-# LANGUAGE ExplicitNamespaces #-} +{-# LANGUAGE FlexibleContexts #-} +{-# LANGUAGE LambdaCase #-} +{-# LANGUAGE OverloadedStrings #-} +{-# LANGUAGE ScopedTypeVariables #-} +{-# LANGUAGE TypeApplications #-} + + +module Cardano.Db.Old.V13_0.Query + ( queryDatum + , queryDatumPage + , queryDatumCount + , querydatumInfo + , queryRedeemerData + , queryRedeemerDataPage + , queryRedeemerDataCount + , queryRedeemerDataInfo + , upateDatumBytes + , upateRedeemerDataBytes + ) where + +import Control.Monad.IO.Class (MonadIO) +import Control.Monad.Trans.Reader (ReaderT) + +import Data.ByteString.Char8 (ByteString) +import Data.Int (Int64) +import Data.Maybe (listToMaybe) +import Data.Word (Word64) + +import Database.Esqueleto.Experimental (Entity (..), SqlBackend, Value, countRows, from, offset, unValue, innerJoin, + just, on, table, select, type (:&) ((:&)), val, (^.), where_, (==.), + asc, orderBy, limit) +import 
Database.Persist ((=.)) +import Database.Persist.Class + +import Cardano.Db.Old.V13_0.Schema + +{- HLINT ignore "Fuse on/on" -} + +queryDatum :: MonadIO m => ByteString -> ReaderT SqlBackend m (Maybe DatumId) +queryDatum hsh = do + xs <- select $ do + datum <- from $ table @Datum + where_ (datum ^. DatumHash ==. val hsh) + pure (datum ^. DatumId) + pure $ unValue <$> listToMaybe xs + +queryDatumPage :: MonadIO m => Int64 -> Int64 -> ReaderT SqlBackend m [Entity Datum] +queryDatumPage ofs lmt = + select $ do + datum <- from $ table @Datum + orderBy [asc (datum ^. DatumId)] + limit lmt + offset ofs + pure datum + +queryDatumCount :: MonadIO m => ReaderT SqlBackend m Word64 +queryDatumCount = do + xs <- select $ do + _ <- from $ table @Datum + pure countRows + pure $ maybe 0 unValue (listToMaybe xs) + +querydatumInfo :: MonadIO m => DatumId -> ReaderT SqlBackend m (Maybe (ByteString, Maybe Word64)) +querydatumInfo datumId = do + res <- select $ do + (_blk :& _tx :& datum :& prevBlock) <- + from $ table @Block + `innerJoin` table @Tx + `on` (\(blk :& tx) -> tx ^. TxBlockId ==. blk ^. BlockId) + `innerJoin` table @Datum + `on` (\(_blk :& tx :& datum) -> datum ^. DatumTxId ==. tx ^. TxId) + `innerJoin` table @Block + `on` (\(blk :& _tx :& _datum :& prevBlk) -> blk ^. BlockPreviousId ==. just (prevBlk ^. BlockId)) + where_ (datum ^. DatumId ==. val datumId) + pure (prevBlock ^. BlockHash, prevBlock ^. BlockSlotNo) + pure $ unValue2 <$> listToMaybe res + + +queryRedeemerData :: MonadIO m => ByteString -> ReaderT SqlBackend m (Maybe RedeemerDataId) +queryRedeemerData hsh = do + xs <- select $ do + rdmrDt <- from $ table @RedeemerData + where_ (rdmrDt ^. RedeemerDataHash ==. val hsh) + pure (rdmrDt ^. RedeemerDataId) + pure $ unValue <$> listToMaybe xs + +queryRedeemerDataPage :: MonadIO m => Int64 -> Int64 -> ReaderT SqlBackend m [Entity RedeemerData] +queryRedeemerDataPage ofs lmt = + select $ do + redeemerData <- from $ table @RedeemerData + orderBy [asc (redeemerData ^. RedeemerDataId)] + limit lmt + offset ofs + pure redeemerData + +queryRedeemerDataCount :: MonadIO m => ReaderT SqlBackend m Word64 +queryRedeemerDataCount = do + xs <- select $ do + _ <- from $ table @RedeemerData + pure countRows + pure $ maybe 0 unValue (listToMaybe xs) + +queryRedeemerDataInfo :: MonadIO m => RedeemerDataId -> ReaderT SqlBackend m (Maybe (ByteString, Maybe Word64)) +queryRedeemerDataInfo rdmDataId = do + res <- select $ do + (_blk :& _tx :& rdmData :& prevBlock) <- + from $ table @Block + `innerJoin` table @Tx + `on` (\(blk :& tx) -> tx ^. TxBlockId ==. blk ^. BlockId) + `innerJoin` table @RedeemerData + `on` (\(_blk :& tx :& rdmData) -> rdmData ^. RedeemerDataTxId ==. tx ^. TxId) + `innerJoin` table @Block + `on` (\(blk :& _tx :& _rdmData :& prevBlk) -> blk ^. BlockPreviousId ==. just (prevBlk ^. BlockId)) + where_ (rdmData ^. RedeemerDataId ==. val rdmDataId) + pure (prevBlock ^. BlockHash, prevBlock ^. BlockSlotNo) + pure $ unValue2 <$> listToMaybe res + + +upateDatumBytes :: MonadIO m => DatumId -> ByteString -> ReaderT SqlBackend m () +upateDatumBytes datumId bytes = update datumId [DatumBytes =. bytes] + +upateRedeemerDataBytes :: MonadIO m => RedeemerDataId -> ByteString -> ReaderT SqlBackend m () +upateRedeemerDataBytes rdmDataId bytes = update rdmDataId [RedeemerDataBytes =. 
bytes]
+
+
+unValue2 :: (Value a, Value b) -> (a, b)
+unValue2 (a, b) = (unValue a, unValue b)
diff --git a/cardano-db/src/Cardano/Db/Old/V13_0/Schema.hs b/cardano-db/src/Cardano/Db/Old/V13_0/Schema.hs
new file mode 100644
index 000000000..2e8e38981
--- /dev/null
+++ b/cardano-db/src/Cardano/Db/Old/V13_0/Schema.hs
@@ -0,0 +1,101 @@
+{-# LANGUAGE ConstraintKinds #-}
+{-# LANGUAGE DataKinds #-}
+{-# LANGUAGE DeriveDataTypeable #-}
+{-# LANGUAGE DeriveGeneric #-}
+{-# LANGUAGE DerivingStrategies #-}
+{-# LANGUAGE FlexibleContexts #-}
+{-# LANGUAGE FlexibleInstances #-}
+{-# LANGUAGE GADTs #-}
+{-# LANGUAGE GeneralizedNewtypeDeriving #-}
+{-# LANGUAGE MultiParamTypeClasses #-}
+{-# LANGUAGE OverloadedStrings #-}
+{-# LANGUAGE QuasiQuotes #-}
+{-# LANGUAGE StandaloneDeriving #-}
+{-# LANGUAGE TemplateHaskell #-}
+{-# LANGUAGE TypeFamilies #-}
+{-# LANGUAGE UndecidableInstances #-}
+
+module Cardano.Db.Old.V13_0.Schema where
+
+import Cardano.Db.Schema.Orphans ()
+
+import Cardano.Db.Types (DbLovelace, DbWord64)
+
+import Data.ByteString.Char8 (ByteString)
+import Data.Int (Int64)
+import Data.Text (Text)
+import Data.Time.Clock (UTCTime)
+import Data.Word (Word16, Word64)
+
+import Database.Persist.Documentation (deriveShowFields)
+import Database.Persist.TH
+
+share
+  [ mkPersist sqlSettings
+  , mkEntityDefList "entityDefs"
+  , deriveShowFields
+  ]
+  [persistLowerCase|
+
+  Block
+    hash ByteString sqltype=hash32type
+    epochNo Word64 Maybe sqltype=word31type
+    slotNo Word64 Maybe sqltype=word63type
+    epochSlotNo Word64 Maybe sqltype=word31type
+    blockNo Word64 Maybe sqltype=word31type
+    previousId BlockId Maybe OnDeleteCascade
+    slotLeaderId SlotLeaderId noreference
+    size Word64 sqltype=word31type
+    time UTCTime sqltype=timestamp
+    txCount Word64
+    protoMajor Word16 sqltype=word31type
+    protoMinor Word16 sqltype=word31type
+    -- Shelley specific
+    vrfKey Text Maybe
+    opCert ByteString Maybe sqltype=hash32type
+    opCertCounter Word64 Maybe sqltype=word63type
+    UniqueBlock hash
+
+  SlotLeader
+    hash ByteString sqltype=hash28type
+    poolHashId PoolHashId Maybe noreference -- This will be non-null when a block is mined by a pool.
+    description Text -- Description of the slot leader.
+    UniqueSlotLeader hash
+
+  PoolHash
+    hashRaw ByteString sqltype=hash28type
+    view Text
+    UniquePoolHash hashRaw
+
+  Tx
+    hash ByteString sqltype=hash32type
+    blockId BlockId OnDeleteCascade -- This type is the primary key for the 'block' table.
+    blockIndex Word64 sqltype=word31type -- The index of this transaction within the block.
+    outSum DbLovelace sqltype=lovelace
+    fee DbLovelace sqltype=lovelace
+    deposit Int64 -- Needs to allow negative values.
+    size Word64 sqltype=word31type
+
+    -- New for Allegra
+    invalidBefore DbWord64 Maybe sqltype=word64type
+    invalidHereafter DbWord64 Maybe sqltype=word64type
+
+    -- New for Alonzo
+    validContract Bool -- False if the contract is invalid, True otherwise.
+ scriptSize Word64 sqltype=word31type + UniqueTx hash + + Datum + hash ByteString sqltype=hash32type + txId TxId OnDeleteCascade + value Text Maybe sqltype=jsonb + bytes ByteString sqltype=bytea + UniqueDatum hash + + RedeemerData + hash ByteString sqltype=hash32type + txId TxId OnDeleteCascade + value Text Maybe sqltype=jsonb + bytes ByteString sqltype=bytea + UniqueRedeemerData hash + |] diff --git a/cardano-db/src/Cardano/Db/Query.hs b/cardano-db/src/Cardano/Db/Query.hs index c9bd537d9..095a9eadd 100644 --- a/cardano-db/src/Cardano/Db/Query.hs +++ b/cardano-db/src/Cardano/Db/Query.hs @@ -7,78 +7,48 @@ module Cardano.Db.Query ( LookupFail (..) - , queryAddressBalanceAtSlot - , queryAddressOutputs - , queryGenesis - , queryBlock + + -- queries used by db-sync , queryBlockCount , queryBlockCountAfterBlockNo - , queryBlockHeight - , queryBlockId - , queryBlockNoId - , queryBlockSlotNo + , queryBlockHashBlockNo , queryBlockNo - , queryMainBlock + , queryBlockSlotNo + , queryReverseIndexBlockId + , queryMinIdsAfterReverseIndex , queryBlockTxCount - , queryBlocksAfterSlot + , queryBlockId , queryCalcEpochEntry - , queryCheckPoints , queryCurrentEpochNo - , queryDepositUpToBlockNo - , queryEpochEntry - , queryEpochNo - , queryRewardCount - , queryRewards , queryNormalEpochRewardCount , queryNullPoolRewardExists - , queryEpochRewardCount - , queryRewardsSpend - , queryFeesUpToBlockNo - , queryFeesUpToSlotNo + , queryGenesis , queryGenesisSupply , queryShelleyGenesisSupply , queryLatestBlock , queryLatestPoints - , queryLatestCachedEpochNo , queryLatestEpochNo , queryLatestBlockId - , queryLatestBlockNo , queryLatestSlotNo - , queryPreviousSlotNo , queryMeta - , queryMultiAssetId - , queryNetworkName - , querySlotNosGreaterThan - , queryLastSlotNoGreaterThan , queryCountSlotNosGreaterThan - , queryLastSlotNo , queryCountSlotNo - , querySchemaVersion , queryScript , queryDatum - , queryDatumPage - , queryDatumCount - , querydatumInfo , queryRedeemerData - , queryRedeemerDataPage - , queryRedeemerDataCount - , queryRedeemerDataInfo - , querySelectCount , querySlotHash - , querySlotUtcTime + , queryMultiAssetId , queryTotalSupply , queryTxCount , queryTxId - , queryTxInCount - , queryTxOutCount , queryTxOutValue , queryTxOutCredentials , queryEpochStakeCount - , queryUtxoAtBlockNo - , queryUtxoAtSlotNo - , queryWithdrawalsUpToBlockNo - , queryCostModel - , queryAdaPots + , queryMinRefId + , existsPoolHashId + , existsPoolMetadataRefId + + -- queries used in smash , queryPoolOfflineData , queryPoolRegister , queryRetiredPools @@ -87,6 +57,31 @@ module Cardano.Db.Query , queryReservedTickers , queryDelistedPools , queryPoolOfflineFetchError + , existsDelistedPool + + -- queries used in tools + , queryDepositUpToBlockNo + , queryEpochEntry + , queryFeesUpToBlockNo + , queryFeesUpToSlotNo + , queryLatestCachedEpochNo + , queryLatestBlockNo + , querySlotNosGreaterThan + , querySlotNos + , querySlotUtcTime + , queryUtxoAtBlockNo + , queryUtxoAtSlotNo + , queryWithdrawalsUpToBlockNo + , queryAdaPots + , queryAddressBalanceAtSlot + + -- queries used only in tests + , queryAddressOutputs + , queryBlockHeight + , queryRewardCount + , queryTxInCount + , queryTxOutCount + , queryCostModel , queryScriptOutputs , queryTxInRedeemer , queryTxInFailedTx @@ -95,20 +90,15 @@ module Cardano.Db.Query , queryDelegationScript , queryWithdrawalScript , queryStakeAddressScript - , queryStakeAddressIdsAfter - , existsDelistedPool - , existsPoolHash - , existsPoolHashId - , existsPoolMetadataRefId + , querySchemaVersion + , 
queryPreviousSlotNo
+  -- utils
  , entityPair
  , isJust
  , listToMaybe
  , maybeToEither
  , renderLookupFail
-  , txOutSpentB
-  , txOutSpentP
-  , txOutUnspentP
  , unBlockId
  , unTxId
  , unTxInId
@@ -121,13 +111,12 @@ module Cardano.Db.Query
import Cardano.Slotting.Slot (SlotNo (..))

-import Control.Monad.Extra (join, mapMaybeM, whenJust)
+import Control.Monad.Extra (join, whenJust)
import Control.Monad.IO.Class (MonadIO)
import Control.Monad.Trans.Reader (ReaderT)

import Data.ByteString.Char8 (ByteString)
import Data.Fixed (Micro)
-import Data.Int (Int64)
import Data.Maybe (fromMaybe, listToMaybe, mapMaybe)
import Data.Ratio (numerator)
import Data.Text (Text)
@@ -135,13 +124,12 @@ import Data.Time.Clock (UTCTime (..))
import Data.Tuple.Extra (uncurry3)
import Data.Word (Word64)

-import Database.Esqueleto.Experimental (Entity, From, PersistEntity, PersistField,
-                   SqlBackend, SqlExpr, SqlQuery, Value (Value, unValue), ValueList, asc, count,
-                   countRows, desc, entityKey, entityVal, exists, from, in_, innerJoin, isNothing,
-                   just, leftJoin, limit, max_, min_, notExists, not_, offset, on, orderBy, select,
-                   subList_select, sum_, table, type (:&) ((:&)), unSqlBackendKey, val, valList,
-                   where_, (&&.), (<=.), (==.), (>.), (>=.), (?.), (^.), (||.))
-import Database.Esqueleto.Experimental.From (ToFrom (..))
+import Database.Esqueleto.Experimental (Entity (..), PersistEntity, PersistField, SqlBackend, SqlExpr,
+                   SqlQuery, Value (Value, unValue), ValueList, count, countRows, desc, entityKey,
+                   entityVal, from, in_, innerJoin, isNothing, just, leftJoin, limit, max_, min_,
+                   notExists, not_, on, orderBy, persistIdField, select, subList_select, sum_, table,
+                   type (:&) ((:&)), unSqlBackendKey, val, valList, where_, (&&.), (<=.), (==.),
+                   (>.), (>=.), (?.), (^.), (||.), asc)
import Database.Persist.Class.PersistQuery (selectList)

import Cardano.Db.Error
@@ -154,58 +142,13 @@ import Database.Persist.Types (SelectOpt (Asc))

{- HLINT ignore "Reduce duplication" -}

-- If you squint, these Esqueleto queries almost look like SQL queries.
+--
+-- Queries in this module are split in hierarchical order: first the queries used by db-sync
+-- during syncing, then those used by smash, by tools and finally by tests. This is useful to make
+-- sure we have all the necessary indexes during syncing, but not more than that, based on the
+-- queries db-sync does.

-queryAddressBalanceAtSlot :: MonadIO m => Text -> Word64 -> ReaderT SqlBackend m Ada
-queryAddressBalanceAtSlot addr slotNo = do
-    eblkId <- select $ do
-      blk <- from (table @Block)
-      where_ (blk ^. BlockSlotNo ==. just (val slotNo))
-      pure (blk ^. BlockId)
-    maybe (pure 0) (queryAddressBalanceAtBlockId . unValue) (listToMaybe eblkId)
-  where
-    queryAddressBalanceAtBlockId :: MonadIO m => BlockId -> ReaderT SqlBackend m Ada
-    queryAddressBalanceAtBlockId blkid = do
-      -- tx1 refers to the tx of the input spending this output (if it is ever spent)
-      -- tx2 refers to the tx of the output
-      res <- select $ do
-        (txout :& _ :& _ :& blk :& _) <-
-          from $ table @TxOut
-          `leftJoin` table @TxIn
-          `on` (\(txout :& txin) -> just (txout ^. TxOutTxId) ==. txin ?. TxInTxOutId)
-          `leftJoin` table @Tx
-          `on` (\(_ :& txin :& tx1) -> txin ?. TxInTxInId ==. tx1 ?. TxId)
-          `leftJoin` table @Block
-          `on` (\(_ :& _ :& tx1 :& blk) -> tx1 ?. TxBlockId ==. blk ?. BlockId)
-          `leftJoin` table @Tx
-          `on` (\(txout :& _ :& _ :& _ :& tx2) -> just (txout ^. TxOutTxId) ==. tx2 ?. TxId )
-        where_ $ (txout ^. TxOutTxId `in_` txLessEqual blkid)
-          &&. (isNothing (blk ?. BlockBlockNo)||. (blk ?. BlockId >.
just (val blkid))) - where_ (txout ^. TxOutAddress ==. val addr) - pure $ sum_ (txout ^. TxOutValue) - pure $ unValueSumAda (listToMaybe res) - -queryAddressOutputs :: MonadIO m => ByteString -> ReaderT SqlBackend m DbLovelace -queryAddressOutputs addr = do - res <- select $ do - txout <- from $ table @TxOut - where_ (txout ^. TxOutAddressRaw ==. val addr) - pure $ sum_ (txout ^. TxOutValue) - pure $ convert (listToMaybe res) - where - convert v = case unValue <$> v of - Just (Just x) -> x - _ -> DbLovelace 0 - -queryGenesis :: MonadIO m => ReaderT SqlBackend m (Either LookupFail BlockId) -queryGenesis = do - res <- select $ do - blk <- from (table @Block) - where_ (isNothing (blk ^. BlockPreviousId)) - pure $ blk ^. BlockId - case res of - [blk] -> pure $ Right (unValue blk) - _ -> pure $ Left DBMultipleGenesis - +{-} -- | Get the 'Block' associated with the given hash. queryBlock :: MonadIO m => ByteString -> ReaderT SqlBackend m (Either LookupFail Block) queryBlock hash = do @@ -214,6 +157,7 @@ queryBlock hash = do where_ (blk ^. BlockHash ==. val hash) pure blk pure $ maybeToEither (DbLookupBlockHash hash) entityVal (listToMaybe res) + -} -- | Count the number of blocks in the Block table. queryBlockCount :: MonadIO m => ReaderT SqlBackend m Word @@ -223,24 +167,6 @@ queryBlockCount = do pure countRows pure $ maybe 0 unValue (listToMaybe res) --- | Get the 'BlockId' associated with the given hash. -queryBlockId :: MonadIO m => ByteString -> ReaderT SqlBackend m (Either LookupFail BlockId) -queryBlockId hash = do - res <- select $ do - blk <- from $ table @Block - where_ (blk ^. BlockHash ==. val hash) - pure $ blk ^. BlockId - pure $ maybeToEither (DbLookupBlockHash hash) unValue (listToMaybe res) - --- | Get the 'BlockId' associated with the given hash. -queryBlockNoId :: MonadIO m => ByteString -> ReaderT SqlBackend m (Either LookupFail (BlockId, Maybe Word64)) -queryBlockNoId hash = do - res <- select $ do - blk <- from $ table @Block - where_ (blk ^. BlockHash ==. val hash) - pure (blk ^. BlockId, blk ^. BlockBlockNo) - pure $ maybeToEither (DbLookupBlockHash hash) unValue2 (listToMaybe res) - -- | Count the number of blocks in the Block table after a 'BlockNo'. queryBlockCountAfterBlockNo :: MonadIO m => Word64 -> Bool -> ReaderT SqlBackend m Word queryBlockCountAfterBlockNo blockNo queryEq = do @@ -251,13 +177,13 @@ queryBlockCountAfterBlockNo blockNo queryEq = do pure countRows pure $ maybe 0 unValue (listToMaybe res) --- | Get the 'SlotNo' associated with the given hash. -queryBlockSlotNo :: MonadIO m => ByteString -> ReaderT SqlBackend m (Either LookupFail (Maybe Word64)) -queryBlockSlotNo hash = do +-- | Get the 'BlockNo' associated with the given hash. +queryBlockHashBlockNo :: MonadIO m => ByteString -> ReaderT SqlBackend m (Either LookupFail (Maybe Word64)) +queryBlockHashBlockNo hash = do res <- select $ do blk <- from $ table @Block where_ (blk ^. BlockHash ==. val hash) - pure $ blk ^. BlockSlotNo + pure $ blk ^. BlockBlockNo pure $ maybeToEither (DbLookupBlockHash hash) unValue (listToMaybe res) queryBlockNo :: MonadIO m => Word64 -> ReaderT SqlBackend m (Maybe BlockId) @@ -268,35 +194,34 @@ queryBlockNo blkNo = do pure (blk ^. BlockId) pure $ fmap unValue (listToMaybe res) --- | Get the current block height. 
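queryMinRefId, exported earlier in this Query.hs hunk, underpins most of the new deletion logic, but its body falls outside this excerpt; from its call sites it returns the smallest primary key of a row whose given field is at least the supplied value. A plausible esqueleto shape, for orientation only (the actual definition may differ):

queryMinRefId
    :: forall m record field. (MonadIO m, PersistEntity record, PersistField field)
    => EntityField record field -> field -> ReaderT SqlBackend m (Maybe (Key record))
queryMinRefId refField refVal = do
  res <- select $ do
    rec <- from $ table @record
    where_ (rec ^. refField >=. val refVal)
    orderBy [asc (rec ^. persistIdField)]
    limit 1
    pure $ rec ^. persistIdField
  pure $ unValue <$> listToMaybe res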
-queryBlockHeight :: MonadIO m => ReaderT SqlBackend m (Maybe Word64) -queryBlockHeight = do +queryBlockSlotNo :: MonadIO m => Word64 -> ReaderT SqlBackend m (Maybe BlockId) +queryBlockSlotNo slotNo = do res <- select $ do blk <- from $ table @Block - where_ (isJust $ blk ^. BlockBlockNo) - orderBy [desc (blk ^. BlockBlockNo)] - limit 1 - pure (blk ^. BlockBlockNo) - pure $ unValue =<< listToMaybe res + where_ (blk ^. BlockSlotNo ==. just (val slotNo)) + pure (blk ^. BlockId) + pure $ fmap unValue (listToMaybe res) --- | Get the latest 'Block' associated with the given hash, skipping any EBBs. -queryMainBlock :: MonadIO m => ByteString -> ReaderT SqlBackend m (Either LookupFail Block) -queryMainBlock hash = do - res <- select $ do - blk <- from $ table @Block - where_ (blk ^. BlockHash ==. val hash) - pure $ blk ^. BlockId - maybe (pure $ Left (DbLookupBlockHash hash)) queryMainBlockId (unValue <$> listToMaybe res) - where - queryMainBlockId :: MonadIO m => BlockId -> ReaderT SqlBackend m (Either LookupFail Block) - queryMainBlockId blkid = do - res <- select $ do - blk <- from $ table @Block - where_ (isJust (blk ^. BlockBlockNo) &&. blk ^. BlockId <=. val blkid) - orderBy [desc (blk ^. BlockSlotNo)] - limit 1 - pure blk - pure $ maybeToEither (DbLookupBlockId $ unBlockId blkid) entityVal (listToMaybe res) +queryReverseIndexBlockId :: MonadIO m => BlockId -> ReaderT SqlBackend m [Maybe Text] +queryReverseIndexBlockId blockId = do + res <- select $ do + (blk :& ridx) <- + from $ table @Block + `leftJoin` table @ReverseIndex + `on` (\(blk :& ridx) -> just (blk ^. BlockId) ==. ridx ?. ReverseIndexBlockId) + where_ (blk ^. BlockId >=. val blockId) + orderBy [asc (blk ^. BlockId)] + pure $ ridx ?. ReverseIndexMinIds + pure $ fmap unValue res + +queryMinIdsAfterReverseIndex :: MonadIO m => ReverseIndexId -> ReaderT SqlBackend m [Text] +queryMinIdsAfterReverseIndex rollbackId = do + res <- select $ do + rl <- from $ table @ReverseIndex + where_ (rl ^. ReverseIndexId >=. val rollbackId) + orderBy [desc (rl ^. ReverseIndexId)] + pure $ rl ^. ReverseIndexMinIds + pure $ fmap unValue res -- | Get the number of transactions in the specified block. queryBlockTxCount :: MonadIO m => BlockId -> ReaderT SqlBackend m Word64 @@ -307,13 +232,14 @@ queryBlockTxCount blkId = do pure countRows pure $ maybe 0 unValue (listToMaybe res) -queryBlocksAfterSlot :: MonadIO m => Word64 -> ReaderT SqlBackend m Int -queryBlocksAfterSlot slotNo = do +-- | Get the 'BlockId' associated with the given hash. +queryBlockId :: MonadIO m => ByteString -> ReaderT SqlBackend m (Either LookupFail BlockId) +queryBlockId hash = do res <- select $ do blk <- from $ table @Block - where_ (blk ^. BlockSlotNo >. just (val slotNo)) - pure countRows - pure $ maybe 0 unValue (listToMaybe res) + where_ (blk ^. BlockHash ==. val hash) + pure $ blk ^. BlockId + pure $ maybeToEither (DbLookupBlockHash hash) unValue (listToMaybe res) -- | Calculate the Epoch table entry for the specified epoch. -- When syncing the chain or filling an empty table, this is called at each epoch boundary to @@ -372,61 +298,6 @@ queryCalcEpochEntry epochNum = do defaultUTCTime :: UTCTime defaultUTCTime = read "2000-01-01 00:00:00.000000 UTC" -queryCheckPoints :: MonadIO m => Word64 -> ReaderT SqlBackend m [(Word64, ByteString)] -queryCheckPoints limitCount = do - latest <- select $ do - blk <- from $ table @Block - where_ (isJust $ blk ^. BlockSlotNo) - orderBy [desc (blk ^. BlockSlotNo)] - limit (fromIntegral limitCount) - pure (blk ^. 
BlockSlotNo) - case mapMaybe unValue latest of - [] -> pure [] - xs -> mapMaybeM querySpacing xs - where - querySpacing :: MonadIO m => Word64 -> ReaderT SqlBackend m (Maybe (Word64, ByteString)) - querySpacing blkNo = do - rows <- select $ do - blk <- from $ table @Block - where_ (blk ^. BlockSlotNo ==. just (val blkNo)) - pure (blk ^. BlockSlotNo, blk ^. BlockHash) - pure $ convert =<< listToMaybe rows - - convert :: (Value (Maybe Word64), Value ByteString) -> Maybe (Word64, ByteString) - convert (va, vb) = - case (unValue va, unValue vb) of - (Nothing, _ ) -> Nothing - (Just a, b) -> Just (a, b) - -queryDepositUpToBlockNo :: MonadIO m => Word64 -> ReaderT SqlBackend m Ada -queryDepositUpToBlockNo blkNo = do - res <- select $ do - (tx :& blk) <- - from $ table @Tx - `innerJoin` table @Block - `on` (\(tx :& blk) -> tx ^. TxBlockId ==. blk ^. BlockId) - where_ (blk ^. BlockBlockNo <=. just (val blkNo)) - pure $ sum_ (tx ^. TxDeposit) - pure $ unValueSumAda (listToMaybe res) - -queryEpochEntry :: MonadIO m => Word64 -> ReaderT SqlBackend m (Either LookupFail Epoch) -queryEpochEntry epochNum = do - res <- select $ do - epoch <- from $ table @Epoch - where_ (epoch ^. EpochNo ==. val epochNum) - pure epoch - pure $ maybeToEither (DbLookupEpochNo epochNum) entityVal (listToMaybe res) - --- | Get the Epoch number for a given block. Returns '0' for the genesis block --- even though the DB entry for the genesis block is 'NULL'. -queryEpochNo :: MonadIO m => BlockId -> ReaderT SqlBackend m (Either LookupFail (Maybe Word64)) -queryEpochNo blkId = do - res <- select $ do - blk <- from $ table @Block - where_ (blk ^. BlockId ==. val blkId) - pure $ blk ^. BlockEpochNo - pure $ maybeToEither (DbLookupBlockId $ unBlockId blkId) unValue (listToMaybe res) - queryCurrentEpochNo :: MonadIO m => ReaderT SqlBackend m (Maybe Word64) queryCurrentEpochNo = do res <- select $ do @@ -434,37 +305,6 @@ queryCurrentEpochNo = do pure $ max_ (blk ^. BlockEpochNo) pure $ join (unValue =<< listToMaybe res) -queryRewardCount :: MonadIO m => ReaderT SqlBackend m Word64 -queryRewardCount = do - res <- select $ do - _ <- from $ table @Reward - pure countRows - pure $ maybe 0 unValue (listToMaybe res) - -queryEpochRewardCount :: MonadIO m => Word64 -> ReaderT SqlBackend m Word64 -queryEpochRewardCount epochNum = do - res <- select $ do - rwds <- from $ table @Reward - where_ (rwds ^. RewardSpendableEpoch ==. val epochNum) - pure countRows - pure $ maybe 0 unValue (listToMaybe res) - -queryRewardsSpend :: MonadIO m => Word64 -> ReaderT SqlBackend m [Reward] -queryRewardsSpend epochNum = do - res <- select $ do - rwds <- from $ table @Reward - where_ (rwds ^. RewardSpendableEpoch ==. val epochNum) - pure rwds - pure $ entityVal <$> res - -queryRewards :: MonadIO m => Word64 -> ReaderT SqlBackend m [Reward] -queryRewards epochNum = do - res <- select $ do - rwds <- from $ table @Reward - where_ (rwds ^. RewardEarnedEpoch ==. val epochNum) - pure rwds - pure $ entityVal <$> res - queryNormalEpochRewardCount :: MonadIO m => Word64 -> ReaderT SqlBackend m Word64 @@ -487,29 +327,15 @@ queryNullPoolRewardExists newRwd = do pure (rwd ^. RewardId) pure $ not (null res) --- | Get the fees paid in all block from genesis up to and including the specified block. -queryFeesUpToBlockNo :: MonadIO m => Word64 -> ReaderT SqlBackend m Ada -queryFeesUpToBlockNo blkNo = do - res <- select $ do - (tx :& blk) <- - from $ table @Tx - `innerJoin` table @Block - `on` (\(tx :& blk) -> tx ^. TxBlockId ==. blk ^. BlockId) - where_ (blk ^. 
BlockBlockNo <=. just (val blkNo)) - pure $ sum_ (tx ^. TxFee) - pure $ unValueSumAda (listToMaybe res) - -queryFeesUpToSlotNo :: MonadIO m => Word64 -> ReaderT SqlBackend m Ada -queryFeesUpToSlotNo slotNo = do +queryGenesis :: MonadIO m => ReaderT SqlBackend m (Either LookupFail BlockId) +queryGenesis = do res <- select $ do - (tx :& blk) <- - from $ table @Tx - `innerJoin` table @Block - `on` (\(tx :& blk) -> tx ^. TxBlockId ==. blk ^. BlockId) - where_ (isJust $ blk ^. BlockSlotNo) - where_ (blk ^. BlockSlotNo <=. just (val slotNo)) - pure $ sum_ (tx ^. TxFee) - pure $ unValueSumAda (listToMaybe res) + blk <- from (table @Block) + where_ (isNothing (blk ^. BlockPreviousId)) + pure $ blk ^. BlockId + case res of + [blk] -> pure $ Right (unValue blk) + _ -> pure $ Left DBMultipleGenesis -- | Return the total Genesis coin supply. queryGenesisSupply :: MonadIO m => ReaderT SqlBackend m Ada @@ -541,27 +367,6 @@ queryShelleyGenesisSupply = do pure $ sum_ (txOut ^. TxOutValue) pure $ unValueSumAda (listToMaybe res) --- | Get 'BlockId' of the latest block. -queryLatestBlockId :: MonadIO m => ReaderT SqlBackend m (Maybe BlockId) -queryLatestBlockId = do - res <- select $ do - blk <- from $ table @Block - orderBy [desc (blk ^. BlockSlotNo)] - limit 1 - pure (blk ^. BlockId) - pure $ fmap unValue (listToMaybe res) - --- | Get the 'BlockNo' of the latest block. -queryLatestBlockNo :: MonadIO m => ReaderT SqlBackend m (Maybe Word64) -queryLatestBlockNo = do - res <- select $ do - blk <- from $ table @Block - where_ (isJust $ blk ^. BlockBlockNo) - orderBy [desc (blk ^. BlockBlockNo)] - limit 1 - pure $ blk ^. BlockBlockNo - pure $ listToMaybe (mapMaybe unValue res) - -- | Get the latest block. queryLatestBlock :: MonadIO m => ReaderT SqlBackend m (Maybe Block) queryLatestBlock = do @@ -583,15 +388,6 @@ queryLatestPoints = do pure (blk ^. BlockSlotNo, blk ^. BlockHash) pure $ fmap unValue2 res -queryLatestCachedEpochNo :: MonadIO m => ReaderT SqlBackend m (Maybe Word64) -queryLatestCachedEpochNo = do - res <- select $ do - epoch <- from $ table @Epoch - orderBy [desc (epoch ^. EpochNo)] - limit 1 - pure (epoch ^. EpochNo) - pure $ unValue <$> listToMaybe res - queryLatestEpochNo :: MonadIO m => ReaderT SqlBackend m Word64 queryLatestEpochNo = do res <- select $ do @@ -602,9 +398,19 @@ queryLatestEpochNo = do pure (blk ^. BlockEpochNo) pure $ fromMaybe 0 (unValue =<< listToMaybe res) --- | Get the latest slot number -queryLatestSlotNo :: MonadIO m => ReaderT SqlBackend m Word64 -queryLatestSlotNo = do +-- | Get 'BlockId' of the latest block. +queryLatestBlockId :: MonadIO m => ReaderT SqlBackend m (Maybe BlockId) +queryLatestBlockId = do + res <- select $ do + blk <- from $ table @Block + orderBy [desc (blk ^. BlockSlotNo)] + limit 1 + pure (blk ^. BlockId) + pure $ fmap unValue (listToMaybe res) + +-- | Get the latest slot number +queryLatestSlotNo :: MonadIO m => ReaderT SqlBackend m Word64 +queryLatestSlotNo = do res <- select $ do blk <- from $ table @Block where_ (isJust $ blk ^. BlockSlotNo) @@ -613,26 +419,15 @@ queryLatestSlotNo = do pure $ blk ^. BlockSlotNo pure $ fromMaybe 0 (unValue =<< listToMaybe res) --- | Given a 'SlotNo' return the 'SlotNo' of the previous block. -queryPreviousSlotNo :: MonadIO m => Word64 -> ReaderT SqlBackend m (Maybe Word64) -queryPreviousSlotNo slotNo = do - res <- select $ do - (blk :& pblk) <- - from $ table @Block - `innerJoin` table @Block - `on` (\(blk :& pblk) -> blk ^. BlockPreviousId ==. just (pblk ^. BlockId)) - where_ (blk ^. BlockSlotNo ==. 
just (val slotNo)) - pure $ pblk ^. BlockSlotNo - pure $ unValue =<< listToMaybe res - -querySchemaVersion :: MonadIO m => ReaderT SqlBackend m (Maybe SchemaVersion) -querySchemaVersion = do - res <- select $ do - sch <- from $ table @SchemaVersion - orderBy [desc (sch ^. SchemaVersionStageOne)] - limit 1 - pure (sch ^. SchemaVersionStageOne, sch ^. SchemaVersionStageTwo, sch ^. SchemaVersionStageThree) - pure $ uncurry3 SchemaVersion . unValue3 <$> listToMaybe res +{-# INLINABLE queryMeta #-} +-- | Get the network metadata. +queryMeta :: MonadIO m => ReaderT SqlBackend m (Either LookupFail Meta) +queryMeta = do + res <- select . from $ table @Meta + pure $ case res of + [] -> Left DbMetaEmpty + [m] -> Right $ entityVal m + _ -> Left DbMetaMultipleRows queryScript :: MonadIO m => ByteString -> ReaderT SqlBackend m (Maybe ScriptId) queryScript hsh = do @@ -650,37 +445,6 @@ queryDatum hsh = do pure (datum ^. DatumId) pure $ unValue <$> listToMaybe xs -queryDatumPage :: MonadIO m => Int64 -> Int64 -> ReaderT SqlBackend m [Entity Datum] -queryDatumPage ofs lmt = - select $ do - datum <- from $ table @Datum - orderBy [asc (datum ^. DatumId)] - limit lmt - offset ofs - pure datum - -queryDatumCount :: MonadIO m => ReaderT SqlBackend m Word64 -queryDatumCount = do - xs <- select $ do - _ <- from $ table @Datum - pure countRows - pure $ maybe 0 unValue (listToMaybe xs) - -querydatumInfo :: MonadIO m => DatumId -> ReaderT SqlBackend m (Maybe (ByteString, Maybe Word64)) -querydatumInfo datumId = do - res <- select $ do - (_blk :& _tx :& datum :& prevBlock) <- - from $ table @Block - `innerJoin` table @Tx - `on` (\(blk :& tx) -> tx ^. TxBlockId ==. blk ^. BlockId) - `innerJoin` table @Datum - `on` (\(_blk :& tx :& datum) -> datum ^. DatumTxId ==. tx ^. TxId) - `innerJoin` table @Block - `on` (\(blk :& _tx :& _datum :& prevBlk) -> blk ^. BlockPreviousId ==. just (prevBlk ^. BlockId)) - where_ (datum ^. DatumId ==. val datumId) - pure (prevBlock ^. BlockHash, prevBlock ^. BlockSlotNo) - pure $ unValue2 <$> listToMaybe res - queryRedeemerData :: MonadIO m => ByteString -> ReaderT SqlBackend m (Maybe RedeemerDataId) queryRedeemerData hsh = do xs <- select $ do @@ -689,47 +453,6 @@ queryRedeemerData hsh = do pure (rdmrDt ^. RedeemerDataId) pure $ unValue <$> listToMaybe xs -queryRedeemerDataPage :: MonadIO m => Int64 -> Int64 -> ReaderT SqlBackend m [Entity RedeemerData] -queryRedeemerDataPage ofs lmt = - select $ do - redeemerData <- from $ table @RedeemerData - orderBy [asc (redeemerData ^. RedeemerDataId)] - limit lmt - offset ofs - pure redeemerData - -queryRedeemerDataCount :: MonadIO m => ReaderT SqlBackend m Word64 -queryRedeemerDataCount = do - xs <- select $ do - _ <- from $ table @RedeemerData - pure countRows - pure $ maybe 0 unValue (listToMaybe xs) - -queryRedeemerDataInfo :: MonadIO m => RedeemerDataId -> ReaderT SqlBackend m (Maybe (ByteString, Maybe Word64)) -queryRedeemerDataInfo rdmDataId = do - res <- select $ do - (_blk :& _tx :& rdmData :& prevBlock) <- - from $ table @Block - `innerJoin` table @Tx - `on` (\(blk :& tx) -> tx ^. TxBlockId ==. blk ^. BlockId) - `innerJoin` table @RedeemerData - `on` (\(_blk :& tx :& rdmData) -> rdmData ^. RedeemerDataTxId ==. tx ^. TxId) - `innerJoin` table @Block - `on` (\(blk :& _tx :& _rdmData :& prevBlk) -> blk ^. BlockPreviousId ==. just (prevBlk ^. BlockId)) - where_ (rdmData ^. RedeemerDataId ==. val rdmDataId) - pure (prevBlock ^. BlockHash, prevBlock ^. 
BlockSlotNo) - pure $ unValue2 <$> listToMaybe res - --- | Count the number of rows that match the select with the supplied predicate. -querySelectCount :: forall table table' m . (MonadIO m, PersistEntity table, ToFrom (From (SqlExpr (Entity table))) table') - => (table' -> SqlQuery ()) -> ReaderT SqlBackend m Word -querySelectCount predicate = do - xs <- select $ do - x <- from (table @table) - predicate x - pure countRows - pure $ maybe 0 unValue (listToMaybe xs) - querySlotHash :: MonadIO m => SlotNo -> ReaderT SqlBackend m [(SlotNo, ByteString)] querySlotHash slotNo = do res <- select $ do @@ -738,16 +461,6 @@ querySlotHash slotNo = do pure (blk ^. BlockHash) pure $ (\vh -> (slotNo, unValue vh)) <$> res -{-# INLINABLE queryMeta #-} --- | Get the network metadata. -queryMeta :: MonadIO m => ReaderT SqlBackend m (Either LookupFail Meta) -queryMeta = do - res <- select . from $ table @Meta - pure $ case res of - [] -> Left DbMetaEmpty - [m] -> Right $ entityVal m - _ -> Left DbMetaMultipleRows - queryMultiAssetId :: MonadIO m => ByteString -> ByteString -> ReaderT SqlBackend m (Maybe MultiAssetId) queryMultiAssetId policy assetName = do res <- select $ do @@ -756,39 +469,6 @@ queryMultiAssetId policy assetName = do pure (ma ^. MultiAssetId) pure $ unValue <$> listToMaybe res --- | Get the network name from the Meta table. -queryNetworkName :: MonadIO m => ReaderT SqlBackend m (Maybe Text) -queryNetworkName = do - res <- select $ do - meta <- from $ table @Meta - pure (meta ^. MetaNetworkName) - pure $ unValue <$> listToMaybe res - -querySlotNosGreaterThan :: MonadIO m => Word64 -> ReaderT SqlBackend m [SlotNo] -querySlotNosGreaterThan slotNo = do - res <- select $ do - blk <- from $ table @Block - -- Want all BlockNos where the block satisfies this predicate. - where_ (blk ^. BlockSlotNo >. just (val slotNo)) - -- Return them in descending order so we can delete the highest numbered - -- ones first. - orderBy [desc (blk ^. BlockSlotNo)] - pure (blk ^. BlockSlotNo) - pure $ mapMaybe (fmap SlotNo . unValue) res - -queryLastSlotNoGreaterThan :: MonadIO m => Word64 -> ReaderT SqlBackend m (Maybe SlotNo) -queryLastSlotNoGreaterThan slotNo = do - res <- select $ do - blk <- from $ table @Block - -- Want all BlockNos where the block satisfies this predicate. - where_ (blk ^. BlockSlotNo >. just (val slotNo)) - -- Return them in descending order so we can delete the highest numbered - -- ones first. - orderBy [desc (blk ^. BlockSlotNo)] - limit 1 - pure (blk ^. BlockSlotNo) - pure $ listToMaybe $ mapMaybe (fmap SlotNo . unValue) res - queryCountSlotNosGreaterThan :: MonadIO m => Word64 -> ReaderT SqlBackend m Word64 queryCountSlotNosGreaterThan slotNo = do res <- select $ do @@ -797,16 +477,6 @@ queryCountSlotNosGreaterThan slotNo = do pure countRows pure $ maybe 0 unValue (listToMaybe res) --- | Like 'queryLastSlotNoGreaterThan', but returns all slots in the same order. -queryLastSlotNo :: MonadIO m => ReaderT SqlBackend m (Maybe SlotNo) -queryLastSlotNo = do - res <- select $ do - blk <- from $ table @Block - orderBy [desc (blk ^. BlockSlotNo)] - limit 1 - pure (blk ^. BlockSlotNo) - pure $ listToMaybe $ mapMaybe (fmap SlotNo . unValue) res - -- | Like 'queryCountSlotNosGreaterThan', but returns all slots in the same order. queryCountSlotNo :: MonadIO m => ReaderT SqlBackend m Word64 queryCountSlotNo = do @@ -815,16 +485,6 @@ queryCountSlotNo = do pure countRows pure $ maybe 0 unValue (listToMaybe res) --- | Calculate the slot time (as UTCTime) for a given slot number. 
--- This will fail if the slot is empty. -querySlotUtcTime :: MonadIO m => Word64 -> ReaderT SqlBackend m (Either LookupFail UTCTime) -querySlotUtcTime slotNo = do - le <- select $ do - blk <- from $ table @Block - where_ (blk ^. BlockSlotNo ==. just (val slotNo)) - pure (blk ^. BlockTime) - pure $ maybe (Left $ DbLookupSlotNo slotNo) (Right . unValue) (listToMaybe le) - -- | Get the current total supply of Lovelace. This only returns the on-chain supply which -- does not include staking rewards that have not yet been withdrawn. Before wihdrawal -- rewards are part of the ledger state and hence not on chain. @@ -853,18 +513,6 @@ queryTxId hash = do pure tx pure $ maybeToEither (DbLookupTxHash hash) entityKey (listToMaybe res) --- | Count the number of transactions in the Tx table. -queryTxInCount :: MonadIO m => ReaderT SqlBackend m Word -queryTxInCount = do - res <- select $ from (table @TxIn) >> pure countRows - pure $ maybe 0 unValue (listToMaybe res) - --- | Count the number of transaction outputs in the TxOut table. -queryTxOutCount :: MonadIO m => ReaderT SqlBackend m Word -queryTxOutCount = do - res <- select $ from (table @TxOut) >> pure countRows - pure $ maybe 0 unValue (listToMaybe res) - -- | Give a (tx hash, index) pair, return the TxOut value. queryTxOutValue :: MonadIO m => (ByteString, Word64) -> ReaderT SqlBackend m (Either LookupFail (TxId, DbLovelace)) queryTxOutValue (hash, index) = do @@ -887,34 +535,6 @@ queryTxOutCredentials (hash, index) = do pure (txOut ^. TxOutPaymentCred, txOut ^. TxOutAddressHasScript) pure $ maybeToEither (DbLookupTxHash hash) unValue2 (listToMaybe res) --- | Get the UTxO set after the specified 'BlockId' has been applied to the chain. --- Not exported because 'BlockId' to 'BlockHash' relationship may not be the same --- across machines. -queryUtxoAtBlockId :: MonadIO m => BlockId -> ReaderT SqlBackend m [(TxOut, ByteString)] -queryUtxoAtBlockId blkid = do - outputs <- select $ do - (txout :& _txin :& _tx1 :& blk :& tx2) <- - from $ table @TxOut - `leftJoin` table @TxIn - `on` (\(txout :& txin) -> (just (txout ^. TxOutTxId) ==. txin ?. TxInTxOutId) &&. - (just (txout ^. TxOutIndex) ==. txin ?. TxInTxOutIndex)) - `leftJoin` table @Tx - `on` (\(_txout :& txin :& tx1) -> txin ?. TxInTxInId ==. tx1 ?. TxId) - `leftJoin` table @Block - `on` (\(_txout :& _txin :& tx1 :& blk) -> tx1 ?. TxBlockId ==. blk ?. BlockId) - `leftJoin` table @Tx - `on` (\(txout :& _ :& _ :& _ :& tx2) -> just (txout ^. TxOutTxId) ==. tx2 ?. TxId ) - - where_ $ (txout ^. TxOutTxId `in_` txLessEqual blkid) &&. - (isNothing (blk ?. BlockBlockNo)||. (blk ?. BlockId >. just (val blkid))) - pure (txout, tx2 ?. TxHash) - pure $ mapMaybe convert outputs - where - convert :: (Entity TxOut, Value (Maybe ByteString)) -> Maybe (TxOut, ByteString) - convert = \case - (out, Value (Just hash')) -> Just (entityVal out, hash') - (_, Value Nothing) -> Nothing - queryEpochStakeCount :: MonadIO m => Word64 -> ReaderT SqlBackend m Word64 queryEpochStakeCount epoch = do res <- select $ do @@ -923,24 +543,29 @@ queryEpochStakeCount epoch = do pure countRows pure $ maybe 0 unValue (listToMaybe res) -queryUtxoAtBlockNo :: MonadIO m => Word64 -> ReaderT SqlBackend m [(TxOut, ByteString)] -queryUtxoAtBlockNo blkNo = do - eblkId <- select $ do - blk <- from $ table @Block - where_ (blk ^. BlockBlockNo ==. just (val blkNo)) - pure (blk ^. BlockId) - maybe (pure []) (queryUtxoAtBlockId . unValue) (listToMaybe eblkId) +queryMinRefId + :: forall m field record. 
(MonadIO m, PersistEntity record, PersistField field) + => EntityField record field -> field -> ReaderT SqlBackend m (Maybe (Key record)) +queryMinRefId txIdField txId = do + res <- select $ do + rec <- from $ table @record + where_ (rec ^. txIdField >=. val txId) + orderBy [asc (rec ^. txIdField)] + limit 1 + pure $ rec ^. persistIdField + pure $ unValue <$> listToMaybe res -queryUtxoAtSlotNo :: MonadIO m => Word64 -> ReaderT SqlBackend m [(TxOut, ByteString)] -queryUtxoAtSlotNo slotNo = do - eblkId <- select $ do - blk <- from $ table @Block - where_ (blk ^. BlockSlotNo ==. just (val slotNo)) - pure (blk ^. BlockId) - maybe (pure []) (queryUtxoAtBlockId . unValue) (listToMaybe eblkId) +existsPoolHashId :: MonadIO m => PoolHashId -> ReaderT SqlBackend m Bool +existsPoolHashId phid = do + res <- select $ do + poolHash <- from $ table @PoolHash + where_ (poolHash ^. PoolHashId ==. val phid) + limit 1 + pure (poolHash ^. PoolHashId) + pure $ not (null res) + +{-} -queryWithdrawalsUpToBlockNo :: MonadIO m => Word64 -> ReaderT SqlBackend m Ada -queryWithdrawalsUpToBlockNo blkNo = do res <- select $ do (_tx :& wdrl :& blk) <- from $ table @Tx @@ -953,41 +578,22 @@ queryWithdrawalsUpToBlockNo blkNo = do pure $ sum_ (wdrl ^. WithdrawalAmount) pure $ unValueSumAda (listToMaybe res) -queryCostModel :: MonadIO m => ReaderT SqlBackend m [CostModelId] -queryCostModel = - fmap entityKey <$> selectList [] [Asc CostModelId] - -queryAdaPots :: MonadIO m => BlockId -> ReaderT SqlBackend m (Maybe AdaPots) -queryAdaPots blkId = do - res <- select $ do - adaPots <- from $ table @AdaPots - where_ (adaPots ^. AdaPotsBlockId ==. val blkId) - pure adaPots - pure $ fmap entityVal (listToMaybe res) -queryUsedTicker :: MonadIO m => ByteString -> ByteString -> ReaderT SqlBackend m (Maybe Text) -queryUsedTicker poolHash metaHash = do - res <- select $ do - (pod :& ph) <- - from $ table @PoolOfflineData - `innerJoin` table @PoolHash - `on` (\(pod :& ph) -> ph ^. PoolHashId ==. pod ^. PoolOfflineDataPoolId) - where_ (ph ^. PoolHashHashRaw ==. val poolHash) - where_ (pod ^. PoolOfflineDataHash ==. val metaHash) - pure $ pod ^. PoolOfflineDataTickerName - pure $ unValue <$> listToMaybe res +-} -queryReservedTicker :: MonadIO m => Text -> ReaderT SqlBackend m (Maybe ByteString) -queryReservedTicker tickerName = do +-- db-sync +existsPoolMetadataRefId :: MonadIO m => PoolMetadataRefId -> ReaderT SqlBackend m Bool +existsPoolMetadataRefId pmrid = do res <- select $ do - ticker <- from $ table @ReservedPoolTicker - where_ (ticker ^. ReservedPoolTickerName ==. val tickerName) - pure $ ticker ^. ReservedPoolTickerPoolHash - pure $ unValue <$> listToMaybe res + pmr <- from $ table @PoolMetadataRef + where_ (pmr ^. PoolMetadataRefId ==. val pmrid) + limit 1 + pure (pmr ^. 
PoolMetadataRefId)
+  pure $ not (null res)
 
-queryReservedTickers :: MonadIO m => ReaderT SqlBackend m [ReservedPoolTicker]
-queryReservedTickers =
-  fmap entityVal <$> selectList [] []
+{--------------------------------------------
+    Queries used in SMASH
+----------------------------------------------}
 
 queryPoolOfflineData :: MonadIO m => ByteString -> ByteString -> ReaderT SqlBackend m (Maybe (Text, ByteString))
 queryPoolOfflineData poolHash poolMetadataHash = do
@@ -1053,6 +659,30 @@ queryRetiredPools mPoolHash = do
         , pcCertNo = CertNo blkNo txIndex retIndex
         }
 
+queryUsedTicker :: MonadIO m => ByteString -> ByteString -> ReaderT SqlBackend m (Maybe Text)
+queryUsedTicker poolHash metaHash = do
+  res <- select $ do
+    (pod :& ph) <-
+      from $ table @PoolOfflineData
+        `innerJoin` table @PoolHash
+        `on` (\(pod :& ph) -> ph ^. PoolHashId ==. pod ^. PoolOfflineDataPoolId)
+    where_ (ph ^. PoolHashHashRaw ==. val poolHash)
+    where_ (pod ^. PoolOfflineDataHash ==. val metaHash)
+    pure $ pod ^. PoolOfflineDataTickerName
+  pure $ unValue <$> listToMaybe res
+
+queryReservedTicker :: MonadIO m => Text -> ReaderT SqlBackend m (Maybe ByteString)
+queryReservedTicker tickerName = do
+  res <- select $ do
+    ticker <- from $ table @ReservedPoolTicker
+    where_ (ticker ^. ReservedPoolTickerName ==. val tickerName)
+    pure $ ticker ^. ReservedPoolTickerPoolHash
+  pure $ unValue <$> listToMaybe res
+
+queryReservedTickers :: MonadIO m => ReaderT SqlBackend m [ReservedPoolTicker]
+queryReservedTickers =
+  fmap entityVal <$> selectList [] []
+
 -- Return delisted Pool hashes.
 queryDelistedPools :: MonadIO m => ReaderT SqlBackend m [ByteString]
 queryDelistedPools = do
@@ -1096,18 +726,274 @@ queryPoolOfflineFetchError hash (Just fromTime) = do
   where
     extract (fetchErr, metadataHash) = (entityVal fetchErr, unValue metadataHash)
 
-queryScriptOutputs :: MonadIO m => ReaderT SqlBackend m [TxOut]
-queryScriptOutputs = do
-  res <- select $ do
-    tx_out <- from $ table @TxOut
-    where_ (tx_out ^. TxOutAddressHasScript ==. val True)
-    pure tx_out
-  pure $ entityVal <$> res
-
-queryTxInRedeemer :: MonadIO m => ReaderT SqlBackend m [TxIn]
-queryTxInRedeemer = do
-  res <- select $ do
-    tx_in <- from $ table @TxIn
+existsDelistedPool :: MonadIO m => ByteString -> ReaderT SqlBackend m Bool
+existsDelistedPool ph = do
+  res <- select $ do
+    delistedPool <- from $ table @DelistedPool
+    where_ (delistedPool ^. DelistedPoolHashRaw ==. val ph)
+    limit 1
+    pure (delistedPool ^. DelistedPoolId)
+  pure $ not (null res)
+
+{---------------------------------------------------------
+  Queries used in Tools (validation and snapshot creation)
+----------------------------------------------------------}
+
+queryDepositUpToBlockNo :: MonadIO m => Word64 -> ReaderT SqlBackend m Ada
+queryDepositUpToBlockNo blkNo = do
+  res <- select $ do
+    (tx :& blk) <-
+      from $ table @Tx
+        `innerJoin` table @Block
+        `on` (\(tx :& blk) -> tx ^. TxBlockId ==. blk ^. BlockId)
+    where_ (blk ^. BlockBlockNo <=. just (val blkNo))
+    pure $ sum_ (tx ^. TxDeposit)
+  pure $ unValueSumAda (listToMaybe res)
+
+queryEpochEntry :: MonadIO m => Word64 -> ReaderT SqlBackend m (Either LookupFail Epoch)
+queryEpochEntry epochNum = do
+  res <- select $ do
+    epoch <- from $ table @Epoch
+    where_ (epoch ^. EpochNo ==. val epochNum)
+    pure epoch
+  pure $ maybeToEither (DbLookupEpochNo epochNum) entityVal (listToMaybe res)
+
+-- | Get the fees paid in all blocks from genesis up to and including the specified block. 
+queryFeesUpToBlockNo :: MonadIO m => Word64 -> ReaderT SqlBackend m Ada +queryFeesUpToBlockNo blkNo = do + res <- select $ do + (tx :& blk) <- + from $ table @Tx + `innerJoin` table @Block + `on` (\(tx :& blk) -> tx ^. TxBlockId ==. blk ^. BlockId) + where_ (blk ^. BlockBlockNo <=. just (val blkNo)) + pure $ sum_ (tx ^. TxFee) + pure $ unValueSumAda (listToMaybe res) + +queryFeesUpToSlotNo :: MonadIO m => Word64 -> ReaderT SqlBackend m Ada +queryFeesUpToSlotNo slotNo = do + res <- select $ do + (tx :& blk) <- + from $ table @Tx + `innerJoin` table @Block + `on` (\(tx :& blk) -> tx ^. TxBlockId ==. blk ^. BlockId) + where_ (isJust $ blk ^. BlockSlotNo) + where_ (blk ^. BlockSlotNo <=. just (val slotNo)) + pure $ sum_ (tx ^. TxFee) + pure $ unValueSumAda (listToMaybe res) + +queryLatestCachedEpochNo :: MonadIO m => ReaderT SqlBackend m (Maybe Word64) +queryLatestCachedEpochNo = do + res <- select $ do + epoch <- from $ table @Epoch + orderBy [desc (epoch ^. EpochNo)] + limit 1 + pure (epoch ^. EpochNo) + pure $ unValue <$> listToMaybe res + +-- | Get the 'BlockNo' of the latest block. +queryLatestBlockNo :: MonadIO m => ReaderT SqlBackend m (Maybe Word64) +queryLatestBlockNo = do + res <- select $ do + blk <- from $ table @Block + where_ (isJust $ blk ^. BlockBlockNo) + orderBy [desc (blk ^. BlockBlockNo)] + limit 1 + pure $ blk ^. BlockBlockNo + pure $ listToMaybe (mapMaybe unValue res) + +querySlotNosGreaterThan :: MonadIO m => Word64 -> ReaderT SqlBackend m [SlotNo] +querySlotNosGreaterThan slotNo = do + res <- select $ do + blk <- from $ table @Block + -- Want all BlockNos where the block satisfies this predicate. + where_ (blk ^. BlockSlotNo >. just (val slotNo)) + -- Return them in descending order so we can delete the highest numbered + -- ones first. + orderBy [desc (blk ^. BlockSlotNo)] + pure (blk ^. BlockSlotNo) + pure $ mapMaybe (fmap SlotNo . unValue) res + +-- | Like 'querySlotNosGreaterThan', but returns all slots in the same order. +querySlotNos :: MonadIO m => ReaderT SqlBackend m [SlotNo] +querySlotNos = do + res <- select $ do + blk <- from $ table @Block + -- Return them in descending order so we can delete the highest numbered + -- ones first. + orderBy [desc (blk ^. BlockSlotNo)] + pure (blk ^. BlockSlotNo) + pure $ mapMaybe (fmap SlotNo . unValue) res + +-- | Calculate the slot time (as UTCTime) for a given slot number. +-- This will fail if the slot is empty. +querySlotUtcTime :: MonadIO m => Word64 -> ReaderT SqlBackend m (Either LookupFail UTCTime) +querySlotUtcTime slotNo = do + le <- select $ do + blk <- from $ table @Block + where_ (blk ^. BlockSlotNo ==. just (val slotNo)) + pure (blk ^. BlockTime) + pure $ maybe (Left $ DbLookupSlotNo slotNo) (Right . unValue) (listToMaybe le) + +queryUtxoAtBlockNo :: MonadIO m => Word64 -> ReaderT SqlBackend m [(TxOut, ByteString)] +queryUtxoAtBlockNo blkNo = do + eblkId <- select $ do + blk <- from $ table @Block + where_ (blk ^. BlockBlockNo ==. just (val blkNo)) + pure (blk ^. BlockId) + maybe (pure []) (queryUtxoAtBlockId . unValue) (listToMaybe eblkId) + +queryUtxoAtSlotNo :: MonadIO m => Word64 -> ReaderT SqlBackend m [(TxOut, ByteString)] +queryUtxoAtSlotNo slotNo = do + eblkId <- select $ do + blk <- from $ table @Block + where_ (blk ^. BlockSlotNo ==. just (val slotNo)) + pure (blk ^. BlockId) + maybe (pure []) (queryUtxoAtBlockId . 
unValue) (listToMaybe eblkId)
+
+queryWithdrawalsUpToBlockNo :: MonadIO m => Word64 -> ReaderT SqlBackend m Ada
+queryWithdrawalsUpToBlockNo blkNo = do
+  res <- select $ do
+    (_tx :& wdrl :& blk) <-
+      from $ table @Tx
+        `innerJoin` table @Withdrawal
+        `on` (\(tx :& wdrl) -> tx ^. TxId ==. wdrl ^. WithdrawalTxId)
+        `innerJoin` table @Block
+        `on` (\(tx :& _wdrl :& blk) -> tx ^. TxBlockId ==. blk ^. BlockId)
+    where_ (blk ^. BlockBlockNo <=. val (Just $ fromIntegral blkNo))
+    pure $ sum_ (wdrl ^. WithdrawalAmount)
+  pure $ unValueSumAda (listToMaybe res)
+
+queryAdaPots :: MonadIO m => BlockId -> ReaderT SqlBackend m (Maybe AdaPots)
+queryAdaPots blkId = do
+  res <- select $ do
+    adaPots <- from $ table @AdaPots
+    where_ (adaPots ^. AdaPotsBlockId ==. val blkId)
+    pure adaPots
+  pure $ fmap entityVal (listToMaybe res)
+
+-- | Get the UTxO set after the specified 'BlockId' has been applied to the chain.
+-- Not exported because 'BlockId' to 'BlockHash' relationship may not be the same
+-- across machines.
+queryUtxoAtBlockId :: MonadIO m => BlockId -> ReaderT SqlBackend m [(TxOut, ByteString)]
+queryUtxoAtBlockId blkid = do
+    outputs <- select $ do
+      (txout :& _txin :& _tx1 :& blk :& tx2) <-
+        from $ table @TxOut
+        `leftJoin` table @TxIn
+        `on` (\(txout :& txin) -> (just (txout ^. TxOutTxId) ==. txin ?. TxInTxOutId) &&.
+                                  (just (txout ^. TxOutIndex) ==. txin ?. TxInTxOutIndex))
+        `leftJoin` table @Tx
+        `on` (\(_txout :& txin :& tx1) -> txin ?. TxInTxInId ==. tx1 ?. TxId)
+        `leftJoin` table @Block
+        `on` (\(_txout :& _txin :& tx1 :& blk) -> tx1 ?. TxBlockId ==. blk ?. BlockId)
+        `leftJoin` table @Tx
+        `on` (\(txout :& _ :& _ :& _ :& tx2) -> just (txout ^. TxOutTxId) ==. tx2 ?. TxId )
+
+      where_ $ (txout ^. TxOutTxId `in_` txLessEqual blkid) &&.
+                  (isNothing (blk ?. BlockBlockNo)||. (blk ?. BlockId >. just (val blkid)))
+      pure (txout, tx2 ?. TxHash)
+    pure $ mapMaybe convert outputs
+  where
+    convert :: (Entity TxOut, Value (Maybe ByteString)) -> Maybe (TxOut, ByteString)
+    convert = \case
+      (out, Value (Just hash')) -> Just (entityVal out, hash')
+      (_, Value Nothing) -> Nothing
+
+queryAddressBalanceAtSlot :: MonadIO m => Text -> Word64 -> ReaderT SqlBackend m Ada
+queryAddressBalanceAtSlot addr slotNo = do
+    eblkId <- select $ do
+      blk <- from (table @Block)
+      where_ (blk ^. BlockSlotNo ==. just (val slotNo))
+      pure (blk ^. BlockId)
+    maybe (pure 0) (queryAddressBalanceAtBlockId . unValue) (listToMaybe eblkId)
+  where
+    queryAddressBalanceAtBlockId :: MonadIO m => BlockId -> ReaderT SqlBackend m Ada
+    queryAddressBalanceAtBlockId blkid = do
+      -- tx1 refers to the tx of the input spending this output (if it is ever spent)
+      -- tx2 refers to the tx of the output
+      res <- select $ do
+        (txout :& _ :& _ :& blk :& _) <-
+          from $ table @TxOut
+          `leftJoin` table @TxIn
+          `on` (\(txout :& txin) -> just (txout ^. TxOutTxId) ==. txin ?. TxInTxOutId)
+          `leftJoin` table @Tx
+          `on` (\(_ :& txin :& tx1) -> txin ?. TxInTxInId ==. tx1 ?. TxId)
+          `leftJoin` table @Block
+          `on` (\(_ :& _ :& tx1 :& blk) -> tx1 ?. TxBlockId ==. blk ?. BlockId)
+          `leftJoin` table @Tx
+          `on` (\(txout :& _ :& _ :& _ :& tx2) -> just (txout ^. TxOutTxId) ==. tx2 ?. TxId )
+        where_ $ (txout ^. TxOutTxId `in_` txLessEqual blkid)
+          &&. (isNothing (blk ?. BlockBlockNo)||. (blk ?. BlockId >. just (val blkid)))
+        where_ (txout ^. TxOutAddress ==. val addr)
+        pure $ sum_ (txout ^. TxOutValue)
+      pure $ unValueSumAda (listToMaybe res)
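(Editorial aside: a minimal usage sketch of the queryAddressBalanceAtSlot helper added above, not part of this diff. It assumes the module's existing imports (Text, Word64, Ada, MonadIO, SqlBackend); the function name is hypothetical.)

-- Sketch: sample an address balance at two slots, e.g. on either side of a
-- rollback point, by running the query above twice.
addressBalanceDelta :: MonadIO m => Text -> Word64 -> Word64 -> ReaderT SqlBackend m (Ada, Ada)
addressBalanceDelta addr slotA slotB = do
  balA <- queryAddressBalanceAtSlot addr slotA
  balB <- queryAddressBalanceAtSlot addr slotB
  pure (balA, balB)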
+
+{-----------------------
+  Queries used in tests
+------------------------}
+
+queryAddressOutputs :: MonadIO m => ByteString -> ReaderT SqlBackend m DbLovelace
+queryAddressOutputs addr = do
+  res <- select $ do
+    txout <- from $ table @TxOut
+    where_ (txout ^. TxOutAddressRaw ==. val addr)
+    pure $ sum_ (txout ^. TxOutValue)
+  pure $ convert (listToMaybe res)
+  where
+    convert v = case unValue <$> v of
+      Just (Just x) -> x
+      _ -> DbLovelace 0
+
+-- | Get the current block height.
+queryBlockHeight :: MonadIO m => ReaderT SqlBackend m (Maybe Word64)
+queryBlockHeight = do
+  res <- select $ do
+    blk <- from $ table @Block
+    where_ (isJust $ blk ^. BlockBlockNo)
+    orderBy [desc (blk ^. BlockBlockNo)]
+    limit 1
+    pure (blk ^. BlockBlockNo)
+  pure $ unValue =<< listToMaybe res
+
+queryRewardCount :: MonadIO m => ReaderT SqlBackend m Word64
+queryRewardCount = do
+  res <- select $ do
+    _ <- from $ table @Reward
+    pure countRows
+  pure $ maybe 0 unValue (listToMaybe res)
+
+-- | Count the number of transactions in the Tx table.
+queryTxInCount :: MonadIO m => ReaderT SqlBackend m Word
+queryTxInCount = do
+  res <- select $ from (table @TxIn) >> pure countRows
+  pure $ maybe 0 unValue (listToMaybe res)
+
+-- | Count the number of transaction outputs in the TxOut table.
+queryTxOutCount :: MonadIO m => ReaderT SqlBackend m Word
+queryTxOutCount = do
+  res <- select $ from (table @TxOut) >> pure countRows
+  pure $ maybe 0 unValue (listToMaybe res)
+
+queryCostModel :: MonadIO m => ReaderT SqlBackend m [CostModelId]
+queryCostModel =
+  fmap entityKey <$> selectList [] [Asc CostModelId]
+
+queryScriptOutputs :: MonadIO m => ReaderT SqlBackend m [TxOut]
+queryScriptOutputs = do
+  res <- select $ do
+    tx_out <- from $ table @TxOut
+    where_ (tx_out ^. TxOutAddressHasScript ==. val True)
+    pure tx_out
+  pure $ entityVal <$> res
+
+queryTxInRedeemer :: MonadIO m => ReaderT SqlBackend m [TxIn]
+queryTxInRedeemer = do
+  res <- select $ do
+    tx_in <- from $ table @TxIn
     where_ (isJust $ tx_in ^. TxInRedeemerId)
     pure tx_in
   pure $ entityVal <$> res
@@ -1164,56 +1050,26 @@ queryStakeAddressScript = do
     pure st_addr
   pure $ entityVal <$> res
 
-queryStakeAddressIdsAfter :: MonadIO m => Word64 -> Bool -> ReaderT SqlBackend m [StakeAddressId]
-queryStakeAddressIdsAfter blockNo queryEq = do
-  res <- select $ do
-    (_tx :& blk :& st_addr) <-
-      from $ table @Tx
-        `innerJoin` table @Block
-        `on` (\(tx :& blk) -> tx ^. TxBlockId ==. blk ^. BlockId)
-        `innerJoin` table @StakeAddress
-        `on` (\(tx :& _blk :& st_addr) -> tx ^. TxId ==. st_addr ^. StakeAddressTxId)
-    where_ (isJust $ blk ^. BlockBlockNo)
-    where_ (if queryEq then blk ^. BlockBlockNo >=. val (Just blockNo)
-                       else blk ^. BlockBlockNo >. val (Just blockNo))
-    pure (st_addr ^. StakeAddressId)
-  pure $ unValue <$> res
-
-existsDelistedPool :: MonadIO m => ByteString -> ReaderT SqlBackend m Bool
-existsDelistedPool ph = do
-  res <- select $ do
-    delistedPool <- from $ table @DelistedPool
-    where_ (delistedPool ^. DelistedPoolHashRaw ==. val ph)
-    limit 1
-    pure (delistedPool ^. DelistedPoolId)
-  pure $ not (null res)
-
-existsPoolHash :: MonadIO m => ByteString -> ReaderT SqlBackend m Bool
-existsPoolHash hsh = do
-  res <- select $ do
-    poolHash <- from $ table @PoolHash
-    where_ (poolHash ^. PoolHashHashRaw ==. val hsh)
-    limit 1
-    pure (poolHash ^. 
PoolHashId) - pure $ not (null res) - -existsPoolHashId :: MonadIO m => PoolHashId -> ReaderT SqlBackend m Bool -existsPoolHashId phid = do +querySchemaVersion :: MonadIO m => ReaderT SqlBackend m (Maybe SchemaVersion) +querySchemaVersion = do res <- select $ do - poolHash <- from $ table @PoolHash - where_ (poolHash ^. PoolHashId ==. val phid) + sch <- from $ table @SchemaVersion + orderBy [desc (sch ^. SchemaVersionStageOne)] limit 1 - pure (poolHash ^. PoolHashId) - pure $ not (null res) + pure (sch ^. SchemaVersionStageOne, sch ^. SchemaVersionStageTwo, sch ^. SchemaVersionStageThree) + pure $ uncurry3 SchemaVersion . unValue3 <$> listToMaybe res -existsPoolMetadataRefId :: MonadIO m => PoolMetadataRefId -> ReaderT SqlBackend m Bool -existsPoolMetadataRefId pmrid = do +-- | Given a 'SlotNo' return the 'SlotNo' of the previous block. +queryPreviousSlotNo :: MonadIO m => Word64 -> ReaderT SqlBackend m (Maybe Word64) +queryPreviousSlotNo slotNo = do res <- select $ do - pmr <- from $ table @PoolMetadataRef - where_ (pmr ^. PoolMetadataRefId ==. val pmrid) - limit 1 - pure (pmr ^. PoolMetadataRefId) - pure $ not (null res) + (blk :& pblk) <- + from $ table @Block + `innerJoin` table @Block + `on` (\(blk :& pblk) -> blk ^. BlockPreviousId ==. just (pblk ^. BlockId)) + where_ (blk ^. BlockSlotNo ==. just (val slotNo)) + pure $ pblk ^. BlockSlotNo + pure $ unValue =<< listToMaybe res -- ----------------------------------------------------------------------------- -- SqlQuery predicates @@ -1222,24 +1078,6 @@ existsPoolMetadataRefId pmrid = do isJust :: PersistField a => SqlExpr (Value (Maybe a)) -> SqlExpr (Value Bool) isJust = not_ . isNothing --- Returns True if the TxOut has been spent. -{-# INLINABLE txOutSpentB #-} -txOutSpentB :: SqlExpr (Entity TxOut) -> SqlExpr (Value Bool) -txOutSpentB txOut = - exists $ from (table @TxIn) >>= \ txIn -> - where_ (txOut ^. TxOutTxId ==. txIn ^. TxInTxOutId - &&. txOut ^. TxOutIndex ==. txIn ^. TxInTxOutIndex - ) - --- A predicate that filters out unspent 'TxOut' entries. -{-# INLINABLE txOutSpentP #-} -txOutSpentP :: SqlExpr (Entity TxOut) -> SqlQuery () -txOutSpentP txOut = - where_ . exists $ from (table @TxIn) >>= \ txIn -> - where_ (txOut ^. TxOutTxId ==. txIn ^. TxInTxOutId - &&. txOut ^. TxOutIndex ==. txIn ^. TxInTxOutIndex - ) - -- A predicate that filters out spent 'TxOut' entries. {-# INLINABLE txOutUnspentP #-} txOutUnspentP :: SqlExpr (Entity TxOut) -> SqlQuery () diff --git a/cardano-db/src/Cardano/Db/Schema.hs b/cardano-db/src/Cardano/Db/Schema.hs index bc97a3b36..b2ac845ff 100644 --- a/cardano-db/src/Cardano/Db/Schema.hs +++ b/cardano-db/src/Cardano/Db/Schema.hs @@ -48,6 +48,7 @@ share [ mkPersist sqlSettings , mkMigrate "migrateCardanoDb" , mkEntityDefList "entityDefs" +-- , mkDeleteCascade sqlSettings , deriveShowFields ] [persistLowerCase| @@ -70,7 +71,7 @@ share SlotLeader hash ByteString sqltype=hash28type - poolHashId PoolHashId Maybe OnDeleteCascade -- This will be non-null when a block is mined by a pool. + poolHashId PoolHashId Maybe noreference -- This will be non-null when a block is mined by a pool. description Text -- Description of the Slots leader. 
UniqueSlotLeader hash @@ -87,7 +88,7 @@ share epochSlotNo Word64 Maybe sqltype=word31type blockNo Word64 Maybe sqltype=word31type previousId BlockId Maybe OnDeleteCascade - slotLeaderId SlotLeaderId OnDeleteCascade + slotLeaderId SlotLeaderId noreference size Word64 sqltype=word31type time UTCTime sqltype=timestamp txCount Word64 @@ -117,11 +118,14 @@ share scriptSize Word64 sqltype=word31type UniqueTx hash + ReverseIndex + blockId BlockId OnDeleteCascade + minIds Text + StakeAddress -- Can be an address of a script hash hashRaw ByteString sqltype=addr29type view Text scriptHash ByteString Maybe sqltype=hash28type - txId TxId OnDeleteCascade -- Only used for rollback. UniqueStakeAddress hashRaw TxOut @@ -131,11 +135,11 @@ share addressRaw ByteString addressHasScript Bool paymentCred ByteString Maybe sqltype=hash28type - stakeAddressId StakeAddressId Maybe OnDeleteCascade + stakeAddressId StakeAddressId Maybe noreference value DbLovelace sqltype=lovelace dataHash ByteString Maybe sqltype=hash32type - inlineDatumId DatumId Maybe OnDeleteCascade - referenceScriptId ScriptId Maybe OnDeleteCascade + inlineDatumId DatumId Maybe noreference + referenceScriptId ScriptId Maybe noreference UniqueTxout txId index -- The (tx_id, index) pair must be unique. CollateralTxOut @@ -145,30 +149,29 @@ share addressRaw ByteString addressHasScript Bool paymentCred ByteString Maybe sqltype=hash28type - stakeAddressId StakeAddressId Maybe OnDeleteCascade + stakeAddressId StakeAddressId Maybe noreference value DbLovelace sqltype=lovelace dataHash ByteString Maybe sqltype=hash32type multiAssetsDescr Text - inlineDatumId DatumId Maybe OnDeleteCascade - referenceScriptId ScriptId Maybe OnDeleteCascade + inlineDatumId DatumId Maybe noreference + referenceScriptId ScriptId Maybe noreference UniqueColTxout txId index -- The (tx_id, index) pair must be unique. TxIn txInId TxId OnDeleteCascade -- The transaction where this is used as an input. - txOutId TxId OnDeleteCascade -- The transaction where this was created as an output. + txOutId TxId noreference -- The transaction where this was created as an output. txOutIndex Word64 sqltype=txindex - redeemerId RedeemerId Maybe OnDeleteCascade - UniqueTxin txOutId txOutIndex + redeemerId RedeemerId Maybe noreference CollateralTxIn txInId TxId OnDeleteCascade -- The transaction where this is used as an input. - txOutId TxId OnDeleteCascade -- The transaction where this was created as an output. + txOutId TxId noreference -- The transaction where this was created as an output. txOutIndex Word64 sqltype=txindex UniqueColTxin txInId txOutId txOutIndex ReferenceTxIn txInId TxId OnDeleteCascade -- The transaction where this is used as an input. - txOutId TxId OnDeleteCascade -- The transaction where this was created as an output. + txOutId TxId noreference -- The transaction where this was created as an output. txOutIndex Word64 sqltype=txindex UniqueRefTxin txInId txOutId txOutIndex @@ -180,10 +183,6 @@ share version Text UniqueMeta startTime - - -- The following are tables used my specific 'plugins' to the regular cardano-db-sync node - -- functionality. In the regular cardano-db-sync node these tables will be empty. - -- The Epoch table is an aggregation of data in the 'Block' table, but is kept in this form -- because having it as a 'VIEW' is incredibly slow and inefficient. 
@@ -216,43 +215,38 @@ share deposits DbLovelace sqltype=lovelace fees DbLovelace sqltype=lovelace blockId BlockId OnDeleteCascade - UniqueAdaPots blockId deriving Eq - -- ----------------------------------------------------------------------------------------------- - PoolMetadataRef - poolId PoolHashId OnDeleteCascade + poolId PoolHashId noreference url Text hash ByteString sqltype=hash32type registeredTxId TxId OnDeleteCascade -- Only used for rollback. UniquePoolMetadataRef poolId url hash PoolUpdate - hashId PoolHashId OnDeleteCascade + hashId PoolHashId noreference certIndex Word16 vrfKeyHash ByteString sqltype=hash32type pledge DbLovelace sqltype=lovelace - rewardAddrId StakeAddressId OnDeleteCascade + rewardAddrId StakeAddressId noreference activeEpochNo Word64 - metaId PoolMetadataRefId Maybe OnDeleteCascade + metaId PoolMetadataRefId Maybe noreference margin Double -- sqltype=percentage???? fixedCost DbLovelace sqltype=lovelace registeredTxId TxId OnDeleteCascade -- Slot number in which the pool was registered. - UniquePoolUpdate registeredTxId certIndex -- A Pool can have more than one owner, so we have a PoolOwner table. PoolOwner - addrId StakeAddressId OnDeleteCascade + addrId StakeAddressId noreference poolUpdateId PoolUpdateId OnDeleteCascade UniquePoolOwner addrId poolUpdateId PoolRetire - hashId PoolHashId OnDeleteCascade + hashId PoolHashId noreference certIndex Word16 announcedTxId TxId OnDeleteCascade -- Slot number in which the pool announced it was retiring. retiringEpoch Word64 sqltype=word31type -- Epoch number in which the pool will retire. - UniquePoolRetiring announcedTxId certIndex PoolRelay updateId PoolUpdateId OnDeleteCascade @@ -261,47 +255,35 @@ share dnsName Text Maybe dnsSrvName Text Maybe port Word16 Maybe - -- Usually NULLables are not allowed in a uniqueness constraint. The semantics of how NULL - -- interacts with those constraints is non-trivial: two NULL values are not considered equal - -- for the purposes of an uniqueness constraint. - -- Use of "!force" attribute on the end of the line disables this check. 
- UniquePoolRelay updateId ipv4 ipv6 dnsName !force - - -- ----------------------------------------------------------------------------------------------- - -- When was a staking key/script registered StakeRegistration - addrId StakeAddressId OnDeleteCascade + addrId StakeAddressId noreference certIndex Word16 epochNo Word64 sqltype=word31type txId TxId OnDeleteCascade - UniqueStakeRegistration txId certIndex -- When was a staking key/script deregistered StakeDeregistration - addrId StakeAddressId OnDeleteCascade + addrId StakeAddressId noreference certIndex Word16 epochNo Word64 sqltype=word31type txId TxId OnDeleteCascade - redeemerId RedeemerId Maybe OnDeleteCascade - UniqueStakeDeregistration txId certIndex + redeemerId RedeemerId Maybe noreference Delegation - addrId StakeAddressId OnDeleteCascade + addrId StakeAddressId noreference certIndex Word16 - poolHashId PoolHashId OnDeleteCascade + poolHashId PoolHashId noreference activeEpochNo Word64 txId TxId OnDeleteCascade slotNo Word64 sqltype=word63type - redeemerId RedeemerId Maybe OnDeleteCascade - UniqueDelegation txId certIndex + redeemerId RedeemerId Maybe noreference TxMetadata key DbWord64 sqltype=word64type json Text Maybe sqltype=jsonb bytes ByteString sqltype=bytea txId TxId OnDeleteCascade - UniqueTxMetadata key txId -- ----------------------------------------------------------------------------------------------- -- Reward, Stake and Treasury need to be obtained from the ledger state. @@ -310,12 +292,12 @@ share -- epoch in which the reward was earned. -- This table should never get rolled back. Reward - addrId StakeAddressId OnDeleteCascade + addrId StakeAddressId noreference type RewardSource sqltype=rewardtype amount DbLovelace sqltype=lovelace earnedEpoch Word64 spendableEpoch Word64 - poolId PoolHashId Maybe OnDeleteCascade + poolId PoolHashId Maybe noreference -- Usually NULLables are not allowed in a uniqueness constraint. The semantics of how NULL -- interacts with those constraints is non-trivial: two NULL values are not considered equal -- for the purposes of an uniqueness constraint. @@ -324,40 +306,36 @@ share deriving Show Withdrawal - addrId StakeAddressId OnDeleteCascade + addrId StakeAddressId noreference amount DbLovelace sqltype=lovelace - redeemerId RedeemerId Maybe OnDeleteCascade + redeemerId RedeemerId Maybe noreference txId TxId OnDeleteCascade - UniqueWithdrawal addrId txId -- This table should never get rolled back. 
EpochStake - addrId StakeAddressId OnDeleteCascade - poolId PoolHashId OnDeleteCascade + addrId StakeAddressId noreference + poolId PoolHashId noreference amount DbLovelace sqltype=lovelace epochNo Word64 sqltype=word31type UniqueStake epochNo addrId poolId Treasury - addrId StakeAddressId OnDeleteCascade + addrId StakeAddressId noreference certIndex Word16 amount DbInt65 sqltype=int65type txId TxId OnDeleteCascade - UniqueTreasury addrId txId certIndex Reserve - addrId StakeAddressId OnDeleteCascade + addrId StakeAddressId noreference certIndex Word16 amount DbInt65 sqltype=int65type txId TxId OnDeleteCascade - UniqueReserves addrId txId certIndex PotTransfer certIndex Word16 treasury DbInt65 sqltype=int65type reserves DbInt65 sqltype=int65type txId TxId OnDeleteCascade - UniquePotTransfer txId certIndex EpochSyncTime no Word64 @@ -375,20 +353,15 @@ share UniqueMultiAsset policy name MaTxMint - ident MultiAssetId OnDeleteCascade + ident MultiAssetId noreference quantity DbInt65 sqltype=int65type txId TxId OnDeleteCascade - UniqueMaTxMint ident txId MaTxOut - ident MultiAssetId OnDeleteCascade + ident MultiAssetId noreference quantity DbWord64 sqltype=word64type txOutId TxOutId OnDeleteCascade - UniqueMaTxOut ident txOutId - -- ----------------------------------------------------------------------------------------------- - -- Scripts related tables. - -- -- Unit step is in picosends, and `maxBound :: Int64` picoseconds is over 100 days, so using -- Word64/word63type is safe here. Similarly, `maxBound :: Int64` if unit step would be an -- *enormous* amount a memory which would cost a fortune. @@ -400,8 +373,7 @@ share purpose ScriptPurpose sqltype=scriptpurposetype index Word64 sqltype=word31type scriptHash ByteString Maybe sqltype=hash28type - redeemerDataId RedeemerDataId OnDeleteCascade - UniqueRedeemer txId purpose index + redeemerDataId RedeemerDataId noreference Script txId TxId OnDeleteCascade @@ -429,10 +401,6 @@ share ExtraKeyWitness hash ByteString sqltype=hash28type txId TxId OnDeleteCascade - UniqueWitness hash - - -- ----------------------------------------------------------------------------------------------- - -- Update parameter proposals. ParamProposal epochNo Word64 sqltype=word31type @@ -457,7 +425,7 @@ share minPoolCost DbLovelace Maybe sqltype=lovelace coinsPerUtxoSize DbLovelace Maybe sqltype=lovelace - costModelId CostModelId Maybe OnDeleteCascade + costModelId CostModelId Maybe noreference priceMem Double Maybe -- sqltype=rational priceStep Double Maybe -- sqltype=rational maxTxExMem DbWord64 Maybe sqltype=word64type @@ -468,8 +436,7 @@ share collateralPercent Word16 Maybe sqltype=word31type maxCollateralInputs Word16 Maybe sqltype=word31type - registeredTxId TxId OnDeleteCascade -- Slot number in which update registered. - UniqueParamProposal key registeredTxId + registeredTxId TxId OnDeleteCascade EpochParam epochNo Word64 sqltype=word31type @@ -495,7 +462,7 @@ share nonce ByteString Maybe sqltype=hash32type coinsPerUtxoSize DbLovelace Maybe sqltype=lovelace - costModelId CostModelId Maybe OnDeleteCascade + costModelId CostModelId Maybe noreference priceMem Double Maybe -- sqltype=rational priceStep Double Maybe -- sqltype=rational maxTxExMem DbWord64 Maybe sqltype=word64type @@ -507,19 +474,17 @@ share maxCollateralInputs Word16 Maybe sqltype=word31type blockId BlockId OnDeleteCascade -- The first block where these parameters are valid. 
-    UniqueEpochParam epochNo blockId
 
   CostModel
     hash                ByteString          sqltype=hash32type
     costs               Text                sqltype=jsonb
-    blockId             BlockId             OnDeleteCascade
     UniqueCostModel hash
 
   -- -----------------------------------------------------------------------------------------------
   -- Pool offline (ie not on the blockchain) data.
 
   PoolOfflineData
-    poolId              PoolHashId          OnDeleteCascade
+    poolId              PoolHashId          noreference
     tickerName          Text
    hash                ByteString          sqltype=hash32type
    json                Text                sqltype=jsonb
@@ -532,7 +497,7 @@ share
   -- TODO(KS): Debatable whether we need to persist this between migrations!
 
   PoolOfflineFetchError
-    poolId              PoolHashId          OnDeleteCascade
+    poolId              PoolHashId          noreference
     fetchTime           UTCTime             sqltype=timestamp
    pmrId               PoolMetadataRefId   OnDeleteCascade
    fetchError          Text
@@ -614,13 +579,10 @@ schemaDocs =
    StakeAddress --^ do
      "A table of unique stake addresses. Can be an actual address or a script hash. \
-      \ The existance of an entry doesn't mean the address is registered or in fact that is was ever registered.\
-      \ For example a pool update may contain a stake address which was never registered."
+      \ The existence of an entry doesn't mean the address is registered or in fact that it was ever registered."
      StakeAddressHashRaw # "The raw bytes of the stake address hash."
      StakeAddressView # "The Bech32 encoded version of the stake address."
      StakeAddressScriptHash # "The script hash, in case this address is locked by a script."
-      StakeAddressTxId # "The Tx table index of the transaction in which this address first appeared.\
-        \ New in v13: Renamed from registered_tx_id."
 
    TxOut --^ do
      "A table for transaction outputs."
@@ -965,7 +927,6 @@ schemaDocs =
      "CostModel for EpochParam and ParamProposal."
      CostModelHash # "The hash of cost model. It ensures uniqueness of entries. New in v13."
      CostModelCosts # "The actual costs formatted as json."
-      CostModelBlockId # "The first block where these costs were introduced. This is only used for rollbacks."
 
    PoolOfflineData --^ do
      "The pool offline (ie not on chain) for a stake pool."
diff --git a/cardano-db/src/Cardano/Db/Update.hs b/cardano-db/src/Cardano/Db/Update.hs
deleted file mode 100644
index ec210c32d..000000000
--- a/cardano-db/src/Cardano/Db/Update.hs
+++ /dev/null
@@ -1,24 +0,0 @@
-{-# LANGUAGE FlexibleContexts #-}
-{-# LANGUAGE RankNTypes #-}
-{-# LANGUAGE ScopedTypeVariables #-}
-{-# LANGUAGE TypeFamilies #-}
-
-module Cardano.Db.Update
-  ( upateDatumBytes
-  , upateRedeemerDataBytes
-  ) where
-
-import Control.Monad.IO.Class (MonadIO)
-import Control.Monad.Trans.Reader (ReaderT)
-
-import Cardano.Db.Schema
-import Data.ByteString (ByteString)
-import Database.Persist.Postgresql (SqlBackend)
-import Database.Persist.Class
-import Database.Persist ((=.))
-
-upateDatumBytes :: MonadIO m => DatumId -> ByteString -> ReaderT SqlBackend m ()
-upateDatumBytes datumId bytes = update datumId [DatumBytes =. bytes]
-
-upateRedeemerDataBytes :: MonadIO m => RedeemerDataId -> ByteString -> ReaderT SqlBackend m ()
-upateRedeemerDataBytes rdmDataId bytes = update rdmDataId [RedeemerDataBytes =. bytes]
diff --git a/cardano-db/test/Test/IO/Cardano/Db/Insert.hs b/cardano-db/test/Test/IO/Cardano/Db/Insert.hs
index 7b6aa1966..add5a46de 100644
--- a/cardano-db/test/Test/IO/Cardano/Db/Insert.hs
+++ b/cardano-db/test/Test/IO/Cardano/Db/Insert.hs
@@ -35,8 +35,8 @@ insertZeroTest = runDbNoLoggingEnv $ do
   -- Delete the blocks if they exist. 
slid <- insertSlotLeader testSlotLeader - void $ deleteCascadeBlock (blockOne slid) - void $ deleteCascadeBlock (blockZero slid) + void $ deleteBlock (blockOne slid) + void $ deleteBlock (blockZero slid) -- Insert the same block twice. The first should be successful (resulting -- in a 'Right') and the second should return the same value in a 'Left'. bid0 <- insertBlockChecked (blockZero slid) @@ -49,7 +49,7 @@ insertFirstTest = runDbNoLoggingEnv $ do -- Delete the block if it exists. slid <- insertSlotLeader testSlotLeader - void $ deleteCascadeBlock (blockOne slid) + void $ deleteBlock (blockOne slid) -- Insert the same block twice. bid0 <- insertBlockChecked (blockZero slid) bid1 <- insertBlockChecked $ (\b -> b { blockPreviousId = Just bid0 }) (blockOne slid) diff --git a/cardano-db/test/Test/IO/Cardano/Db/Migration.hs b/cardano-db/test/Test/IO/Cardano/Db/Migration.hs index 8d26acc95..8511c30d0 100644 --- a/cardano-db/test/Test/IO/Cardano/Db/Migration.hs +++ b/cardano-db/test/Test/IO/Cardano/Db/Migration.hs @@ -4,12 +4,12 @@ module Test.IO.Cardano.Db.Migration ) where import Cardano.Db (LogFileDir (..), MigrationDir (..), MigrationValidate (..), - MigrationValidateError (..), MigrationVersion (..), SchemaVersion (..), - getMigrationScripts, querySchemaVersion, readPGPassDefault, renderPGPassError, - runDbNoLoggingEnv, runMigrations, validateMigrations) + MigrationValidateError (..), MigrationVersion (..), MigrationToRun (..), + SchemaVersion (..), getMigrationScripts, querySchemaVersion, readPGPassDefault, + renderPGPassError, runDbNoLoggingEnv, runMigrations, validateMigrations) import Control.Monad (unless, when) import Control.Monad.Trans.Except.Exit (orDie) -import Control.Monad.Trans.Except.Extra (newExceptT, runExceptT) +import Control.Monad.Trans.Except.Extra (newExceptT) import qualified Data.List as List import qualified Data.List.Extra as List @@ -32,8 +32,8 @@ tests = unknownMigrationValidate :: IO () unknownMigrationValidate = do - result <- runExceptT $ validateMigrations testSchemaDir knownTestMigrations - unless (result == expected) $ + result <- validateMigrations testSchemaDir knownTestMigrations + unless (result == Just (expected, False)) $ error $ mconcat [ "Schema version mismatch. Expected " , show expected @@ -42,9 +42,9 @@ unknownMigrationValidate = do , "." ] where - expected :: Either MigrationValidateError () + expected :: MigrationValidateError expected = - Left $ UnknownMigrationsFound + UnknownMigrationsFound { missingMigrations = [ MigrationValidate { mvHash = "hash" @@ -62,8 +62,8 @@ unknownMigrationValidate = do invalidHashMigrationValidate :: IO () invalidHashMigrationValidate = do - result <- runExceptT $ validateMigrations testSchemaDir knownTestMigrations - unless (result == expected) $ + result <- validateMigrations testSchemaDir knownTestMigrations + unless (result == Just (expected, False)) $ error $ mconcat [ "Schema version mismatch. Expected " , show expected @@ -72,9 +72,9 @@ invalidHashMigrationValidate = do , "." 
] where - expected :: Either MigrationValidateError () + expected :: MigrationValidateError expected = - Left $ UnknownMigrationsFound + UnknownMigrationsFound { missingMigrations = [ MigrationValidate { mvHash = "hash" @@ -92,8 +92,8 @@ invalidHashMigrationValidate = do invalidHashMigrationValidate' :: IO () invalidHashMigrationValidate' = do let emptyMigrations = [] -- No known migrations from compiling - result <- runExceptT $ validateMigrations testSchemaDir emptyMigrations - unless (result == expected) $ + result <- validateMigrations testSchemaDir emptyMigrations + unless (result == Just (expected, False)) $ error $ mconcat [ "Schema version mismatch. Expected " , show expected @@ -102,9 +102,9 @@ invalidHashMigrationValidate' = do , "." ] where - expected :: Either MigrationValidateError () + expected :: MigrationValidateError expected = - Left $ UnknownMigrationsFound + UnknownMigrationsFound { missingMigrations = [] , extraMigrations = [ MigrationValidate @@ -120,7 +120,7 @@ migrationTest :: IO () migrationTest = do let schemaDir = MigrationDir "../schema" pgConfig <- orDie renderPGPassError $ newExceptT readPGPassDefault - _ <-runMigrations pgConfig True schemaDir (Just $ LogFileDir "/tmp") + _ <- runMigrations pgConfig True schemaDir (Just $ LogFileDir "/tmp") Initial expected <- readSchemaVersion schemaDir actual <- getDbSchemaVersion unless (expected == actual) $ diff --git a/cardano-db/test/Test/IO/Cardano/Db/Rollback.hs b/cardano-db/test/Test/IO/Cardano/Db/Rollback.hs index d19f70c41..91197f127 100644 --- a/cardano-db/test/Test/IO/Cardano/Db/Rollback.hs +++ b/cardano-db/test/Test/IO/Cardano/Db/Rollback.hs @@ -51,7 +51,7 @@ rollbackTest = -- Rollback a set of blocks. latestSlotNo <- queryLatestSlotNo Just pSlotNo <- queryWalkChain 5 latestSlotNo - void $ deleteCascadeSlotNo (SlotNo pSlotNo) + void $ deleteBlocksSlotNoNoTrace (SlotNo pSlotNo) -- Assert the expected final state. afterBlocks <- queryBlockCount assertBool ("Block count after rollback is " ++ show afterBlocks ++ " but should be 10") $ afterBlocks == 4 diff --git a/config/pgpass-mainnet b/config/pgpass-mainnet index fde56efbc..beb11d137 100644 --- a/config/pgpass-mainnet +++ b/config/pgpass-mainnet @@ -1 +1 @@ -/var/run/postgresql:5432:cexplorer:*:* +/var/run/postgresql:5432:mainnet:*:* diff --git a/schema/migration-2-0020-20221019.sql b/schema/migration-2-0020-20221019.sql new file mode 100644 index 000000000..40269e35e --- /dev/null +++ b/schema/migration-2-0020-20221019.sql @@ -0,0 +1,36 @@ +-- Persistent generated migration. 
+ +CREATE FUNCTION migrate() RETURNS void AS $$ +DECLARE + next_version int ; +BEGIN + SELECT stage_two + 1 INTO next_version FROM schema_version ; + IF next_version = 20 THEN + EXECUTE 'ALTER TABLE "pool_update" DROP CONSTRAINT "unique_pool_update"' ; + EXECUTE 'ALTER TABLE "pool_retire" DROP CONSTRAINT "unique_pool_retiring"' ; + EXECUTE 'ALTER TABLE "pool_relay" DROP CONSTRAINT "unique_pool_relay"' ; + EXECUTE 'ALTER TABLE "stake_registration" DROP CONSTRAINT "unique_stake_registration"' ; + EXECUTE 'ALTER TABLE "stake_deregistration" DROP CONSTRAINT "unique_stake_deregistration"' ; + EXECUTE 'ALTER TABLE "delegation" DROP CONSTRAINT "unique_delegation"' ; + EXECUTE 'ALTER TABLE "tx_metadata" DROP CONSTRAINT "unique_tx_metadata"' ; + EXECUTE 'ALTER TABLE "withdrawal" DROP CONSTRAINT "unique_withdrawal"' ; + EXECUTE 'ALTER TABLE "treasury" DROP CONSTRAINT "unique_treasury"' ; + EXECUTE 'ALTER TABLE "reserve" DROP CONSTRAINT "unique_reserves"' ; + EXECUTE 'ALTER TABLE "pot_transfer" DROP CONSTRAINT "unique_pot_transfer"' ; + EXECUTE 'ALTER TABLE "ma_tx_mint" DROP CONSTRAINT "unique_ma_tx_mint"' ; + EXECUTE 'ALTER TABLE "ma_tx_out" DROP CONSTRAINT "unique_ma_tx_out"' ; + EXECUTE 'ALTER TABLE "redeemer" DROP CONSTRAINT "unique_redeemer"' ; + EXECUTE 'ALTER TABLE "extra_key_witness" DROP CONSTRAINT "unique_witness"' ; + EXECUTE 'ALTER TABLE "param_proposal" DROP CONSTRAINT "unique_param_proposal"' ; + EXECUTE 'ALTER TABLE "epoch_param" DROP CONSTRAINT "unique_epoch_param"' ; + + -- Hand written SQL statements can be added here. + UPDATE schema_version SET stage_two = next_version ; + RAISE NOTICE 'DB has been migrated to stage_two version %', next_version ; + END IF ; +END ; +$$ LANGUAGE plpgsql ; + +SELECT migrate() ; + +DROP FUNCTION migrate() ; diff --git a/schema/migration-2-0021-20221020.sql b/schema/migration-2-0021-20221020.sql new file mode 100644 index 000000000..dced4e591 --- /dev/null +++ b/schema/migration-2-0021-20221020.sql @@ -0,0 +1,51 @@ +-- Persistent generated migration. 
+ +CREATE FUNCTION migrate() RETURNS void AS $$ +DECLARE + next_version int ; +BEGIN + SELECT stage_two + 1 INTO next_version FROM schema_version ; + IF next_version = 21 THEN + EXECUTE 'ALTER TABLE "slot_leader" DROP CONSTRAINT "slot_leader_pool_hash_id_fkey"' ; + EXECUTE 'ALTER TABLE "block" DROP CONSTRAINT "block_slot_leader_id_fkey"' ; + EXECUTE 'ALTER TABLE "tx_out" DROP CONSTRAINT "tx_out_inline_datum_id_fkey"' ; + EXECUTE 'ALTER TABLE "tx_out" DROP CONSTRAINT "tx_out_reference_script_id_fkey"' ; + EXECUTE 'ALTER TABLE "tx_out" DROP CONSTRAINT "tx_out_stake_address_id_fkey"' ; + EXECUTE 'ALTER TABLE "collateral_tx_out" DROP CONSTRAINT "collateral_tx_out_inline_datum_id_fkey"' ; + EXECUTE 'ALTER TABLE "collateral_tx_out" DROP CONSTRAINT "collateral_tx_out_reference_script_id_fkey"' ; + EXECUTE 'ALTER TABLE "collateral_tx_out" DROP CONSTRAINT "collateral_tx_out_stake_address_id_fkey"' ; + EXECUTE 'ALTER TABLE "tx_in" DROP CONSTRAINT "tx_in_redeemer_id_fkey"' ; + EXECUTE 'ALTER TABLE "tx_in" DROP CONSTRAINT "tx_in_tx_out_id_fkey"' ; + EXECUTE 'ALTER TABLE "collateral_tx_in" DROP CONSTRAINT "collateral_tx_in_tx_out_id_fkey"' ; + EXECUTE 'ALTER TABLE "reference_tx_in" DROP CONSTRAINT "reference_tx_in_tx_out_id_fkey"' ; + EXECUTE 'ALTER TABLE "pool_metadata_ref" DROP CONSTRAINT "pool_metadata_ref_pool_id_fkey"' ; + EXECUTE 'ALTER TABLE "pool_update" DROP CONSTRAINT "pool_update_hash_id_fkey"' ; + EXECUTE 'ALTER TABLE "pool_update" DROP CONSTRAINT "pool_update_meta_id_fkey"' ; + EXECUTE 'ALTER TABLE "pool_update" DROP CONSTRAINT "pool_update_reward_addr_id_fkey"' ; + EXECUTE 'ALTER TABLE "pool_owner" DROP CONSTRAINT "pool_owner_addr_id_fkey"' ; + EXECUTE 'ALTER TABLE "pool_retire" DROP CONSTRAINT "pool_retire_hash_id_fkey"' ; + EXECUTE 'ALTER TABLE "stake_registration" DROP CONSTRAINT "stake_registration_addr_id_fkey"' ; + EXECUTE 'ALTER TABLE "stake_deregistration" DROP CONSTRAINT "stake_deregistration_addr_id_fkey"' ; + EXECUTE 'ALTER TABLE "stake_deregistration" DROP CONSTRAINT "stake_deregistration_redeemer_id_fkey"' ; + EXECUTE 'ALTER TABLE "delegation" DROP CONSTRAINT "delegation_addr_id_fkey"' ; + EXECUTE 'ALTER TABLE "delegation" DROP CONSTRAINT "delegation_pool_hash_id_fkey"' ; + EXECUTE 'ALTER TABLE "delegation" DROP CONSTRAINT "delegation_redeemer_id_fkey"' ; + EXECUTE 'ALTER TABLE "withdrawal" DROP CONSTRAINT "withdrawal_addr_id_fkey"' ; + EXECUTE 'ALTER TABLE "withdrawal" DROP CONSTRAINT "withdrawal_redeemer_id_fkey"' ; + EXECUTE 'ALTER TABLE "treasury" DROP CONSTRAINT "treasury_addr_id_fkey"' ; + EXECUTE 'ALTER TABLE "reserve" DROP CONSTRAINT "reserve_addr_id_fkey"' ; + EXECUTE 'ALTER TABLE "ma_tx_mint" DROP CONSTRAINT "ma_tx_mint_ident_fkey"' ; + EXECUTE 'ALTER TABLE "ma_tx_out" DROP CONSTRAINT "ma_tx_out_ident_fkey"' ; + EXECUTE 'ALTER TABLE "redeemer" DROP CONSTRAINT "redeemer_redeemer_data_id_fkey"' ; + EXECUTE 'ALTER TABLE "param_proposal" DROP CONSTRAINT "param_proposal_cost_model_id_fkey"' ; + EXECUTE 'ALTER TABLE "epoch_param" DROP CONSTRAINT "epoch_param_cost_model_id_fkey"' ; + -- Hand written SQL statements can be added here. 
+    UPDATE schema_version SET stage_two = next_version ;
+    RAISE NOTICE 'DB has been migrated to stage_two version %', next_version ;
+  END IF ;
+END ;
+$$ LANGUAGE plpgsql ;
+
+SELECT migrate() ;
+
+DROP FUNCTION migrate() ;
diff --git a/schema/migration-2-0022-20221020.sql b/schema/migration-2-0022-20221020.sql
new file mode 100644
index 000000000..607b1d2c8
--- /dev/null
+++ b/schema/migration-2-0022-20221020.sql
@@ -0,0 +1,58 @@
+-- Persistent generated migration.
+
+CREATE FUNCTION migrate() RETURNS void AS $$
+DECLARE
+  next_version int ;
+BEGIN
+  SELECT stage_two + 1 INTO next_version FROM schema_version ;
+  IF next_version = 22 THEN
+    EXECUTE 'ALTER TABLE "block" DROP CONSTRAINT "block_previous_id_fkey"' ;
+    EXECUTE 'ALTER TABLE "tx" DROP CONSTRAINT "tx_block_id_fkey"' ;
+    EXECUTE 'ALTER TABLE "stake_address" DROP CONSTRAINT "stake_address_tx_id_fkey"' ;
+    EXECUTE 'ALTER TABLE "tx_out" DROP CONSTRAINT "tx_out_tx_id_fkey"' ;
+    EXECUTE 'ALTER TABLE "collateral_tx_out" DROP CONSTRAINT "collateral_tx_out_tx_id_fkey"' ;
+    EXECUTE 'ALTER TABLE "tx_in" DROP CONSTRAINT "tx_in_tx_in_id_fkey"' ;
+    EXECUTE 'ALTER TABLE "collateral_tx_in" DROP CONSTRAINT "collateral_tx_in_tx_in_id_fkey"' ;
+    EXECUTE 'ALTER TABLE "reference_tx_in" DROP CONSTRAINT "reference_tx_in_tx_in_id_fkey"' ;
+    EXECUTE 'ALTER TABLE "ada_pots" DROP CONSTRAINT "ada_pots_block_id_fkey"' ;
+    EXECUTE 'ALTER TABLE "pool_metadata_ref" DROP CONSTRAINT "pool_metadata_ref_registered_tx_id_fkey"' ;
+    EXECUTE 'ALTER TABLE "pool_update" DROP CONSTRAINT "pool_update_registered_tx_id_fkey"' ;
+    EXECUTE 'ALTER TABLE "pool_owner" DROP CONSTRAINT "pool_owner_pool_update_id_fkey"' ;
+    EXECUTE 'ALTER TABLE "pool_retire" DROP CONSTRAINT "pool_retire_announced_tx_id_fkey"' ;
+    EXECUTE 'ALTER TABLE "pool_relay" DROP CONSTRAINT "pool_relay_update_id_fkey"' ;
+    EXECUTE 'ALTER TABLE "stake_registration" DROP CONSTRAINT "stake_registration_tx_id_fkey"' ;
+    EXECUTE 'ALTER TABLE "stake_deregistration" DROP CONSTRAINT "stake_deregistration_tx_id_fkey"' ;
+    EXECUTE 'ALTER TABLE "delegation" DROP CONSTRAINT "delegation_tx_id_fkey"' ;
+    EXECUTE 'ALTER TABLE "tx_metadata" DROP CONSTRAINT "tx_metadata_tx_id_fkey"' ;
+    EXECUTE 'ALTER TABLE "reward" DROP CONSTRAINT "reward_pool_id_fkey"' ;
+    EXECUTE 'ALTER TABLE "reward" DROP CONSTRAINT "reward_addr_id_fkey"' ;
+    EXECUTE 'ALTER TABLE "withdrawal" DROP CONSTRAINT "withdrawal_tx_id_fkey"' ;
+    EXECUTE 'ALTER TABLE "epoch_stake" DROP CONSTRAINT "epoch_stake_addr_id_fkey"' ;
+    EXECUTE 'ALTER TABLE "epoch_stake" DROP CONSTRAINT "epoch_stake_pool_id_fkey"' ;
+    EXECUTE 'ALTER TABLE "treasury" DROP CONSTRAINT "treasury_tx_id_fkey"' ;
+    EXECUTE 'ALTER TABLE "reserve" DROP CONSTRAINT "reserve_tx_id_fkey"' ;
+    EXECUTE 'ALTER TABLE "pot_transfer" DROP CONSTRAINT "pot_transfer_tx_id_fkey"' ;
+    EXECUTE 'ALTER TABLE "ma_tx_mint" DROP CONSTRAINT "ma_tx_mint_tx_id_fkey"' ;
+    EXECUTE 'ALTER TABLE "ma_tx_out" DROP CONSTRAINT "ma_tx_out_tx_out_id_fkey"' ;
+    EXECUTE 'ALTER TABLE "redeemer" DROP CONSTRAINT "redeemer_tx_id_fkey"' ;
+    EXECUTE 'ALTER TABLE "script" DROP CONSTRAINT "script_tx_id_fkey"' ;
+    EXECUTE 'ALTER TABLE "datum" DROP CONSTRAINT "datum_tx_id_fkey"' ;
+    EXECUTE 'ALTER TABLE "redeemer_data" DROP CONSTRAINT "redeemer_data_tx_id_fkey"' ;
+    EXECUTE 'ALTER TABLE "extra_key_witness" DROP CONSTRAINT "extra_key_witness_tx_id_fkey"' ;
+    EXECUTE 'ALTER TABLE "param_proposal" DROP CONSTRAINT "param_proposal_registered_tx_id_fkey"' ;
+    EXECUTE 'ALTER TABLE "epoch_param" DROP CONSTRAINT "epoch_param_block_id_fkey"' ;
+    EXECUTE 'ALTER TABLE "cost_model" DROP CONSTRAINT "cost_model_block_id_fkey"' ;
+    EXECUTE 'ALTER TABLE "pool_offline_data" DROP CONSTRAINT "pool_offline_data_pool_id_fkey"' ;
+    EXECUTE 'ALTER TABLE "pool_offline_data" DROP CONSTRAINT "pool_offline_data_pmr_id_fkey"' ;
+    EXECUTE 'ALTER TABLE "pool_offline_fetch_error" DROP CONSTRAINT "pool_offline_fetch_error_pool_id_fkey"' ;
+    EXECUTE 'ALTER TABLE "pool_offline_fetch_error" DROP CONSTRAINT "pool_offline_fetch_error_pmr_id_fkey"' ;
+    -- Hand written SQL statements can be added here.
+    UPDATE schema_version SET stage_two = next_version ;
+    RAISE NOTICE 'DB has been migrated to stage_two version %', next_version ;
+  END IF ;
+END ;
+$$ LANGUAGE plpgsql ;
+
+SELECT migrate() ;
+
+DROP FUNCTION migrate() ;
diff --git a/schema/migration-2-0023-20221028.sql b/schema/migration-2-0023-20221028.sql
new file mode 100644
index 000000000..c9ed0979c
--- /dev/null
+++ b/schema/migration-2-0023-20221028.sql
@@ -0,0 +1,23 @@
+-- Persistent generated migration.
+
+CREATE FUNCTION migrate() RETURNS void AS $$
+DECLARE
+  next_version int ;
+BEGIN
+  SELECT stage_two + 1 INTO next_version FROM schema_version ;
+  IF next_version = 23 THEN
+    EXECUTE 'CREATe TABLE "reverse_index"("id" SERIAL8 PRIMARY KEY UNIQUE,"block_id" INT8 NOT NULL,"min_ids" VARCHAR NULL)' ;
+    EXECUTE 'ALTER TABLE "stake_address" DROP COLUMN "tx_id"' ;
+    EXECUTE 'ALTER TABLE "tx_in" DROP CONSTRAINT "unique_txin"' ;
+    EXECUTE 'ALTER TABLE "ada_pots" DROP CONSTRAINT "unique_ada_pots"' ;
+    EXECUTE 'ALTER TABLE "cost_model" DROP COLUMN "block_id"' ;
+    -- Hand written SQL statements can be added here.
+    UPDATE schema_version SET stage_two = next_version ;
+    RAISE NOTICE 'DB has been migrated to stage_two version %', next_version ;
+  END IF ;
+END ;
+$$ LANGUAGE plpgsql ;
+
+SELECT migrate() ;
+
+DROP FUNCTION migrate() ;
diff --git a/schema/migration-3-0001-20200521.sql b/schema/migration-3-0001-20200521.sql
new file mode 100644
index 000000000..d0ea9ff83
--- /dev/null
+++ b/schema/migration-3-0001-20200521.sql
@@ -0,0 +1,43 @@
+CREATE FUNCTION migrate() RETURNS void AS $$
+DECLARE
+  next_version int ;
+BEGIN
+  SELECT stage_three + 1 INTO next_version FROM schema_version ;
+  IF next_version <= 2 THEN
+    -- -------------------------------------------------------------------------
+    -- Hand crafted indices for performance
+
+    -- Without these indices 'cardano-db-tool validate' will put a heavy load
+    -- on Postgres.
+
+    CREATE INDEX idx_block_block_no ON block(block_no);
+
+    CREATE INDEX idx_block_epoch_no ON block(epoch_no);
+
+    CREATE INDEX idx_tx_block_id ON tx(block_id);
+
+    CREATE INDEX idx_reward_spendable_epoch ON reward(spendable_epoch);
+
+    CREATE INDEX idx_block_slot_no ON block(slot_no);
+
+    CREATE INDEX idx_tx_out_index ON tx_out(index);
+
+    CREATE INDEX idx_epoch_stake_epoch_no ON epoch_stake(epoch_no) ;
+
+--    CREATE INDEX idx_block_previous_id
+--      ON block(previous_id);
+--
+--    CREATE INDEX idx_tx_in_source_tx
+--      ON tx_in(tx_in_id);
+
+    -- -------------------------------------------------------------------------
+
+    UPDATE schema_version SET stage_three = 2 ;
+    RAISE NOTICE 'DB has been migrated to stage_three version %', next_version ;
+  END IF ;
+END ;
+$$ LANGUAGE plpgsql ;
+
+SELECT migrate() ;
+
+DROP FUNCTION migrate() ;