Merge pull request #21 from scrive/no-downtime-composites
Add no-downtime migrations support for composite types
23Skidoo committed Apr 30, 2019
2 parents 72e46aa + 1391482 commit fecf157
Showing 10 changed files with 247 additions and 90 deletions.
4 changes: 4 additions & 0 deletions CHANGELOG.md
@@ -1,3 +1,7 @@
# hpqtypes-extras-1.8.0.0 (2019-??-??)
* Make composite types subject to migration process
* Add migration type for concurrent creation of an index

# hpqtypes-extras-1.7.1.0 (2019-02-04)
* Fix an issue where unnecessary migrations were run sometimes
([#18](https://github.com/scrive/hpqtypes-extras/pull/18)).
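
To give the second 1.8.0.0 entry some shape, here is a minimal sketch of a migration using the new constructor. It is not taken from this commit: the module paths, the `indexOnColumn` helper, and the table/column/version values are assumptions; only `Migration`, its fields, and `CreateIndexConcurrentlyMigration` appear in the diff below.

```haskell
{-# LANGUAGE OverloadedStrings #-}
-- Hypothetical example (not part of this commit). Module paths and the
-- 'indexOnColumn' helper are assumed; the table name, column and version
-- number are made up for illustration.
module ExampleConcurrentIndexMigration where

import Database.PostgreSQL.PQTypes.Model.Index (indexOnColumn)
import Database.PostgreSQL.PQTypes.Model.Migration

-- | Index users(email) without taking a long write lock: the runner drops
-- any half-built index, commits the surrounding transaction, runs
-- CREATE INDEX CONCURRENTLY, and then bumps the entry in table_versions.
addUsersEmailIndex :: Migration m
addUsersEmailIndex = Migration
  { mgrTableName = "users"
  , mgrFrom      = 3
  , mgrAction    =
      CreateIndexConcurrentlyMigration "users" (indexOnColumn "email")
  }
```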
2 changes: 1 addition & 1 deletion hpqtypes-extras.cabal
@@ -1,6 +1,6 @@
cabal-version: 1.18
name: hpqtypes-extras
version: 1.7.1.0
version: 1.8.0.0
synopsis: Extra utilities for hpqtypes library
description: The following extras for hpqtypes library:
.
126 changes: 111 additions & 15 deletions src/Database/PostgreSQL/PQTypes/Checks.hs
@@ -12,6 +12,7 @@ module Database.PostgreSQL.PQTypes.Checks (
, migrateDatabase
) where

import Control.Arrow ((&&&))
import Control.Applicative ((<$>))
import Control.Monad.Catch
import Control.Monad.Reader
@@ -29,6 +30,8 @@ import Log
import Prelude
import TextShow
import qualified Data.List as L
import qualified Data.Map as M
import qualified Data.Set as S
import qualified Data.Text as T

import Database.PostgreSQL.PQTypes.ExtrasOptions
@@ -46,16 +49,22 @@ headExc _ (x:_) = x

-- | Run migrations and check the database structure.
migrateDatabase
:: (MonadDB m, MonadLog m, MonadThrow m)
=> ExtrasOptions -> [Extension] -> [Domain] -> [Table] -> [Migration m]
:: (MonadDB m, MonadLog m, MonadMask m)
=> ExtrasOptions
-> [Extension]
-> [CompositeType]
-> [Domain]
-> [Table]
-> [Migration m]
-> m ()
migrateDatabase options@ExtrasOptions{..}
extensions domains tables migrations = do
extensions composites domains tables migrations = do
setDBTimeZoneToUTC
mapM_ checkExtension extensions
tablesWithVersions <- getTableVersions (tableVersions : tables)
-- 'checkDBConsistency' also performs migrations.
checkDBConsistency options domains tablesWithVersions migrations
resultCheck =<< checkCompositesStructure True composites
resultCheck =<< checkDomainsStructure domains
resultCheck =<< checkDBStructure options tablesWithVersions
resultCheck =<< checkTablesWereDropped migrations
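
For callers, the practical change in this function is one extra argument. A hedged sketch of the new call shape follows (the wrapper itself and the module paths are assumptions; the argument order and constraints come from the signature above):

```haskell
-- Hypothetical wrapper (not part of this commit); it only demonstrates the
-- new argument order of 'migrateDatabase'. Module paths are assumed.
module ExampleMigrate where

import Control.Monad.Catch (MonadMask)
import Database.PostgreSQL.PQTypes (MonadDB)
import Log (MonadLog)

import Database.PostgreSQL.PQTypes.Checks (migrateDatabase)
import Database.PostgreSQL.PQTypes.ExtrasOptions (ExtrasOptions)
import Database.PostgreSQL.PQTypes.Model

migrateMyDatabase
  :: (MonadDB m, MonadLog m, MonadMask m)
  => ExtrasOptions
  -> [CompositeType] -> [Domain] -> [Table] -> [Migration m]
  -> m ()
migrateMyDatabase options composites domains tables migrations =
  migrateDatabase options
    []          -- no extensions in this sketch
    composites  -- new: composite types now take part in checks and migrations
    domains
    tables
    migrations
```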
@@ -69,22 +78,23 @@ migrateDatabase options@ExtrasOptions{..}
-- needs to be migrated. Will do a full check of DB structure.
checkDatabase
:: forall m . (MonadDB m, MonadLog m, MonadThrow m)
=> ExtrasOptions -> [Domain] -> [Table] -> m ()
=> ExtrasOptions -> [CompositeType] -> [Domain] -> [Table] -> m ()
checkDatabase options = checkDatabase_ options False

-- | Same as 'checkDatabase', but will not fail if there are
-- additional tables in the database.
checkDatabaseAllowUnknownTables
:: forall m . (MonadDB m, MonadLog m, MonadThrow m)
=> ExtrasOptions -> [Domain] -> [Table] -> m ()
=> ExtrasOptions -> [CompositeType] -> [Domain] -> [Table] -> m ()
checkDatabaseAllowUnknownTables options = checkDatabase_ options True

checkDatabase_
:: forall m . (MonadDB m, MonadLog m, MonadThrow m)
=> ExtrasOptions -> Bool -> [Domain] -> [Table] -> m ()
checkDatabase_ options allowUnknownTables domains tables = do
=> ExtrasOptions -> Bool -> [CompositeType] -> [Domain] -> [Table] -> m ()
checkDatabase_ options allowUnknownTables composites domains tables = do
tablesWithVersions <- getTableVersions (tableVersions : tables)
resultCheck $ checkVersions tablesWithVersions
resultCheck =<< checkCompositesStructure False composites
resultCheck =<< checkDomainsStructure domains
resultCheck =<< checkDBStructure options tablesWithVersions
when (not $ allowUnknownTables) $ do
@@ -297,6 +307,64 @@ checkTablesWereDropped mgrs = do
<> "' that must have been dropped"
<> " is still present in the database."

-- | Check that there is a one-to-one correspondence between composite types
-- in the database and the list of their code definitions.
checkCompositesStructure
:: MonadDB m
=> Bool -- ^ Create types if none are present in the database
-> [CompositeType]
-> m ValidationResult
checkCompositesStructure createTypes compositeList = getDBCompositeTypes >>= \case
[] | createTypes -> do -- If the database has no composite types yet,
-- create them from the code definitions (if there are any).
mapM_ (runQuery_ . sqlCreateComposite) compositeList
return mempty
dbCompositeTypes -> pure $ mconcat
[ checkNotPresentComposites
, checkDatabaseComposites
]
where
compositeMap = M.fromList $
map ((unRawSQL . ctName) &&& ctColumns) compositeList

checkNotPresentComposites =
let notPresent = S.toList $ M.keysSet compositeMap
S.\\ S.fromList (map (unRawSQL . ctName) dbCompositeTypes)
in validateIsNull "Composite types not present in the database:" notPresent

checkDatabaseComposites = mconcat . (`map` dbCompositeTypes) $ \dbComposite ->
let cname = unRawSQL $ ctName dbComposite
in case cname `M.lookup` compositeMap of
Just columns -> topMessage "composite type" cname $
checkColumns 1 columns (ctColumns dbComposite)
Nothing -> validationError $ "Composite type '" <> T.pack (show dbComposite)
<> "' from the database doesn't have a corresponding code definition"
where
checkColumns
:: Int -> [CompositeColumn] -> [CompositeColumn] -> ValidationResult
checkColumns _ [] [] = mempty
checkColumns _ rest [] = validationError $
objectHasLess "Composite type" "columns" rest
checkColumns _ [] rest = validationError $
objectHasMore "Composite type" "columns" rest
checkColumns !n (d:defs) (c:cols) = mconcat [
validateNames $ ccName d == ccName c
, validateTypes $ ccType d == ccType c
, checkColumns (n+1) defs cols
]
where
validateNames True = mempty
validateNames False = validationError $
errorMsg ("no. " <> showt n) "names" (unRawSQL . ccName)

validateTypes True = mempty
validateTypes False = validationError $
errorMsg (unRawSQL $ ccName d) "types" (T.pack . show . ccType)

errorMsg ident attr f =
"Column '" <> ident <> "' differs in"
<+> attr <+> "(database:" <+> f c <> ", definition:" <+> f d <> ")."
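
To make the check above concrete, here is a hedged sketch of the kind of code definition that gets compared, column by column, against what 'getDBCompositeTypes' reads back (and that is created wholesale when the database has no composite types yet). A mismatch in a column's name or type produces the "Column ... differs in ..." message built just above. The ColumnType constructors and all names are assumptions, not taken from this commit.

```haskell
{-# LANGUAGE OverloadedStrings #-}
-- Hypothetical definition (not part of this commit). ColBigInt/ColText are
-- assumed constructors of ColumnType; the type and column names are made up.
module ExampleCompositeDefinition where

import Database.PostgreSQL.PQTypes.Model.ColumnType
import Database.PostgreSQL.PQTypes.Model.CompositeType

ctUserSummary :: CompositeType
ctUserSummary = CompositeType
  { ctName    = "user_summary_ct"
  , ctColumns =
      [ CompositeColumn { ccName = "user_id", ccType = ColBigInt }
      , CompositeColumn { ccName = "email",   ccType = ColText }
      ]
  }
```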

-- | Checks whether the database is consistent.
checkDBStructure
:: forall m. (MonadDB m, MonadThrow m)
@@ -360,8 +428,10 @@ checkDBStructure options tables = fmap mconcat .
checkColumns
:: Int -> [TableColumn] -> [TableColumn] -> ValidationResult
checkColumns _ [] [] = mempty
checkColumns _ rest [] = validationError $ tableHasLess "columns" rest
checkColumns _ [] rest = validationError $ tableHasMore "columns" rest
checkColumns _ rest [] = validationError $
objectHasLess "Table" "columns" rest
checkColumns _ [] rest = validationError $
objectHasMore "Table" "columns" rest
checkColumns !n (d:defs) (c:cols) = mconcat [
validateNames $ colName d == colName c
-- bigserial == bigint + autoincrement and there is no
@@ -459,10 +529,13 @@ checkDBStructure options tables = fmap mconcat .
-- * all 'mgrFrom' are less than table version number of the table in
-- the 'tables' list
checkDBConsistency
:: forall m. (MonadDB m, MonadLog m, MonadThrow m)
:: forall m. (MonadDB m, MonadLog m, MonadMask m)
=> ExtrasOptions -> [Domain] -> [(Table, Int32)] -> [Migration m]
-> m ()
checkDBConsistency options domains tablesWithVersions migrations = do
autoTransaction <- tsAutoTransaction <$> getTransactionSettings
unless autoTransaction $ do
error "checkDBConsistency: tsAutoTransaction setting needs to be True"
-- Check the validity of the migrations list.
validateMigrations
validateDropTableMigrations
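
Note the requirement enforced a few lines above: 'checkDBConsistency' (and therefore 'migrateDatabase') now refuses to run unless tsAutoTransaction is enabled, because the concurrent-index migration further down issues its own COMMIT and BEGIN. A minimal sketch of the settings a caller would hand to its hpqtypes runner, assuming hpqtypes' 'defaultTransactionSettings' (which already enables auto transactions):

```haskell
-- Hypothetical (not part of this commit): whatever runner executes
-- 'migrateDatabase' must use transaction settings with
-- tsAutoTransaction = True. 'defaultTransactionSettings' is assumed to
-- come from hpqtypes and to enable it already; the override is spelled
-- out here only for emphasis.
import Database.PostgreSQL.PQTypes

migrationTransactionSettings :: TransactionSettings
migrationTransactionSettings =
  defaultTransactionSettings { tsAutoTransaction = True }
```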
@@ -682,12 +755,9 @@ checkDBConsistency options domains tablesWithVersions migrations = do
runMigration Migration{..} = do
case mgrAction of
StandardMigration mgrDo -> do
logInfo_ $ arrListTable mgrTableName <> showt mgrFrom <+> "->"
<+> showt (succ mgrFrom)
logMigration
mgrDo
runQuery_ $ sqlUpdate "table_versions" $ do
sqlSet "version" (succ mgrFrom)
sqlWhereEq "name" (T.unpack . unRawSQL $ mgrTableName)
updateTableVersion

DropTableMigration mgrDropTableMode -> do
logInfo_ $ arrListTable mgrTableName <> "drop table"
Expand All @@ -696,6 +766,32 @@ checkDBConsistency options domains tablesWithVersions migrations = do
runQuery_ $ sqlDelete "table_versions" $ do
sqlWhereEq "name" (T.unpack . unRawSQL $ mgrTableName)

CreateIndexConcurrentlyMigration tname idx -> do
logMigration
-- If the migration was run before but index creation failed, the index
-- will be left in the database in an invalid state, so when we
-- rerun, we need to remove it first (see
-- https://www.postgresql.org/docs/9.6/sql-createindex.html for more
-- information).
runQuery_ $ "DROP INDEX IF EXISTS" <+> indexName tname idx
-- We're in auto transaction mode (as ensured at the beginning of
-- 'checkDBConsistency'), so we need to issue an explicit SQL COMMIT,
-- because the 'commit' function automatically starts another
-- transaction. We don't want that, as concurrent index creation
-- can't run inside a transaction.
runSQL_ "COMMIT"
runQuery_ (sqlCreateIndexConcurrently tname idx) `finally` begin
updateTableVersion
where
logMigration = do
logInfo_ $ arrListTable mgrTableName
<> showt mgrFrom <+> "->" <+> showt (succ mgrFrom)

updateTableVersion = do
runQuery_ $ sqlUpdate "table_versions" $ do
sqlSet "version" (succ mgrFrom)
sqlWhereEq "name" (T.unpack . unRawSQL $ mgrTableName)

runMigrations :: [(Text, Int32)] -> m ()
runMigrations dbTablesWithVersions = do
let migrationsToRun = findMigrationsToRun dbTablesWithVersions
17 changes: 8 additions & 9 deletions src/Database/PostgreSQL/PQTypes/Checks/Util.hs
@@ -12,8 +12,8 @@ module Database.PostgreSQL.PQTypes.Checks.Util (
checkEquality,
checkNames,
checkPKPresence,
tableHasLess,
tableHasMore,
objectHasLess,
objectHasMore,
arrListTable
) where

@@ -152,15 +152,14 @@ checkPKPresence tableName mdef mpk =
, " has no primary key defined "
, " (" <> (mintercalate ", " msgs) <> ")"]


tableHasLess :: Show t => Text -> t -> Text
tableHasLess ptype missing =
"Table in the database has *less*" <+> ptype <+>
objectHasLess :: Show t => Text -> Text -> t -> Text
objectHasLess otype ptype missing =
otype <+> "in the database has *less*" <+> ptype <+>
"than its definition (missing:" <+> T.pack (show missing) <> ")"

tableHasMore :: Show t => Text -> t -> Text
tableHasMore ptype extra =
"Table in the database has *more*" <+> ptype <+>
objectHasMore :: Show t => Text -> Text -> t -> Text
objectHasMore otype ptype extra =
otype <+> "in the database has *more*" <+> ptype <+>
"than its definition (extra:" <+> T.pack (show extra) <> ")"

arrListTable :: RawSQL () -> Text
4 changes: 2 additions & 2 deletions src/Database/PostgreSQL/PQTypes/Model/Check.hs
@@ -15,8 +15,8 @@ import Prelude
data Check = Check {
chkName :: RawSQL ()
, chkCondition :: RawSQL ()
, chkValidated :: Bool -- ^ Set to 'True' if check is created as NOT VALID and
-- not validated afterwards.
, chkValidated :: Bool -- ^ Set to 'False' if check is created as NOT VALID and
-- left in such state (for whatever reason).
} deriving (Eq, Ord, Show)

tblCheck :: Check
74 changes: 53 additions & 21 deletions src/Database/PostgreSQL/PQTypes/Model/CompositeType.hs
@@ -1,14 +1,20 @@
module Database.PostgreSQL.PQTypes.Model.CompositeType (
CompositeType(..)
, CompositeColumn(..)
, defineComposites
, compositeTypePqFormat
, sqlCreateComposite
, sqlDropComposite
, getDBCompositeTypes
) where

import Data.Int
import Data.Monoid.Utils
import Database.PostgreSQL.PQTypes
import Prelude
import qualified Data.ByteString as BS
import qualified Data.Text.Encoding as T

import Database.PostgreSQL.PQTypes.Model.ColumnType
import Database.PostgreSQL.PQTypes.SQL.Builder

data CompositeType = CompositeType {
ctName :: !(RawSQL ())
@@ -20,24 +26,50 @@ data CompositeColumn = CompositeColumn {
, ccType :: ColumnType
} deriving (Eq, Ord, Show)

-- | Composite types are static in a sense that they can either
-- be created or dropped, altering them is not possible. Therefore
-- they are not part of the migration process. This is not a problem
-- since their exclusive usage is for intermediate representation
-- of complex nested data structures fetched from the database.
defineComposites :: MonadDB m => [CompositeType] -> m ()
defineComposites ctypes = do
mapM_ (runQuery_ . sqlDropComposite) $ reverse ctypes
mapM_ (runQuery_ . sqlCreateComposite) $ ctypes
-- | Convenience function for converting CompositeType definition to
-- corresponding 'pqFormat' definition.
compositeTypePqFormat :: CompositeType -> BS.ByteString
compositeTypePqFormat ct = "%" `BS.append` T.encodeUtf8 (unRawSQL $ ctName ct)

-- | Make SQL query that creates a composite type.
sqlCreateComposite :: CompositeType -> RawSQL ()
sqlCreateComposite CompositeType{..} = smconcat [
"CREATE TYPE"
, ctName
, "AS ("
, mintercalate ", " $ map columnToSQL ctColumns
, ")"
]
where
sqlCreateComposite CompositeType{..} = smconcat [
"CREATE TYPE"
, ctName
, "AS ("
, mintercalate ", " $ map columnToSQL ctColumns
, ")"
]
where
columnToSQL CompositeColumn{..} = ccName <+> columnTypeToSQL ccType
columnToSQL CompositeColumn{..} = ccName <+> columnTypeToSQL ccType

-- | Make SQL query that drops a composite type.
sqlDropComposite :: RawSQL () -> RawSQL ()
sqlDropComposite = ("DROP TYPE" <+>)

sqlDropComposite = ("DROP TYPE IF EXISTS" <+>) . ctName
----------------------------------------

-- | Get composite types defined in the database.
getDBCompositeTypes :: forall m. MonadDB m => m [CompositeType]
getDBCompositeTypes = do
runQuery_ . sqlSelect "pg_catalog.pg_class c" $ do
sqlResult "c.relname::text"
sqlResult "c.oid::int4"
sqlWhere "pg_catalog.pg_table_is_visible(c.oid)"
sqlWhereEq "c.relkind" 'c'
sqlOrderBy "c.relname"
mapM getComposite =<< fetchMany id
where
getComposite :: (String, Int32) -> m CompositeType
getComposite (name, oid) = do
runQuery_ . sqlSelect "pg_catalog.pg_attribute a" $ do
sqlResult "a.attname::text"
sqlResult "pg_catalog.format_type(a.atttypid, a.atttypmod)"
sqlWhereEq "a.attrelid" oid
sqlOrderBy "a.attnum"
columns <- fetchMany fetch
return CompositeType { ctName = unsafeSQL name, ctColumns = columns }
where
fetch :: (String, ColumnType) -> CompositeColumn
fetch (cname, ctype) =
CompositeColumn { ccName = unsafeSQL cname, ccType = ctype }
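
A small hedged illustration of the round trip these helpers provide (not from this commit; the ColumnType constructor and all names are assumptions): 'sqlCreateComposite' assembles the CREATE TYPE statement, 'getDBCompositeTypes' reads the same structure back from pg_catalog for the new structure check, and 'compositeTypePqFormat' yields the '%'-prefixed format string hpqtypes expects for row types backed by the composite.

```haskell
{-# LANGUAGE OverloadedStrings #-}
-- Hypothetical (not part of this commit); ColBigInt is an assumed
-- ColumnType constructor and the names are made up.
module ExampleCompositeRoundTrip where

import qualified Data.ByteString.Char8 as BS8

import Database.PostgreSQL.PQTypes.Model.ColumnType
import Database.PostgreSQL.PQTypes.Model.CompositeType

ctPoint2D :: CompositeType
ctPoint2D = CompositeType
  { ctName    = "point_2d"
  , ctColumns =
      [ CompositeColumn { ccName = "x", ccType = ColBigInt }
      , CompositeColumn { ccName = "y", ccType = ColBigInt }
      ]
  }

main :: IO ()
main = do
  -- sqlCreateComposite ctPoint2D renders roughly as:
  --   CREATE TYPE point_2d AS (x BIGINT, y BIGINT)
  -- and getDBCompositeTypes later reads that structure back from
  -- pg_catalog, which is what checkCompositesStructure compares against.
  BS8.putStrLn (compositeTypePqFormat ctPoint2D)  -- prints "%point_2d"
```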
4 changes: 2 additions & 2 deletions src/Database/PostgreSQL/PQTypes/Model/ForeignKey.hs
@@ -25,8 +25,8 @@ data ForeignKey = ForeignKey {
, fkOnDelete :: ForeignKeyAction
, fkDeferrable :: Bool
, fkDeferred :: Bool
, fkValidated :: Bool -- ^ Set to 'True' if foreign key is created as NOT VALID
-- and not validated afterwards.
, fkValidated :: Bool -- ^ Set to 'False' if foreign key is created as NOT
-- VALID and left in such state (for whatever reason).
} deriving (Eq, Ord, Show)

data ForeignKeyAction
13 changes: 7 additions & 6 deletions src/Database/PostgreSQL/PQTypes/Model/Index.hs
@@ -150,12 +150,13 @@ sqlCreateIndexConcurrently = sqlCreateIndex_ True

sqlCreateIndex_ :: Bool -> RawSQL () -> TableIndex -> RawSQL ()
sqlCreateIndex_ concurrently tname idx@TableIndex{..} = mconcat [
"CREATE "
, if idxUnique then "UNIQUE " else ""
, "INDEX " <+> indexName tname idx
, if concurrently then " CONCURRENTLY" else ""
, " ON" <+> tname <+> ""
, "USING" <+> (rawSQL (T.pack . show $ idxMethod) ()) <+> "("
"CREATE"
, if idxUnique then " UNIQUE" else ""
, " INDEX "
, if concurrently then "CONCURRENTLY " else ""
, indexName tname idx
, " ON" <+> tname
, " USING" <+> (rawSQL (T.pack . show $ idxMethod) ()) <+> "("
, mintercalate ", " idxColumns
, ")"
, maybe "" (" WHERE" <+>) idxWhere
