[CAD-779] Add a database backend.
ksaric committed Jun 11, 2020
1 parent 206f2a0 commit cdfa037
Showing 23 changed files with 1,970 additions and 40 deletions.
66 changes: 66 additions & 0 deletions README.md
@@ -90,3 +90,69 @@ You need an HTTP server to serve it from and simply point to the application port
Run the application, go to the local port http://localhost:3000/swagger.json and copy the content into https://editor.swagger.io/
Voila! You got it, the spec is there.
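
If you prefer the command line, you can fetch the same document directly (using the port mentioned above):
```
curl http://localhost:3000/swagger.json
```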

## How to run

### Create DB

You first need to create the database. You can provide your own path; this example uses the default location. We need a PostgreSQL database, which we create with:
```
PGPASSFILE=config/pgpass ./scripts/postgresql-setup.sh --createdb
```
Or if it needs to be recreated:
```
PGPASSFILE=config/pgpass ./scripts/postgresql-setup.sh --recreatedb
```
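
The `PGPASSFILE` variable points at a standard PostgreSQL password file with the format `hostname:port:database:username:password`; the one shipped in this commit (`config/pgpass`, shown further down) contains:
```
/var/run/postgresql:5432:smash:*:*
```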

After that we need to run the migrations (if there are any):
```
PGPASSFILE=config/pgpass stack run smash-exe -- run-migrations --mdir ./schema
```

After that, we can create additional migration scripts if they are needed:
```
PGPASSFILE=config/pgpass stack run smash-exe -- create-migration --mdir ./schema
```

To show all tables (from within a `psql` session):
```
\dt
```

To show details about a specific table:
```
\d+ TABLE_NAME
```

For example:
```
\d+ block
```
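
Note that `\dt` and `\d+` are `psql` meta-commands, so you need an open session first; a sketch, assuming the connection details from `config/pgpass`:
```
PGPASSFILE=config/pgpass psql -h /var/run/postgresql -p 5432 smash
```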

Dumping the schema (`-s` dumps the schema only, `-c` adds clean/drop statements):
```
pg_dump -c -s --no-owner cexplorer > cexplorer.sql
```

## Inserting pool metadata


This is an example (the hash was computed with Blake2b-256):
```
stack exec smash-exe -- insert-pool --filepath test_pool.json --poolhash "\253\178\140~3\202\&1\a\174\148\177rt\225\180\&8XQ\128\200\236\US\241\241\237oP\142\174A\172\188"
```
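
If you need to produce such a hash yourself, one option is GNU coreutils' `b2sum` (a sketch; whether this matches the application's exact hashing convention is an assumption here):
```
b2sum -l 256 test_pool.json
```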

## Test script

An end-to-end example of how the whole thing works:
```
PGPASSFILE=config/pgpass ./scripts/postgresql-setup.sh --recreatedb
PGPASSFILE=config/pgpass stack run smash-exe -- run-migrations --mdir ./schema
PGPASSFILE=config/pgpass stack run smash-exe -- create-migration --mdir ./schema
PGPASSFILE=config/pgpass stack run smash-exe -- run-migrations --mdir ./schema
PGPASSFILE=config/pgpass stack run smash-exe -- insert-pool --filepath test_pool.json --poolhash "cbdfc4f21feb0a414b2b9471fa56b0ebd312825e63db776d68cc3fa0ca1f5a2f"
PGPASSFILE=config/pgpass stack run smash-exe -- run-app
```

After the server is running, you can check the hash on http://localhost:3100/api/v1/metadata/cbdfc4f21feb0a414b2b9471fa56b0ebd312825e63db776d68cc3fa0ca1f5a2f to see it return the JSON metadata.
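
For a quick check from the command line (same URL as above):
```
curl http://localhost:3100/api/v1/metadata/cbdfc4f21feb0a414b2b9471fa56b0ebd312825e63db776d68cc3fa0ca1f5a2f
```
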
129 changes: 128 additions & 1 deletion app/Main.hs
@@ -4,5 +4,132 @@ import Cardano.Prelude

import Lib

import DB

import Control.Applicative (optional)

import Data.Monoid ((<>))

import Options.Applicative (Parser, ParserInfo, ParserPrefs)
import qualified Options.Applicative as Opt


main :: IO ()
main = do
    Opt.customExecParser p opts >>= runCommand
  where
    opts :: ParserInfo Command
    opts = Opt.info (Opt.helper <*> pVersion <*> pCommand)
        ( Opt.fullDesc
        <> Opt.header "SMASH - Manage the Stakepool Metadata Aggregation Server"
        )

    p :: ParserPrefs
    p = Opt.prefs Opt.showHelpOnEmpty

-- -----------------------------------------------------------------------------

data Command
  = CreateMigration MigrationDir
  | RunMigrations MigrationDir (Maybe LogFileDir)
  | RunApplication
  | InsertPool FilePath Text

runCommand :: Command -> IO ()
runCommand cmd =
  case cmd of
    CreateMigration mdir -> doCreateMigration mdir
    RunMigrations mdir mldir -> runMigrations (\pgConfig -> pgConfig) False mdir mldir
    RunApplication -> runApp defaultConfiguration
    InsertPool poolMetadataJsonPath poolHash -> do
      putTextLn "Inserting pool metadata!"
      result <- runPoolInsertion poolMetadataJsonPath poolHash
      either (\_ -> putTextLn "Error occurred!") (\_ -> putTextLn "Completed") result

doCreateMigration :: MigrationDir -> IO ()
doCreateMigration mdir = do
  mfp <- createMigration mdir
  case mfp of
    Nothing -> putTextLn "No migration needed."
    Just fp -> putTextLn $ toS ("New migration '" ++ fp ++ "' created.")

-------------------------------------------------------------------------------

pVersion :: Parser (a -> a)
pVersion =
  Opt.infoOption "cardano-db-tool version 0.1.0.0"
    ( Opt.long "version"
    <> Opt.short 'v'
    <> Opt.help "Print the version and exit"
    )

pCommand :: Parser Command
pCommand =
  Opt.subparser
    ( Opt.command "create-migration"
        ( Opt.info pCreateMigration
          $ Opt.progDesc "Create a database migration (only really used by devs)."
        )
    <> Opt.command "run-migrations"
        ( Opt.info pRunMigrations
          $ Opt.progDesc "Run the database migrations (which are idempotent)."
        )
    <> Opt.command "run-app"
        ( Opt.info pRunApp
          $ Opt.progDesc "Run the actual application."
        )
    <> Opt.command "insert-pool"
        ( Opt.info pInsertPool
          $ Opt.progDesc "Inserts the pool into the database (utility)."
        )
    )
  where
    pCreateMigration :: Parser Command
    pCreateMigration =
      CreateMigration <$> pMigrationDir

    pRunMigrations :: Parser Command
    pRunMigrations =
      RunMigrations <$> pMigrationDir <*> optional pLogFileDir

    -- Empty right now but we might add some params over time. Like ports and stuff?
    pRunApp :: Parser Command
    pRunApp =
      pure RunApplication

    -- Empty right now but we might add some params over time.
    pInsertPool :: Parser Command
    pInsertPool =
      InsertPool <$> pFilePath <*> pPoolHash

pFilePath :: Parser FilePath
pFilePath =
  Opt.strOption
    ( Opt.long "filepath"
    <> Opt.help "The JSON metadata filepath location."
    <> Opt.completer (Opt.bashCompleter "directory")
    )

pPoolHash :: Parser Text
pPoolHash =
  Opt.strOption
    ( Opt.long "poolhash"
    <> Opt.help "The JSON metadata Blake2 256 hash."
    )

pMigrationDir :: Parser MigrationDir
pMigrationDir =
  MigrationDir <$> Opt.strOption
    ( Opt.long "mdir"
    <> Opt.help "The directory containing the migrations."
    <> Opt.completer (Opt.bashCompleter "directory")
    )

pLogFileDir :: Parser LogFileDir
pLogFileDir =
  LogFileDir <$> Opt.strOption
    ( Opt.long "ldir"
    <> Opt.help "The directory to write the log to."
    <> Opt.completer (Opt.bashCompleter "directory")
    )

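A quick way to exercise the parser wiring above is the version flag defined in `pVersion` (using the same `stack run` invocation as the README):
```
stack run smash-exe -- --version
# prints: cardano-db-tool version 0.1.0.0
```
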
1 change: 1 addition & 0 deletions config/pgpass
@@ -0,0 +1 @@
/var/run/postgresql:5432:smash:*:*
22 changes: 22 additions & 0 deletions schema/migration-1-0000-20200610.sql
@@ -0,0 +1,22 @@
-- Hand written migration that creates a 'schema_version' table and initializes it.

CREATE FUNCTION init() RETURNS void AS $$

DECLARE
emptyDB boolean;

BEGIN
SELECT NOT EXISTS (SELECT 1 FROM information_schema.tables WHERE table_name='schema_version') INTO emptyDB;
IF emptyDB THEN
CREATE TABLE "schema_version" (id SERIAL PRIMARY KEY UNIQUE, stage_one INT8 NOT NULL, stage_two INT8 NOT NULL, stage_three INT8 NOT NULL);
INSERT INTO "schema_version" (stage_one, stage_two, stage_three) VALUES (0, 0, 0);

RAISE NOTICE 'DB has been initialized';
END IF;
END;

$$ LANGUAGE plpgsql;

SELECT init();

DROP FUNCTION init();
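
Once this migration has run, the current stage values can be inspected directly; a sketch, reusing the connection details from `config/pgpass`:
```
PGPASSFILE=config/pgpass psql -h /var/run/postgresql smash -c "SELECT stage_one, stage_two, stage_three FROM schema_version;"
```
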
33 changes: 33 additions & 0 deletions schema/migration-1-0001-20200611.sql
@@ -0,0 +1,33 @@
-- Hand written migration to create the custom types with 'DOMAIN' statements.

CREATE FUNCTION migrate() RETURNS void AS $$

DECLARE
next_version int;

BEGIN
SELECT stage_one + 1 INTO next_version FROM "schema_version";
IF next_version = 1 THEN
CREATE DOMAIN lovelace AS bigint CHECK (VALUE >= 0 AND VALUE <= 45000000000000000);
CREATE DOMAIN txindex AS smallint CHECK (VALUE >= 0 AND VALUE < 1024);
CREATE DOMAIN uinteger AS integer CHECK (VALUE >= 0);

-- Base16 encoded values use a 64 byte hash.
CREATE DOMAIN base16type AS bytea CHECK (octet_length (VALUE) = 64);

-- Blocks, transactions and Merkle roots use a 32 byte hash.
CREATE DOMAIN hash32type AS bytea CHECK (octet_length (VALUE) = 32);

-- Addresses use a 28 byte hash (as do StakeholderIds).
CREATE DOMAIN hash28type AS bytea CHECK (octet_length (VALUE) = 28);

UPDATE "schema_version" SET stage_one = 1;
RAISE NOTICE 'DB has been migrated to stage_one version %', next_version;
END IF;
END;

$$ LANGUAGE plpgsql;

SELECT migrate();

DROP FUNCTION migrate();
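
To see one of these domains reject an out-of-range value, you can cast a literal against it; a hypothetical session:
```
PGPASSFILE=config/pgpass psql -h /var/run/postgresql smash -c "SELECT (-1)::lovelace;"
# fails: value for domain lovelace violates check constraint "lovelace_check"
```
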
20 changes: 20 additions & 0 deletions schema/migration-2-0001-20200611.sql
@@ -0,0 +1,20 @@
-- Persistent generated migration.

CREATE FUNCTION migrate() RETURNS void AS $$
DECLARE
next_version int ;
BEGIN
SELECT stage_two + 1 INTO next_version FROM schema_version ;
IF next_version = 1 THEN
CREATE TABLE "tx_metadata"("id" SERIAL8 PRIMARY KEY UNIQUE,"hash" base16type NOT NULL,"metadata" json NOT NULL);
ALTER TABLE "tx_metadata" ADD CONSTRAINT "unique_tx_metadata" UNIQUE("hash");
-- Hand written SQL statements can be added here.
UPDATE schema_version SET stage_two = 1 ;
RAISE NOTICE 'DB has been migrated to stage_two version %', next_version ;
END IF ;
END ;
$$ LANGUAGE plpgsql ;

SELECT migrate() ;

DROP FUNCTION migrate() ;
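
The `unique_tx_metadata` constraint means a second insert with the same hash is rejected; a hypothetical check (`base16type` requires exactly 64 bytes, hence the `repeat`):
```
PGPASSFILE=config/pgpass psql -h /var/run/postgresql smash -c "INSERT INTO tx_metadata (hash, metadata) VALUES (decode(repeat('ab', 64), 'hex'), '{}');"
# running it a second time fails with a unique_tx_metadata violation
```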