From 3040da9c8ad5c3e4e6464cf4840a513644f3ad7b Mon Sep 17 00:00:00 2001 From: Alonso Date: Mon, 30 Jan 2023 13:12:03 +0100 Subject: [PATCH 1/6] migration test + indexes --- db/migrations/state/0002_test.go | 70 +++++++++++++++++ db/migrations/state/0003_test.go | 33 ++++++++ db/migrations/state/0004_test.go | 71 +++++++++++++++++ db/migrations/state/0005.sql | 2 +- db/migrations/state/0005_test.go | 125 ++++++++++++++++++++++++++++++ db/migrations/state/0006.sql | 15 ++++ db/migrations/state/utils_test.go | 119 ++++++++++++++++++++++++++++ test/dbutils/dbutils.go | 2 +- 8 files changed, 435 insertions(+), 2 deletions(-) create mode 100644 db/migrations/state/0002_test.go create mode 100644 db/migrations/state/0003_test.go create mode 100644 db/migrations/state/0004_test.go create mode 100644 db/migrations/state/0005_test.go create mode 100644 db/migrations/state/0006.sql create mode 100644 db/migrations/state/utils_test.go diff --git a/db/migrations/state/0002_test.go b/db/migrations/state/0002_test.go new file mode 100644 index 0000000000..56423da72d --- /dev/null +++ b/db/migrations/state/0002_test.go @@ -0,0 +1,70 @@ +package migrations_test + +import ( + "testing" + "time" + "database/sql" + + "github.com/stretchr/testify/assert" +) + +// This migration creates a different proof table dropping all the information. 
+ +type migrationTest0002 struct{} + +func (m migrationTest0002) InsertData(db *sql.DB) error { + // Insert block to respect the FKey + const addBlock = "INSERT INTO state.block (block_num, received_at, block_hash) VALUES ($1, $2, $3)" + if _, err := db.Exec(addBlock, 1, time.Now(), "0x29e885edaf8e4b51e1d2e05f9da28161d2fb4f6b1d53827d9b80a23cf2d7d9f1"); err != nil { + return err + } + // Insert batch to respect the FKey + _, err := db.Exec("INSERT INTO state.batch (batch_num) VALUES (1)") + if err != nil { + return err + } + // Insert old proof + const insertProof = `INSERT INTO state.proof ( + batch_num, proof, proof_id, input_prover, prover + ) VALUES ( + 1,'{}','proof_identifier','{}','prover 1' + );` + _, err = db.Exec(insertProof) + return err +} + + +func (m migrationTest0002) RunAssertsAfterMigrationUp(t *testing.T, db *sql.DB) { + // Insert new proof + const insertProof = `INSERT INTO state.proof ( + batch_num, batch_num_final, proof, proof_id, input_prover, prover, generating + ) VALUES ( + 1, 1, '{}','proof_identifier','{}','prover 1', true + );` + _, err := db.Exec(insertProof) + assert.NoError(t, err) +} + +func (m migrationTest0002) RunAssertsAfterMigrationDown(t *testing.T, db *sql.DB) { + // Insert new proof + const insertNewProof = `INSERT INTO state.proof ( + batch_num, batch_num_final, proof, proof_id, input_prover, prover, generating + ) VALUES ( + 1, 1, '{}','proof_identifier','{}','prover 1', true + );` + _, err := db.Exec(insertNewProof) + assert.Error(t, err) + + // Insert old proof + const insertProof = `INSERT INTO state.proof ( + batch_num, proof, proof_id, input_prover, prover + ) VALUES ( + 1,'{}','proof_identifier','{}','prover 1' + );` + _, err = db.Exec(insertProof) + assert.NoError(t, err) +} + +func TestMigration0002(t *testing.T) { + runMigrationTest(t, 2, migrationTest0002{}) +} diff --git a/db/migrations/state/0003_test.go b/db/migrations/state/0003_test.go new file mode 100644 index 0000000000..f503ed8759 --- /dev/null +++ 
b/db/migrations/state/0003_test.go @@ -0,0 +1,33 @@ +package migrations_test + +import ( + "database/sql" + "testing" + "time" + + "github.com/stretchr/testify/assert" +) + +// This migration creates the `state.debug` table + +type migrationTest0003 struct{} + +func (m migrationTest0003) InsertData(db *sql.DB) error { + return nil +} + +func (m migrationTest0003) RunAssertsAfterMigrationUp(t *testing.T, db *sql.DB) { + const insertDebug = `INSERT INTO state.debug (error_type, timestamp, payload) VALUES ('error type', $1, 'payload stored')` + _, err := db.Exec(insertDebug, time.Now()) + assert.NoError(t, err) +} + +func (m migrationTest0003) RunAssertsAfterMigrationDown(t *testing.T, db *sql.DB) { + const insertDebug = `INSERT INTO state.debug (error_type, timestamp, payload) VALUES ('error type', $1, 'payload stored')` + _, err := db.Exec(insertDebug, time.Now()) + assert.Error(t, err) +} + +func TestMigration0003(t *testing.T) { + runMigrationTest(t, 3, migrationTest0003{}) +} diff --git a/db/migrations/state/0004_test.go b/db/migrations/state/0004_test.go new file mode 100644 index 0000000000..88a7d5f2d1 --- /dev/null +++ b/db/migrations/state/0004_test.go @@ -0,0 +1,71 @@ +package migrations_test + +import ( + "database/sql" + "testing" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/assert" +) + +// This migration moves the forced batch relation: it drops `batch_num` from `forced_batch` and adds a `forced_batch_num` column to `batch` + +type migrationTest0004 struct{} + +func (m migrationTest0004) InsertData(db *sql.DB) error { + // Insert block to respect the FKey + const addBlock = "INSERT INTO state.block (block_num, received_at, block_hash) VALUES ($1, $2, $3)" + if _, err := db.Exec(addBlock, 1, time.Now(), "0x29e885edaf8e4b51e1d2e05f9da28161d2fb4f6b1d53827d9b80a23cf2d7d9f1"); err != nil { + return err + } + const addForcedBatch = "INSERT INTO state.forced_batch (forced_batch_num, global_exit_root, raw_txs_data, coinbase, timestamp, block_num, batch_num) VALUES ($1, $2, $3, $4, $5, $6, $7)" + if _, err := 
db.Exec(addForcedBatch, 1, "0x29e885edaf8e4b51e1d2e05f9da28161d2fb4f6b1d53827d9b80a23cf2d7d9f1", "", "0x2536C2745Ac4A584656A830f7bdCd329c94e8F30", time.Now(), 1, 1); err != nil { + return err + } + // Insert batch + _, err := db.Exec(`INSERT INTO state.batch (batch_num, global_exit_root, local_exit_root, state_root, acc_input_hash, timestamp, coinbase, raw_txs_data) + VALUES (1, '0x29e885edaf8e4b51e1d2e05f9da28161d2fb4f6b1d53827d9b80a23cf2d7d9f1', '0x29e885edaf8e4b51e1d2e05f9da28161d2fb4f6b1d53827d9b80a23cf2d7d9f1', + '0x29e885edaf8e4b51e1d2e05f9da28161d2fb4f6b1d53827d9b80a23cf2d7d9f1', '0x29e885edaf8e4b51e1d2e05f9da28161d2fb4f6b1d53827d9b80a23cf2d7d9f1', + $1, '0x2536C2745Ac4A584656A830f7bdCd329c94e8F30', $2)`, time.Now(), common.HexToHash("0x29e885edaf8e0000000000000000a23cf2d7d9f1")) + if err != nil { + return err + } + return nil +} + +func (m migrationTest0004) RunAssertsAfterMigrationUp(t *testing.T, db *sql.DB) { + // Insert batch + _, err := db.Exec(`INSERT INTO state.batch (batch_num, global_exit_root, local_exit_root, state_root, acc_input_hash, timestamp, coinbase, raw_txs_data, forced_batch_num) + VALUES (2, '0x29e885edaf8e4b51e1d2e05f9da28161d2fb4f6b1d53827d9b80a23cf2d7d9f1', '0x29e885edaf8e4b51e1d2e05f9da28161d2fb4f6b1d53827d9b80a23cf2d7d9f1', + '0x29e885edaf8e4b51e1d2e05f9da28161d2fb4f6b1d53827d9b80a23cf2d7d9f1', '0x29e885edaf8e4b51e1d2e05f9da28161d2fb4f6b1d53827d9b80a23cf2d7d9f1', + $1, '0x2536C2745Ac4A584656A830f7bdCd329c94e8F30', $2, 1)`, time.Now(), common.HexToHash("0x29e885edaf8e0000000000000000a23cf2d7d9f1")) + assert.NoError(t, err) + addForcedBatch := "INSERT INTO state.forced_batch (forced_batch_num, global_exit_root, raw_txs_data, coinbase, timestamp, block_num, batch_num) VALUES ($1, $2, $3, $4, $5, $6, $7)" + _, err = db.Exec(addForcedBatch, 1, "0x29e885edaf8e4b51e1d2e05f9da28161d2fb4f6b1d53827d9b80a23cf2d7d9f1", "", "0x2536C2745Ac4A584656A830f7bdCd329c94e8F30", time.Now(), 1, 2) + assert.Error(t, err) + addForcedBatch = "INSERT INTO 
state.forced_batch (forced_batch_num, global_exit_root, raw_txs_data, coinbase, timestamp, block_num) VALUES ($1, $2, $3, $4, $5, $6)" + _, err = db.Exec(addForcedBatch, 2, "0x29e885edaf8e4b51e1d2e05f9da28161d2fb4f6b1d53827d9b80a23cf2d7d9f1", "", "0x2536C2745Ac4A584656A830f7bdCd329c94e8F30", time.Now(), 1) + assert.NoError(t, err) +} + +func (m migrationTest0004) RunAssertsAfterMigrationDown(t *testing.T, db *sql.DB) { + const addForcedBatch = "INSERT INTO state.forced_batch (forced_batch_num, global_exit_root, raw_txs_data, coinbase, timestamp, block_num, batch_num) VALUES ($1, $2, $3, $4, $5, $6, $7)" + _, err := db.Exec(addForcedBatch, 3, "0x29e885edaf8e4b51e1d2e05f9da28161d2fb4f6b1d53827d9b80a23cf2d7d9f1", "", "0x2536C2745Ac4A584656A830f7bdCd329c94e8F30", time.Now(), 1, 1) + assert.NoError(t, err) + // Insert batch + _, err = db.Exec(`INSERT INTO state.batch (batch_num, global_exit_root, local_exit_root, state_root, acc_input_hash, timestamp, coinbase, raw_txs_data, forced_batch_num) + VALUES (3, '0x29e885edaf8e4b51e1d2e05f9da28161d2fb4f6b1d53827d9b80a23cf2d7d9f1', '0x29e885edaf8e4b51e1d2e05f9da28161d2fb4f6b1d53827d9b80a23cf2d7d9f1', + '0x29e885edaf8e4b51e1d2e05f9da28161d2fb4f6b1d53827d9b80a23cf2d7d9f1', '0x29e885edaf8e4b51e1d2e05f9da28161d2fb4f6b1d53827d9b80a23cf2d7d9f1', + $1, '0x2536C2745Ac4A584656A830f7bdCd329c94e8F30', $2, 1)`, time.Now(), common.HexToHash("0x29e885edaf8e0000000000000000a23cf2d7d9f1")) + assert.Error(t, err) + _, err = db.Exec(`INSERT INTO state.batch (batch_num, global_exit_root, local_exit_root, state_root, acc_input_hash, timestamp, coinbase, raw_txs_data) + VALUES (3, '0x29e885edaf8e4b51e1d2e05f9da28161d2fb4f6b1d53827d9b80a23cf2d7d9f1', '0x29e885edaf8e4b51e1d2e05f9da28161d2fb4f6b1d53827d9b80a23cf2d7d9f1', + '0x29e885edaf8e4b51e1d2e05f9da28161d2fb4f6b1d53827d9b80a23cf2d7d9f1', '0x29e885edaf8e4b51e1d2e05f9da28161d2fb4f6b1d53827d9b80a23cf2d7d9f1', + $1, '0x2536C2745Ac4A584656A830f7bdCd329c94e8F30', $2)`, time.Now(), 
common.HexToHash("0x29e885edaf8e0000000000000000a23cf2d7d9f1")) + assert.NoError(t, err) +} + +func TestMigration0004(t *testing.T) { + runMigrationTest(t, 4, migrationTest0004{}) +} diff --git a/db/migrations/state/0005.sql b/db/migrations/state/0005.sql index 5741e9f5d5..c63867455b 100644 --- a/db/migrations/state/0005.sql +++ b/db/migrations/state/0005.sql @@ -25,4 +25,4 @@ CREATE TABLE state.monitored_txs ); ALTER TABLE state.verified_batch -ADD COLUMN is_trusted BOOLEAN DEFAULT true; \ No newline at end of file +ADD COLUMN is_trusted BOOLEAN DEFAULT true; diff --git a/db/migrations/state/0005_test.go b/db/migrations/state/0005_test.go new file mode 100644 index 0000000000..9a8e5c4565 --- /dev/null +++ b/db/migrations/state/0005_test.go @@ -0,0 +1,125 @@ +package migrations_test + +import ( + "database/sql" + "testing" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/assert" +) + +// this migration changes length of the token name +type migrationTest0005 struct{} + +func (m migrationTest0005) InsertData(db *sql.DB) error { + // Insert block to respect the FKey + const addBlock = "INSERT INTO state.block (block_num, received_at, block_hash) VALUES ($1, $2, $3)" + if _, err := db.Exec(addBlock, 1, time.Now(), "0x29e885edaf8e4b51e1d2e05f9da28161d2fb4f6b1d53827d9b80a23cf2d7d9f1"); err != nil { + return err + } + // Insert batch + _, err := db.Exec(`INSERT INTO state.batch (batch_num, global_exit_root, local_exit_root, state_root, acc_input_hash, timestamp, coinbase, raw_txs_data) + VALUES (1, '0x29e885edaf8e4b51e1d2e05f9da28161d2fb4f6b1d53827d9b80a23cf2d7d9f1', '0x29e885edaf8e4b51e1d2e05f9da28161d2fb4f6b1d53827d9b80a23cf2d7d9f1', + '0x29e885edaf8e4b51e1d2e05f9da28161d2fb4f6b1d53827d9b80a23cf2d7d9f1', '0x29e885edaf8e4b51e1d2e05f9da28161d2fb4f6b1d53827d9b80a23cf2d7d9f1', + $1, '0x2536C2745Ac4A584656A830f7bdCd329c94e8F30', $2)`, time.Now(), common.HexToHash("0x29e885edaf8e0000000000000000a23cf2d7d9f1")) + if err != nil { + return 
err + } + // Insert virtual batch + const insertVirtualBatch = `INSERT INTO state.virtual_batch ( + batch_num, tx_hash, coinbase, block_num + ) VALUES ( + 1, '0x29e885edaf8e4b51e1d2e05f9da28161d2fb4f6b1d53827d9b80a23cf2d7d9f1', '0x514910771af9ca656af840dff83e8264ecf986ca', 1);` + _, err = db.Exec(insertVirtualBatch) + if err != nil { + return err + } + // Insert verified batch + const insertVerifiedBatch = `INSERT INTO state.verified_batch ( + batch_num, tx_hash, aggregator, state_root, block_num + ) VALUES ( + 1, '0x29e885edaf8e4b51e1d2e05f9da28161d2fb4f6b1d53827d9b80a23cf2d7d9f1', '0x514910771af9ca656af840dff83e8264ecf986ca', '0x29e885edaf8e4b51e1d2e05f9da28161d2fb4f6b1d53827d9b80a23cf2d7d9f1', 1 + );` + _, err = db.Exec(insertVerifiedBatch) + return err +} + +func (m migrationTest0005) RunAssertsAfterMigrationUp(t *testing.T, db *sql.DB) { + // Insert batch + _, err := db.Exec(`INSERT INTO state.batch (batch_num, global_exit_root, local_exit_root, state_root, acc_input_hash, timestamp, coinbase, raw_txs_data) + VALUES (2, '0x29e885edaf8e4b51e1d2e05f9da28161d2fb4f6b1d53827d9b80a23cf2d7d9f1', '0x29e885edaf8e4b51e1d2e05f9da28161d2fb4f6b1d53827d9b80a23cf2d7d9f1', + '0x29e885edaf8e4b51e1d2e05f9da28161d2fb4f6b1d53827d9b80a23cf2d7d9f1', '0x29e885edaf8e4b51e1d2e05f9da28161d2fb4f6b1d53827d9b80a23cf2d7d9f1', + $1, '0x2536C2745Ac4A584656A830f7bdCd329c94e8F30', $2)`, time.Now(), common.HexToHash("0x29e885edaf8e0000000000000000a23cf2d7d9f1")) + assert.NoError(t, err) + // Insert virtual batch + const insertVirtualBatch = `INSERT INTO state.virtual_batch ( + batch_num, tx_hash, coinbase, block_num + ) VALUES ( + 2, '0x29e885edaf8e4b51e1d2e05f9da28161d2fb4f6b1d53827d9b80a23cf2d7d9f1', '0x514910771af9ca656af840dff83e8264ecf986ca', 1);` + _, err = db.Exec(insertVirtualBatch) + assert.NoError(t, err) + + // Insert verified batch + const insertVerifiedBatch = `INSERT INTO state.verified_batch ( + batch_num, tx_hash, aggregator, state_root, block_num, is_trusted + ) VALUES ( + 2, 
'0x29e885edaf8e4b51e1d2e05f9da28161d2fb4f6b1d53827d9b80a23cf2d7d9f1', '0x514910771af9ca656af840dff83e8264ecf986ca', '0x29e885edaf8e4b51e1d2e05f9da28161d2fb4f6b1d53827d9b80a23cf2d7d9f1', 1, true + );` + _, err = db.Exec(insertVerifiedBatch) + assert.NoError(t, err) + + // Insert monitored_txs + const insertMonitoredTxs = `INSERT INTO state.monitored_txs ( + owner, id, from_addr, to_addr, nonce, value, data, gas, gas_price, status, block_num, created_at, updated_at + ) VALUES ( + '0x514910771af9ca656af840dff83e8264ecf986ca', '1', '0x514910771af9ca656af840dff83e8264ecf986ca', '0x514910771af9ca656af840dff83e8264ecf986ca', 1, 0, '0x', 100, 12, 'created', 1, $1, $2 + );` + _, err = db.Exec(insertMonitoredTxs, time.Now(), time.Now()) + assert.NoError(t, err) +} + +func (m migrationTest0005) RunAssertsAfterMigrationDown(t *testing.T, db *sql.DB) { + // Insert batch + _, err := db.Exec(`INSERT INTO state.batch (batch_num, global_exit_root, local_exit_root, state_root, acc_input_hash, timestamp, coinbase, raw_txs_data) + VALUES (3, '0x29e885edaf8e4b51e1d2e05f9da28161d2fb4f6b1d53827d9b80a23cf2d7d9f1', '0x29e885edaf8e4b51e1d2e05f9da28161d2fb4f6b1d53827d9b80a23cf2d7d9f1', + '0x29e885edaf8e4b51e1d2e05f9da28161d2fb4f6b1d53827d9b80a23cf2d7d9f1', '0x29e885edaf8e4b51e1d2e05f9da28161d2fb4f6b1d53827d9b80a23cf2d7d9f1', + $1, '0x2536C2745Ac4A584656A830f7bdCd329c94e8F30', $2)`, time.Now(), common.HexToHash("0x29e885edaf8e0000000000000000a23cf2d7d9f1")) + assert.NoError(t, err) + // Insert virtual batch + const insertVirtualBatch = `INSERT INTO state.virtual_batch ( + batch_num, tx_hash, coinbase, block_num + ) VALUES ( + 3, '0x29e885edaf8e4b51e1d2e05f9da28161d2fb4f6b1d53827d9b80a23cf2d7d9f1', '0x514910771af9ca656af840dff83e8264ecf986ca', 1);` + _, err = db.Exec(insertVirtualBatch) + assert.NoError(t, err) + + // Insert verified batch + insertVerifiedBatch := `INSERT INTO state.verified_batch ( + batch_num, tx_hash, aggregator, state_root, block_num, is_trusted + ) VALUES ( + 3, 
'0x29e885edaf8e4b51e1d2e05f9da28161d2fb4f6b1d53827d9b80a23cf2d7d9f1', '0x514910771af9ca656af840dff83e8264ecf986ca', '0x29e885edaf8e4b51e1d2e05f9da28161d2fb4f6b1d53827d9b80a23cf2d7d9f1', 1, true + );` + _, err = db.Exec(insertVerifiedBatch) + assert.Error(t, err) + insertVerifiedBatch = `INSERT INTO state.verified_batch ( + batch_num, tx_hash, aggregator, state_root, block_num + ) VALUES ( + 3, '0x29e885edaf8e4b51e1d2e05f9da28161d2fb4f6b1d53827d9b80a23cf2d7d9f1', '0x514910771af9ca656af840dff83e8264ecf986ca', '0x29e885edaf8e4b51e1d2e05f9da28161d2fb4f6b1d53827d9b80a23cf2d7d9f1', 1 + );` + _, err = db.Exec(insertVerifiedBatch) + assert.NoError(t, err) + + // Insert monitored_txs + const insertMonitoredTxs = `INSERT INTO state.monitored_txs ( + owner, id, from_addr, to_addr, nonce, value, data, gas, gas_price, status, block_num, created_at, updated_at + ) VALUES ( + '0x514910771af9ca656af840dff83e8264ecf986ca', '1', '0x514910771af9ca656af840dff83e8264ecf986ca', '0x514910771af9ca656af840dff83e8264ecf986ca', 1, 0, '0x', 100, 12, 'created', 1, $1, $2 + );` + _, err = db.Exec(insertMonitoredTxs, time.Now(), time.Now()) + assert.Error(t, err) +} + +func TestMigration0005(t *testing.T) { + runMigrationTest(t, 5, migrationTest0005{}) +} diff --git a/db/migrations/state/0006.sql b/db/migrations/state/0006.sql new file mode 100644 index 0000000000..0dca90e59e --- /dev/null +++ b/db/migrations/state/0006.sql @@ -0,0 +1,15 @@ +-- +migrate Up +CREATE INDEX IF NOT EXISTS transaction_l2_block_num_idx ON state.transaction (l2_block_num); +CREATE INDEX IF NOT EXISTS l2block_batch_num_idx ON state.l2block (batch_num); +CREATE INDEX IF NOT EXISTS l2block_received_at_idx ON state.l2block (received_at); +CREATE INDEX IF NOT EXISTS batch_timestamp_idx ON state.batch ("timestamp"); +CREATE INDEX IF NOT EXISTS log_tx_hash_idx ON state.log (tx_hash); +CREATE INDEX IF NOT EXISTS log_address_idx ON state.log (address); + +-- +migrate Down +DROP INDEX IF EXISTS transaction_l2_block_num_idx; +DROP 
INDEX IF EXISTS l2block_batch_num_idx; +DROP INDEX IF EXISTS l2block_received_at_idx; +DROP INDEX IF EXISTS batch_timestamp_idx; +DROP INDEX IF EXISTS log_tx_hash_idx; +DROP INDEX IF EXISTS log_address_idx; diff --git a/db/migrations/state/utils_test.go b/db/migrations/state/utils_test.go new file mode 100644 index 0000000000..dfe71acbd2 --- /dev/null +++ b/db/migrations/state/utils_test.go @@ -0,0 +1,119 @@ +package migrations_test + +import ( + "database/sql" + "fmt" + "testing" + + "github.com/0xPolygonHermez/zkevm-node/db" + "github.com/0xPolygonHermez/zkevm-node/test/dbutils" + "github.com/gobuffalo/packr/v2" + + "github.com/0xPolygonHermez/zkevm-node/log" + "github.com/jackc/pgx/v4" + "github.com/jackc/pgx/v4/stdlib" + migrate "github.com/rubenv/sql-migrate" + "github.com/stretchr/testify/require" +) + +/* + Considerations tricks and tips for migration file testing: + + - Functionality of the DB is tested by the rest of the packages, migration tests only have to check persistence across migrations (both UP and DOWN) + - It's recommended to use real data (from testnet/mainnet), but modifying NULL fields to check that those are migrated properly + - It's recommended to use some SQL tool (such as DBeaver) that generates insert queries from existing rows + - Any new migration file could be tested using the existing `migrationTester` interface. 
Check `0002_test.go` for an example +*/ + +func init() { + log.Init(log.Config{ + Level: "debug", + Outputs: []string{"stderr"}, + }) +} + +type migrationTester interface { + // InsertData used to insert data in the affected tables of the migration that is being tested + // data will be inserted with the schema as it was previous the migration that is being tested + InsertData(*sql.DB) error + // RunAssertsAfterMigrationUp this function will be called after running the migration is being tested + // and should assert that the data inserted in the function InsertData is persisted properly + RunAssertsAfterMigrationUp(*testing.T, *sql.DB) + // RunAssertsAfterMigrationDown this function will be called after reverting the migration that is being tested + // and should assert that the data inserted in the function InsertData is persisted properly + RunAssertsAfterMigrationDown(*testing.T, *sql.DB) +} + +var ( + stateDBCfg = dbutils.NewStateConfigFromEnv() + packrMigrations = map[string]*packr.Box{ + db.StateMigrationName: packr.New(db.StateMigrationName, "./migrations/state"), + db.PoolMigrationName: packr.New(db.PoolMigrationName, "./migrations/pool"), + } +) + +func runMigrationTest(t *testing.T, migrationNumber int, miter migrationTester) { + // Initialize an empty DB + d, err := initCleanSQLDB() + require.NoError(t, err) + require.NoError(t, runMigrationsDown(d, 0, db.StateMigrationName)) + // Run migrations until migration to test + require.NoError(t, runMigrationsUp(d, migrationNumber-1, db.StateMigrationName)) + // Insert data into table(s) affected by migration + require.NoError(t, miter.InsertData(d)) + // Run migration that is being tested + require.NoError(t, runMigrationsUp(d, 1, db.StateMigrationName)) + // Check that data is persisted properly after migration up + miter.RunAssertsAfterMigrationUp(t, d) + // Revert migration to test + require.NoError(t, runMigrationsDown(d, 1, db.StateMigrationName)) + // Check that data is persisted properly after 
migration down + miter.RunAssertsAfterMigrationDown(t, d) +} + +func initCleanSQLDB() (*sql.DB, error) { + // run migrations + if err := db.RunMigrationsDown(stateDBCfg, db.StateMigrationName); err != nil { + return nil, err + } + c, err := pgx.ParseConfig(fmt.Sprintf("postgres://%s:%s@%s:%s/%s", stateDBCfg.User, stateDBCfg.Password, stateDBCfg.Host, stateDBCfg.Port, stateDBCfg.Name)) + if err != nil { + return nil, err + } + sqlDB := stdlib.OpenDB(*c) + return sqlDB, nil +} + +func runMigrationsUp(d *sql.DB, n int, packrName string) error { + box, ok := packrMigrations[packrName] + if !ok { + return fmt.Errorf("packr box not found with name: %v", packrName) + } + + var migrations = &migrate.PackrMigrationSource{Box: box} + nMigrations, err := migrate.ExecMax(d, "postgres", migrations, migrate.Up, n) + if err != nil { + return err + } + if nMigrations != n { + return fmt.Errorf("Unexpected amount of migrations: expected: %d, actual: %d", n, nMigrations) + } + return nil +} + +func runMigrationsDown(d *sql.DB, n int, packrName string) error { + box, ok := packrMigrations[packrName] + if !ok { + return fmt.Errorf("packr box not found with name: %v", packrName) + } + + var migrations = &migrate.PackrMigrationSource{Box: box} + nMigrations, err := migrate.ExecMax(d, "postgres", migrations, migrate.Down, n) + if err != nil { + return err + } + if nMigrations != n { + return fmt.Errorf("Unexpected amount of migrations: expected: %d, actual: %d", n, nMigrations) + } + return nil +} \ No newline at end of file diff --git a/test/dbutils/dbutils.go b/test/dbutils/dbutils.go index 2409831084..8723fc065e 100644 --- a/test/dbutils/dbutils.go +++ b/test/dbutils/dbutils.go @@ -21,7 +21,7 @@ func InitOrResetPool(cfg db.Config) error { } // initOrReset will initializes the db running the migrations or -// will reset all the known data and rerun the migrations +// will reset all the known data and rerun the migrations func initOrReset(cfg db.Config, name string) error { 
log.Infof("running migrations for %v", name) // connect to database From 696b349c703fa8d56e0cef61e75ad59f057e15bc Mon Sep 17 00:00:00 2001 From: Alonso Date: Mon, 30 Jan 2023 13:42:03 +0100 Subject: [PATCH 2/6] linter --- cmd/run.go | 2 +- db/migrations/state/0002_test.go | 3 +-- db/migrations/state/utils_test.go | 7 +++---- 3 files changed, 5 insertions(+), 7 deletions(-) diff --git a/cmd/run.go b/cmd/run.go index 5e4c2a2772..e5be1f7169 100644 --- a/cmd/run.go +++ b/cmd/run.go @@ -311,7 +311,7 @@ func startMetricsHttpServer(c *config.Config) { return } mux.Handle(metrics.Endpoint, promhttp.Handler()) - metricsServer := &http.Server{ + metricsServer := &http.Server{ // nolint Handler: mux, } log.Infof("metrics server listening on port %d", c.Metrics.Port) diff --git a/db/migrations/state/0002_test.go b/db/migrations/state/0002_test.go index 56423da72d..8ab57a217a 100644 --- a/db/migrations/state/0002_test.go +++ b/db/migrations/state/0002_test.go @@ -1,9 +1,9 @@ package migrations_test import ( + "database/sql" "testing" "time" - "database/sql" "github.com/stretchr/testify/assert" ) @@ -33,7 +33,6 @@ func (m migrationTest0002) InsertData(db *sql.DB) error { return err } - func (m migrationTest0002) RunAssertsAfterMigrationUp(t *testing.T, db *sql.DB) { // Insert new proof const insertProof = `INSERT INTO state.proof ( diff --git a/db/migrations/state/utils_test.go b/db/migrations/state/utils_test.go index dfe71acbd2..4284614d5e 100644 --- a/db/migrations/state/utils_test.go +++ b/db/migrations/state/utils_test.go @@ -6,10 +6,9 @@ import ( "testing" "github.com/0xPolygonHermez/zkevm-node/db" + "github.com/0xPolygonHermez/zkevm-node/log" "github.com/0xPolygonHermez/zkevm-node/test/dbutils" "github.com/gobuffalo/packr/v2" - - "github.com/0xPolygonHermez/zkevm-node/log" "github.com/jackc/pgx/v4" "github.com/jackc/pgx/v4/stdlib" migrate "github.com/rubenv/sql-migrate" @@ -45,7 +44,7 @@ type migrationTester interface { } var ( - stateDBCfg = 
dbutils.NewStateConfigFromEnv() + stateDBCfg = dbutils.NewStateConfigFromEnv() packrMigrations = map[string]*packr.Box{ db.StateMigrationName: packr.New(db.StateMigrationName, "./migrations/state"), db.PoolMigrationName: packr.New(db.PoolMigrationName, "./migrations/pool"), @@ -116,4 +115,4 @@ func runMigrationsDown(d *sql.DB, n int, packrName string) error { return fmt.Errorf("Unexpected amount of migrations: expected: %d, actual: %d", n, nMigrations) } return nil -} \ No newline at end of file +} From 0336d1402ed546a1642913bba4722312543ed84f Mon Sep 17 00:00:00 2001 From: Alonso Date: Tue, 31 Jan 2023 10:46:35 +0100 Subject: [PATCH 3/6] 0006 --- db/migrations/state/0006.sql | 12 ++++----- db/migrations/state/0006_test.go | 43 ++++++++++++++++++++++++++++++++ 2 files changed, 49 insertions(+), 6 deletions(-) create mode 100644 db/migrations/state/0006_test.go diff --git a/db/migrations/state/0006.sql b/db/migrations/state/0006.sql index 0dca90e59e..eb03241706 100644 --- a/db/migrations/state/0006.sql +++ b/db/migrations/state/0006.sql @@ -7,9 +7,9 @@ CREATE INDEX IF NOT EXISTS log_tx_hash_idx ON state.log (tx_hash); CREATE INDEX IF NOT EXISTS log_address_idx ON state.log (address); -- +migrate Down -DROP INDEX IF EXISTS transaction_l2_block_num_idx; -DROP INDEX IF EXISTS l2block_batch_num_idx; -DROP INDEX IF EXISTS l2block_received_at_idx; -DROP INDEX IF EXISTS batch_timestamp_idx; -DROP INDEX IF EXISTS log_tx_hash_idx; -DROP INDEX IF EXISTS log_address_idx; +DROP INDEX IF EXISTS state.transaction_l2_block_num_idx; +DROP INDEX IF EXISTS state.l2block_batch_num_idx; +DROP INDEX IF EXISTS state.l2block_received_at_idx; +DROP INDEX IF EXISTS state.batch_timestamp_idx; +DROP INDEX IF EXISTS state.log_tx_hash_idx; +DROP INDEX IF EXISTS state.log_address_idx; diff --git a/db/migrations/state/0006_test.go b/db/migrations/state/0006_test.go new file mode 100644 index 0000000000..ec9a626226 --- /dev/null +++ b/db/migrations/state/0006_test.go @@ -0,0 +1,43 @@ +package 
migrations_test + +import ( + "database/sql" + "testing" + + "github.com/stretchr/testify/assert" +) + +// this migration changes length of the token name +type migrationTest0006 struct{} + +func (m migrationTest0006) InsertData(db *sql.DB) error { + return nil +} + +var indexes = []string{"transaction_l2_block_num_idx", "l2block_batch_num_idx", "l2block_received_at_idx", + "batch_timestamp_idx", "log_tx_hash_idx", "log_address_idx"} +func (m migrationTest0006) RunAssertsAfterMigrationUp(t *testing.T, db *sql.DB) { + for _, idx := range indexes { + // getIndex + const getIndex = `SELECT count(*) FROM pg_indexes WHERE indexname = $1;` + row := db.QueryRow(getIndex, idx) + var result int + assert.NoError(t, row.Scan(&result)) + assert.Equal(t, 1, result) + } +} + +func (m migrationTest0006) RunAssertsAfterMigrationDown(t *testing.T, db *sql.DB) { + for _, idx := range indexes { + // getIndex + const getIndex = `SELECT count(*) FROM pg_indexes WHERE indexname = $1;` + row := db.QueryRow(getIndex, idx) + var result int + assert.NoError(t, row.Scan(&result)) + assert.Equal(t, 0, result) + } +} + +func TestMigration0006(t *testing.T) { + runMigrationTest(t, 6, migrationTest0006{}) +} From 029c5def6e94e957c7ee3a4453bd855473161808 Mon Sep 17 00:00:00 2001 From: Alonso Date: Tue, 31 Jan 2023 11:12:07 +0100 Subject: [PATCH 4/6] bug fixed --- state/pgstatestorage.go | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/state/pgstatestorage.go b/state/pgstatestorage.go index b70175b681..9ea6d01cf6 100644 --- a/state/pgstatestorage.go +++ b/state/pgstatestorage.go @@ -1149,7 +1149,8 @@ func (p *PostgresStorage) getTransactionLogs(ctx context.Context, transactionHas FROM state.log l INNER JOIN state.transaction t ON t.hash = l.tx_hash INNER JOIN state.l2block b ON b.block_num = t.l2_block_num - WHERE t.hash = $1` + WHERE t.hash = $1 + ORDER BY l.log_index ASC` rows, err := q.Query(ctx, getTransactionLogsSQL, transactionHash.String()) if !errors.Is(err, 
pgx.ErrNoRows) && err != nil { return nil, err @@ -1624,7 +1625,8 @@ func (p *PostgresStorage) GetLogs(ctx context.Context, fromBlock uint64, toBlock FROM state.log l INNER JOIN state.transaction t ON t.hash = l.tx_hash INNER JOIN state.l2block b ON b.block_num = t.l2_block_num - WHERE b.block_hash = $1` + WHERE b.block_hash = $1 + ORDER BY b.block_num ASC, l.log_index ASC` const getLogsByFilterSQL = ` SELECT t.l2_block_num, b.block_hash, l.tx_hash, l.log_index, l.address, l.data, l.topic0, l.topic1, l.topic2, l.topic3 FROM state.log l @@ -1636,7 +1638,7 @@ func (p *PostgresStorage) GetLogs(ctx context.Context, fromBlock uint64, toBlock AND (l.topic2 = any($6) OR $6 IS NULL) AND (l.topic3 = any($7) OR $7 IS NULL) AND (b.created_at >= $8 OR $8 IS NULL) - ORDER BY b.block_num ASC` + ORDER BY b.block_num ASC, l.log_index ASC` var err error var rows pgx.Rows From c290124ce59a42b9c95df8abd072f2622fd5e062 Mon Sep 17 00:00:00 2001 From: Alonso Date: Tue, 31 Jan 2023 11:18:10 +0100 Subject: [PATCH 5/6] linter --- db/migrations/state/0006_test.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/db/migrations/state/0006_test.go b/db/migrations/state/0006_test.go index ec9a626226..1d434fc2ce 100644 --- a/db/migrations/state/0006_test.go +++ b/db/migrations/state/0006_test.go @@ -15,7 +15,8 @@ func (m migrationTest0006) InsertData(db *sql.DB) error { } var indexes = []string{"transaction_l2_block_num_idx", "l2block_batch_num_idx", "l2block_received_at_idx", - "batch_timestamp_idx", "log_tx_hash_idx", "log_address_idx"} + "batch_timestamp_idx", "log_tx_hash_idx", "log_address_idx"} + func (m migrationTest0006) RunAssertsAfterMigrationUp(t *testing.T, db *sql.DB) { for _, idx := range indexes { // getIndex From 3f83176aa6be53a4260f7033d96dca04c58d0eb0 Mon Sep 17 00:00:00 2001 From: Alonso Date: Wed, 1 Feb 2023 11:25:02 +0100 Subject: [PATCH 6/6] suggestion + comment --- cmd/run.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/run.go 
b/cmd/run.go index e5be1f7169..0d5e6d03ba 100644 --- a/cmd/run.go +++ b/cmd/run.go @@ -311,7 +311,7 @@ func startMetricsHttpServer(c *config.Config) { return } mux.Handle(metrics.Endpoint, promhttp.Handler()) - metricsServer := &http.Server{ // nolint + metricsServer := &http.Server{ //nolint // Potential Slowloris Attack Handler: mux, } log.Infof("metrics server listening on port %d", c.Metrics.Port)