Fix migration from 0005 to 0006 #159

Merged 3 commits on May 14, 2019
2 changes: 2 additions & 0 deletions CHANGELOG.md
@@ -2,6 +2,8 @@


## Unreleased
+ Fixed a major issue where data loss would occur when performing database
migration 0005 -> 0006. (#158)
+ Links to the Swagger/ReDoc API documentation are now provided on the main
page. (#152)
+ Fixed small error in development documentation. (#153)
14 changes: 10 additions & 4 deletions migrations/0006_make_primary_keys_autoincrement.py
@@ -24,7 +24,7 @@
"lower_limit" REAL
);
INSERT INTO "metric" SELECT * FROM "metric_temp";
DROP TABLE "metric_temp";
DROP INDEX IF EXISTS "metric_name";
CREATE UNIQUE INDEX "metric_name" ON "metric" ("name");

ALTER TABLE "datapoint" RENAME TO "datapoint_temp";
@@ -36,8 +36,11 @@
FOREIGN KEY ("metric_id") REFERENCES "metric" ("metric_id") ON DELETE CASCADE
);
INSERT INTO "datapoint" SELECT * FROM "datapoint_temp";
DROP TABLE "datapoint_temp";
DROP INDEX IF EXISTS "datapoint_metric_id";
CREATE INDEX "fakemodel_metric_id" ON "datapoint" ("metric_id");

DROP TABLE "metric_temp";
DROP TABLE "datapoint_temp";
"""

# New DDL
@@ -51,7 +54,7 @@
"lower_limit" REAL
);
INSERT INTO "metric" SELECT * FROM "metric_temp";
DROP TABLE "metric_temp";
DROP INDEX IF EXISTS "metric_name";
CREATE UNIQUE INDEX "metric_name" ON "metric" ("name");

ALTER TABLE "datapoint" RENAME TO "datapoint_temp";
@@ -63,8 +66,11 @@
FOREIGN KEY("metric_id") REFERENCES "metric" ( "metric_id" ) ON DELETE CASCADE
);
INSERT INTO "datapoint" SELECT * FROM "datapoint_temp";
DROP TABLE "datapoint_temp";
DROP INDEX IF EXISTS "datapoint_metric_id";
CREATE INDEX "datapoint_metric_id" ON "datapoint" ("metric_id");

DROP TABLE "datapoint_temp";
DROP TABLE "metric_temp";
"""


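For context on why the DROP TABLE statements were moved to the end of both DDL blocks: when SQLite's foreign-key enforcement is on (the fixtures below pass pragmas=orm.DB_OPTS, which presumably enables it), dropping a parent table performs an implicit DELETE of its rows first, and that DELETE fires any ON DELETE CASCADE actions on child tables. A minimal, self-contained sketch of that behaviour using only the standard-library sqlite3 module; the schema here is trimmed down and illustrative, not the project's exact DDL:

# Minimal sketch of the suspected cascade. Assumption: SQLite >= 3.25 with
# legacy_alter_table at its default (off), so RENAME also rewrites the child
# table's foreign-key reference.
import sqlite3

con = sqlite3.connect(":memory:")
con.execute("PRAGMA foreign_keys = ON")
con.execute('CREATE TABLE "metric" ("metric_id" INTEGER PRIMARY KEY, "name" TEXT)')
con.execute(
    'CREATE TABLE "datapoint" ("datapoint_id" INTEGER PRIMARY KEY, "metric_id" INTEGER, '
    'FOREIGN KEY ("metric_id") REFERENCES "metric" ("metric_id") ON DELETE CASCADE)'
)
con.execute('INSERT INTO "metric" VALUES (?, ?)', (1, "foo"))
con.execute('INSERT INTO "datapoint" VALUES (?, ?)', (1, 1))

# Mirror the migration's pattern: rename the parent out of the way, then drop it
# before the child table has been rebuilt against the new parent.
con.execute('ALTER TABLE "metric" RENAME TO "metric_temp"')
con.execute('DROP TABLE "metric_temp"')  # implicit DELETE -> cascade hits "datapoint"

print(con.execute('SELECT COUNT(*) FROM "datapoint"').fetchone())  # (0,) -- child rows gone

Deferring the DROP TABLE statements until after the new tables and indexes are rebuilt avoids this, which is what the reordered DDL above does.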
147 changes: 147 additions & 0 deletions tests/test_regression.py
@@ -0,0 +1,147 @@
# -*- coding: utf-8 -*-
"""
"""
from unittest.mock import MagicMock
from unittest.mock import patch

import pytest
from peewee import SqliteDatabase
from peewee_moves import DatabaseManager

from trendlines import routes
from trendlines import orm


@pytest.fixture
def db_0005(tmp_path):
path = tmp_path / "foo.db"
manager = DatabaseManager(SqliteDatabase(str(path)))
manager.upgrade("0005")
print(path)
yield path
path.unlink()


@pytest.fixture
def db_0005_with_data(db_0005):
orm.db.init(str(db_0005), pragmas=orm.DB_OPTS)
with orm.db:
m1 = orm.Metric.create(name="foo")
m2 = orm.Metric.create(name="bar")
orm.DataPoint.create(metric=m1, value=1, timestamp=1557860569)
orm.DataPoint.create(metric=m1, value=3, timestamp=1557860570)
orm.DataPoint.create(metric=m1, value=5, timestamp=1557860571)
orm.DataPoint.create(metric=m2, value=2, timestamp=1557860572)
orm.DataPoint.create(metric=m2, value=4, timestamp=1557860573)
yield db_0005


@pytest.fixture
def db_0006(tmp_path):
path = tmp_path / "foo.db"
manager = DatabaseManager(SqliteDatabase(str(path)))
manager.upgrade("0006")
print(path)
yield path
path.unlink()


@pytest.fixture
def db_0006_with_data(db_0006):
orm.db.init(str(db_0006), pragmas=orm.DB_OPTS)
with orm.db:
m1 = orm.Metric.create(name="foo")
m2 = orm.Metric.create(name="bar")
orm.DataPoint.create(metric=m1, value=1, timestamp=1557860569)
orm.DataPoint.create(metric=m1, value=3, timestamp=1557860570)
orm.DataPoint.create(metric=m1, value=5, timestamp=1557860571)
orm.DataPoint.create(metric=m2, value=2, timestamp=1557860572)
orm.DataPoint.create(metric=m2, value=4, timestamp=1557860573)
yield db_0006


@pytest.mark.regression
@pytest.mark.gh158
def test_migration_0005_to_0006(db_0005_with_data):
"""
There is an issue with upgrading from migration 0005 to 0006, introduced
in #143 / 6d6b050d4bf47d5b3cdc07fef8321c54861cfea1.

The issue is tracked in #158. Basically the upgrade would drop the
contents of the `datapoint` table, likely because of the `ON DELETE
CASCADE` that was added in migration 0005.
"""
# Verify we have data
with orm.db:
metric_0005 = orm.db.execute_sql('SELECT * FROM "Metric"').fetchall()
data_0005 = orm.db.execute_sql('SELECT * FROM "Datapoint"').fetchall()
migrations = orm.db.execute_sql(
'SELECT * FROM "migration_history"'
).fetchall()

assert len(metric_0005) != 0
assert len(data_0005) != 0
# Make sure we do not have migration 0006 applied.
msg = "Migration 0006 applied when it shouldn't be."
assert not any("0006" in m[1] for m in migrations), msg

# Then upgrade to 0006
# Note: we can't use manager.upgrade, as that doesn't reproduce the issue
orm.create_db(str(db_0005_with_data))

with orm.db:
metric_0006 = orm.db.execute_sql('SELECT * FROM "Metric"').fetchall()
data_0006 = orm.db.execute_sql('SELECT * FROM "Datapoint"').fetchall()
migrations = orm.db.execute_sql(
'SELECT * FROM "migration_history"'
).fetchall()

# Ensure that migration 0006 *is* applied.
msg = "Migration 0006 is not applied, it should be."
assert any(["0006" in m[1] for m in migrations]), msg

# And that data still matches.
assert len(metric_0006) != 0
assert metric_0006 == metric_0005
assert len(data_0006) != 0
assert data_0006 == data_0005


@pytest.mark.regression
@pytest.mark.gh158
def test_migration_0006_to_0005(db_0006_with_data):
# Verify we have data
with orm.db:
metric_0006 = orm.db.execute_sql('SELECT * FROM "Metric"').fetchall()
data_0006 = orm.db.execute_sql('SELECT * FROM "Datapoint"').fetchall()
migrations = orm.db.execute_sql(
'SELECT * FROM "migration_history"'
).fetchall()

assert len(metric_0006) != 0
assert len(data_0006) != 0
# Make sure we have migration 0006 applied.
msg = "Migration 0006 is not applied, it should be."
assert any("0006" in m[1] for m in migrations), msg

# Then downgrade to 0005. `orm.create_db` doesn't have any downgrade
# capability, so we need to use `manager.downgrade()`
manager = DatabaseManager(SqliteDatabase(str(db_0006_with_data)))
manager.downgrade("0005")

with orm.db:
metric_0005 = orm.db.execute_sql('SELECT * FROM "Metric"').fetchall()
data_0005 = orm.db.execute_sql('SELECT * FROM "Datapoint"').fetchall()
migrations = orm.db.execute_sql(
'SELECT * FROM "migration_history"'
).fetchall()

# Ensure that migration 0006 *is not* applied.
msg = "Migration 0006 applied when it shouldn't be."
assert not any("0006" in m[1] for m in migrations), msg

# And that data still matches.
assert len(metric_0005) != 0
assert metric_0005 == metric_0006
assert len(data_0005) != 0
assert data_0005 == data_0006
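
Outside of pytest, the same upgrade check can be run by hand with the APIs the fixtures above already use. A rough sketch, assuming it is run from a trendlines checkout so that peewee_moves finds the default "migrations" directory; the "scratch.db" path is illustrative:

# Rough manual check that the 0005 -> 0006 upgrade now keeps data.
from peewee import SqliteDatabase
from peewee_moves import DatabaseManager

from trendlines import orm

db_path = "scratch.db"                       # illustrative scratch database
manager = DatabaseManager(SqliteDatabase(db_path))
manager.upgrade("0005")                      # schema as of migration 0005

orm.db.init(db_path, pragmas=orm.DB_OPTS)
with orm.db:
    m = orm.Metric.create(name="foo")
    orm.DataPoint.create(metric=m, value=1, timestamp=1557860569)

orm.create_db(db_path)                       # per the test above, this applies migration 0006

with orm.db:
    rows = orm.db.execute_sql('SELECT * FROM "datapoint"').fetchall()
print(rows)                                  # the row inserted above should still be present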