From 508357cb6b099a18b55dd9000aadbe6cb22ea832 Mon Sep 17 00:00:00 2001
From: Jiri Vrany
Date: Mon, 2 Feb 2026 14:18:42 +0100
Subject: [PATCH 01/13] change all template urls to use url_for helper

delete unused rule.html template form
---
 flowapp/templates/forms/rule.html      | 105 -------------------------
 flowapp/templates/layouts/default.html |  10 +--
 2 files changed, 5 insertions(+), 110 deletions(-)
 delete mode 100644 flowapp/templates/forms/rule.html

diff --git a/flowapp/templates/forms/rule.html b/flowapp/templates/forms/rule.html
deleted file mode 100644
index 1a1c94b..0000000
--- a/flowapp/templates/forms/rule.html
+++ /dev/null
@@ -1,105 +0,0 @@
-{% extends 'layouts/default.html' %}
-{% block title %}Add IPv4 rule{% endblock %}
-{% block content %}
 [... remaining ~100 deleted lines of the rule.html form markup omitted: the HTML tags were stripped in extraction and the content is not recoverable ...]
-{% endblock %}
\ No newline at end of file

diff --git a/flowapp/templates/layouts/default.html b/flowapp/templates/layouts/default.html
index 4ecbecd..402f566 100644
--- a/flowapp/templates/layouts/default.html
+++ b/flowapp/templates/layouts/default.html
@@ -18,7 +18,7 @@
 [-/+ pair not recoverable: per the commit message, a hard-coded URL in the page head becomes a url_for() call; the HTML tags were stripped in extraction]
 {% block head %}{% endblock %}
@@ -55,7 +55,7 @@
 {% endfor %}
  • ExaFS version {{ session['app_version'] }}
  [-/+ pair not recoverable: per the commit message, the hard-coded "API docs" link URL becomes a url_for() call; the list markup was stripped in extraction]
  • {% endif %} @@ -111,9 +111,9 @@ - - - + + + From 016038a80134f868f2fbcb750ae36f1a758c4445 Mon Sep 17 00:00:00 2001 From: Jiri Vrany Date: Mon, 2 Feb 2026 14:19:54 +0100 Subject: [PATCH 02/13] version 1.2.2 for develop --- flowapp/__about__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/flowapp/__about__.py b/flowapp/__about__.py index 6cbfe4c..d5b95ac 100755 --- a/flowapp/__about__.py +++ b/flowapp/__about__.py @@ -1,4 +1,4 @@ -__version__ = "1.2.1" +__version__ = "1.2.2" __title__ = "ExaFS" __description__ = "Tool for creation, validation, and execution of ExaBGP messages." __author__ = "CESNET / Jiri Vrany, Petr Adamec, Josef Verich, Jakub Man" From 9f09b787e49e43bd8486e2783c2f0386dfd3a756 Mon Sep 17 00:00:00 2001 From: Jiri Vrany Date: Mon, 16 Feb 2026 15:07:18 +0100 Subject: [PATCH 03/13] Replace db-init.py with baseline Alembic migration, track migrations in git - Add baseline migration (001_baseline) with complete v1.2.2 schema and seed data - Rewrite db-init.py to run flask db upgrade instead of raw db.create_all() - Remove /admin/set-org-if-zero endpoint, extract logic to scripts/migrate_v0x_to_v1.py - Remove migrations/ from .gitignore - Rewrite docs/DB_MIGRATIONS.md with new workflow --- .gitignore | 1 - CLAUDE.md | 19 +- db-init.py | 71 ++--- docs/DB_MIGRATIONS.md | 85 ++++-- flowapp/models/community.py | 1 + flowapp/models/organization.py | 1 + flowapp/models/rules/base.py | 2 + flowapp/models/user.py | 1 + flowapp/views/admin.py | 49 +--- migrations/README | 1 + migrations/alembic.ini | 50 ++++ migrations/env.py | 113 ++++++++ migrations/script.py.mako | 24 ++ migrations/versions/001_baseline.py | 401 ++++++++++++++++++++++++++++ scripts/migrate_v0x_to_v1.py | 95 +++++++ 15 files changed, 810 insertions(+), 104 deletions(-) create mode 100644 migrations/README create mode 100644 migrations/alembic.ini create mode 100644 migrations/env.py create mode 100644 migrations/script.py.mako create mode 100644 migrations/versions/001_baseline.py create mode 100644 scripts/migrate_v0x_to_v1.py diff --git a/.gitignore b/.gitignore index b8b1412..3813b9e 100644 --- a/.gitignore +++ b/.gitignore @@ -1,7 +1,6 @@ config.py instance_config_override.py run.py -migrations/ # PyPi .pypirc diff --git a/CLAUDE.md b/CLAUDE.md index 89728a2..1873abe 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -115,7 +115,9 @@ exafs/ ├── config.example.py # Configuration template ├── instance_config_override.example.py # Dashboard override template ├── run.example.py # Application run script template -├── db-init.py # Database initialization script +├── db-init.py # Database initialization (runs flask db upgrade) +├── scripts/ +│ └── migrate_v0x_to_v1.py # Optional v0.x to v1.0+ migration helper ├── pyproject.toml # Project metadata and dependencies ├── setup.cfg # Setup configuration ├── CHANGELOG.md # Version history @@ -283,7 +285,7 @@ cp run.example.py run.py # Edit config.py with database credentials and settings -# Initialize database +# Initialize database (runs flask db upgrade) python db-init.py # Run tests @@ -295,8 +297,10 @@ python run.py ### Database Migrations +Migration files are tracked in `migrations/versions/` and committed to git. 
+ ```bash -# Create a new migration +# Create a new migration after model changes flask db migrate -m "Description of changes" # Apply migrations @@ -304,6 +308,9 @@ flask db upgrade # Rollback migration flask db downgrade + +# For existing databases adopting migrations for the first time +flask db stamp 001_baseline ``` ### Running Tests @@ -788,7 +795,9 @@ flask db upgrade # Apply migrations flake8 . # Lint code # Database -python db-init.py # Initialize database +python db-init.py # Initialize database (runs migrations) +python db-init.py --reset # Drop all tables and recreate (dev only) +flask db stamp 001_baseline # Mark existing DB as baseline flask db current # Show current migration flask db history # Show migration history @@ -804,7 +813,7 @@ supervisorctl status # Check status When working with this codebase: 1. **Always run tests** after making changes: `pytest` -2. **Create migrations** for model changes: `flask db migrate` +2. **Create migrations** for model changes: `flask db migrate` — commit migration files to git 3. **Follow the service layer pattern** - business logic goes in services, not views 4. **Use existing validators** in `flowapp/validators.py` for validation 5. **Check authentication** - most routes need `@auth_required` decorator diff --git a/db-init.py b/db-init.py index 2c8fd68..49ee7ff 100644 --- a/db-init.py +++ b/db-init.py @@ -1,39 +1,46 @@ +""" +Initialize the ExaFS database using Alembic migrations. -from flask import Flask -from flowapp import db -from flowapp.models import * +Usage: + python db-init.py # Create database from baseline migration + python db-init.py --reset # Drop all tables first, then recreate (DESTRUCTIVE) +""" -import config -from os import environ +import sys +from flask_migrate import upgrade +from flowapp import create_app, db -def create_app(): - app = Flask('FlowSpecDB init') - # Configurations - try: - env = environ['USERNAME'] - except KeyError as e: - env = 'Production' - if env == 'albert': - print("DEVEL") - app.config.from_object(config.DevelopmentConfig) - else: - print("PRODUCTION") - app.config.from_object(config.ProductionConfig) - - db.init_app(app) +def init_db(reset=False): + app = create_app() with app.app_context(): - print("#: cleaning database") - db.reflect() - db.drop_all() - print("#: creating tables") - db.create_all() - - - return app - - -if __name__ == '__main__': - create_app().app_context().push() + if reset: + print("#: WARNING - dropping all tables") + db.reflect() + db.drop_all() + # Also remove alembic_version if it exists + from sqlalchemy import text + + try: + db.session.execute(text("DROP TABLE IF EXISTS alembic_version")) + db.session.commit() + except Exception: + db.session.rollback() + + print("#: running migrations (flask db upgrade)") + upgrade() + print("#: database initialized successfully") + + +if __name__ == "__main__": + reset = "--reset" in sys.argv + if reset: + print("Reset mode: all existing data will be DESTROYED.") + confirm = input("Are you sure? (yes/no): ") + if confirm.lower() != "yes": + print("Aborted.") + sys.exit(0) + + init_db(reset=reset) diff --git a/docs/DB_MIGRATIONS.md b/docs/DB_MIGRATIONS.md index 93b8de3..4bb8305 100644 --- a/docs/DB_MIGRATIONS.md +++ b/docs/DB_MIGRATIONS.md @@ -1,35 +1,84 @@ -# How to Upgrade the Database +# Database Migrations -## General Guidelines -Migrations can be inconsistent. To avoid issues, we removed migrations from git repostory. 
To start the migration on your server, it is recomended reset the migration state on the server and run the migration based on the updated database models when switching application versions via Git. +ExaFS uses [Flask-Migrate](https://flask-migrate.readthedocs.io/) (Alembic) for database schema management. All migration files are tracked in the `migrations/` directory. + +## New Installation + +For a fresh database, run the migrations to create all tables and seed data: ```bash -rm -rf migrations/ +flask db upgrade ``` -```SQL -DROP TABLE alembic_version; +Or use the init script: + +```bash +python db-init.py ``` +## Upgrading Between Versions + +When upgrading ExaFS to a new version, apply any new migrations: + ```bash -flask db init -flask db migrate -m "Initial migration based on current DB state" flask db upgrade ``` -## Steps for Upgrading to v1.0.x -Limits for number of rules were introduced. Some database engines (Mariadb 10.x for example) have issue to set Non Null foreigin key to 0 and automatic migrations fail. The solution may be in diferent version (Mariadb 11.x works fine), or to set limits in db manually later. +This will apply only the migrations that haven't been applied yet. + +## Existing Installation (One-Time Setup) + +If you already have a running ExaFS database and are adopting the migration workflow for the first time, you need to tell Alembic that your database is already at the baseline state: + +```bash +flask db stamp 001_baseline +``` + +This writes the baseline revision ID to the `alembic_version` table without executing any SQL. From this point on, `flask db upgrade` will apply only newer migrations. + +## Upgrading from v0.x to v1.0+ -To set the limit to 0 for existing organizations run +If you are upgrading from a pre-1.0 version, rules need to be linked to organizations. An optional helper script is provided: -```SQL -UPDATE organization -SET limit_flowspec4 = 0, limit_flowspec6 = 0, limit_rtbh = 0 -WHERE limit_flowspec4 IS NULL OR limit_flowspec6 IS NULL OR limit_rtbh IS NULL; +```bash +python scripts/migrate_v0x_to_v1.py ``` -In all cases we need later assign rules to organizations. There's an admin endpoint for this: +This script: +1. Sets NULL organization limits to 0 +2. Assigns rules with `org_id=0` to the user's organization +3. Reports users with multiple organizations that need manual assignment + +After running the script, stamp the baseline: -`https://yourexafs.url/admin/set-org-if-zero` +```bash +flask db stamp 001_baseline +``` + +Feel free to contact jiri.vrany@cesnet.cz if you need help with the migration. + +## Creating New Migrations + +When you modify a database model, create a new migration: + +```bash +flask db migrate -m "Description of changes" +``` + +Review the generated file in `migrations/versions/`, then apply it: + +```bash +flask db upgrade +``` + +Commit the migration file to git so other deployments can apply it. + +## Development Reset + +To completely reset the database during development: + +```bash +python db-init.py --reset +``` -Or you can start with clean database and manually migrate data by SQL dump later. Feel free to contact jiri.vrany@cesnet.cz if you need help with the DB migration to 1.0.x. +This drops all tables and recreates them from scratch. 
**Do not use in production.** diff --git a/flowapp/models/community.py b/flowapp/models/community.py index 880a837..5df4310 100644 --- a/flowapp/models/community.py +++ b/flowapp/models/community.py @@ -45,6 +45,7 @@ class ASPath(db.Model): # Methods and initializer +# Note: seed data is also defined in migrations/versions/001_baseline.py - keep in sync @event.listens_for(Community.__table__, "after_create") def insert_initial_communities(table, conn, *args, **kwargs): conn.execute( diff --git a/flowapp/models/organization.py b/flowapp/models/organization.py index baf0ec1..67db8c5 100644 --- a/flowapp/models/organization.py +++ b/flowapp/models/organization.py @@ -29,6 +29,7 @@ def get_users(self): # Event listeners for Organization +# Note: seed data is also defined in migrations/versions/001_baseline.py - keep in sync @event.listens_for(Organization.__table__, "after_create") def insert_initial_organizations(table, conn, *args, **kwargs): conn.execute(table.insert().values(name="TU Liberec", arange="147.230.0.0/16\n2001:718:1c01::/48")) diff --git a/flowapp/models/rules/base.py b/flowapp/models/rules/base.py index 22fbc08..cbe889d 100644 --- a/flowapp/models/rules/base.py +++ b/flowapp/models/rules/base.py @@ -32,6 +32,7 @@ def __init__(self, name, command, description, role_id=2): # Event listeners for Rstate +# Note: seed data is also defined in migrations/versions/001_baseline.py - keep in sync @event.listens_for(Rstate.__table__, "after_create") def insert_initial_rulestates(table, conn, *args, **kwargs): conn.execute(table.insert().values(description="active rule")) @@ -40,6 +41,7 @@ def insert_initial_rulestates(table, conn, *args, **kwargs): conn.execute(table.insert().values(description="whitelisted rule")) +# Note: seed data is also defined in migrations/versions/001_baseline.py - keep in sync @event.listens_for(Action.__table__, "after_create") def insert_initial_actions(table, conn, *args, **kwargs): conn.execute( diff --git a/flowapp/models/user.py b/flowapp/models/user.py index dcb2d7e..78a028d 100644 --- a/flowapp/models/user.py +++ b/flowapp/models/user.py @@ -72,6 +72,7 @@ def __repr__(self): # Event listeners for Role +# Note: seed data is also defined in migrations/versions/001_baseline.py - keep in sync @event.listens_for(Role.__table__, "after_create") def insert_initial_roles(table, conn, *args, **kwargs): conn.execute(table.insert().values(name="view", description="just view, no edit")) diff --git a/flowapp/views/admin.py b/flowapp/views/admin.py index cf5241d..d00c6d8 100644 --- a/flowapp/views/admin.py +++ b/flowapp/views/admin.py @@ -3,7 +3,7 @@ from datetime import datetime, timedelta import secrets -from sqlalchemy import func, text +from sqlalchemy import func from flask import Blueprint, render_template, redirect, flash, request, session, url_for, current_app import sqlalchemy from sqlalchemy.exc import IntegrityError, OperationalError @@ -11,7 +11,6 @@ from ..forms import ASPathForm, BulkUserForm, MachineApiKeyForm, UserForm, ActionForm, OrganizationForm, CommunityForm from ..models import ( ASPath, - ApiKey, MachineApiKey, User, Action, @@ -660,49 +659,3 @@ def delete_community(community_id): flash(message, alert_type) return redirect(url_for("admin.communities")) - - -@admin.route("/set-org-if-zero", methods=["GET"]) -@auth_required -@admin_required -def update_set_org(): - # Define the raw SQL update statement - update_statement = update_statement = text( - """ - UPDATE organization - SET limit_flowspec4 = 0, limit_flowspec6 = 0, limit_rtbh = 0 - WHERE 
limit_flowspec4 IS NULL OR limit_flowspec6 IS NULL OR limit_rtbh IS NULL; - """ - ) - try: - # Execute the update query - db.session.execute(update_statement) - db.session.commit() - except Exception as e: - db.session.rollback() - flash(f"Error updating organizations: {e}", "alert-danger") - - # Get all flowspec records where org_id is NULL (if this is needed) - models = [Flowspec4, Flowspec6, RTBH, ApiKey, MachineApiKey] - user_with_multiple_orgs = {} - for model in models: - data_records = model.query.filter(model.org_id == 0).all() - # Loop through each flowspec record and update org_id based on the user's organization - updated = 0 - for row in data_records: - orgs = row.user.organization.all() - if len(orgs) == 1: - user_org = orgs[0] - if user_org: - row.org_id = user_org.id - updated += 1 - else: - user_with_multiple_orgs[row.user.email] = [org.name for org in orgs] - # Commit the changes - try: - db.session.commit() - except Exception as e: - db.session.rollback() - flash(f"Error updating {model.__name__}: {e}", "alert-danger") - - return render_template("pages/user_list.html", users=user_with_multiple_orgs, updated=updated) diff --git a/migrations/README b/migrations/README new file mode 100644 index 0000000..0e04844 --- /dev/null +++ b/migrations/README @@ -0,0 +1 @@ +Single-database configuration for Flask. diff --git a/migrations/alembic.ini b/migrations/alembic.ini new file mode 100644 index 0000000..ec9d45c --- /dev/null +++ b/migrations/alembic.ini @@ -0,0 +1,50 @@ +# A generic, single database configuration. + +[alembic] +# template used to generate migration files +# file_template = %%(rev)s_%%(slug)s + +# set to 'true' to run the environment during +# the 'revision' command, regardless of autogenerate +# revision_environment = false + + +# Logging configuration +[loggers] +keys = root,sqlalchemy,alembic,flask_migrate + +[handlers] +keys = console + +[formatters] +keys = generic + +[logger_root] +level = WARN +handlers = console +qualname = + +[logger_sqlalchemy] +level = WARN +handlers = +qualname = sqlalchemy.engine + +[logger_alembic] +level = INFO +handlers = +qualname = alembic + +[logger_flask_migrate] +level = INFO +handlers = +qualname = flask_migrate + +[handler_console] +class = StreamHandler +args = (sys.stderr,) +level = NOTSET +formatter = generic + +[formatter_generic] +format = %(levelname)-5.5s [%(name)s] %(message)s +datefmt = %H:%M:%S diff --git a/migrations/env.py b/migrations/env.py new file mode 100644 index 0000000..4c97092 --- /dev/null +++ b/migrations/env.py @@ -0,0 +1,113 @@ +import logging +from logging.config import fileConfig + +from flask import current_app + +from alembic import context + +# this is the Alembic Config object, which provides +# access to the values within the .ini file in use. +config = context.config + +# Interpret the config file for Python logging. +# This line sets up loggers basically. 
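+# fileConfig() below loads the [loggers]/[handlers]/[formatters] sections
+# defined in migrations/alembic.ini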
+fileConfig(config.config_file_name) +logger = logging.getLogger('alembic.env') + + +def get_engine(): + try: + # this works with Flask-SQLAlchemy<3 and Alchemical + return current_app.extensions['migrate'].db.get_engine() + except (TypeError, AttributeError): + # this works with Flask-SQLAlchemy>=3 + return current_app.extensions['migrate'].db.engine + + +def get_engine_url(): + try: + return get_engine().url.render_as_string(hide_password=False).replace( + '%', '%%') + except AttributeError: + return str(get_engine().url).replace('%', '%%') + + +# add your model's MetaData object here +# for 'autogenerate' support +# from myapp import mymodel +# target_metadata = mymodel.Base.metadata +config.set_main_option('sqlalchemy.url', get_engine_url()) +target_db = current_app.extensions['migrate'].db + +# other values from the config, defined by the needs of env.py, +# can be acquired: +# my_important_option = config.get_main_option("my_important_option") +# ... etc. + + +def get_metadata(): + if hasattr(target_db, 'metadatas'): + return target_db.metadatas[None] + return target_db.metadata + + +def run_migrations_offline(): + """Run migrations in 'offline' mode. + + This configures the context with just a URL + and not an Engine, though an Engine is acceptable + here as well. By skipping the Engine creation + we don't even need a DBAPI to be available. + + Calls to context.execute() here emit the given string to the + script output. + + """ + url = config.get_main_option("sqlalchemy.url") + context.configure( + url=url, target_metadata=get_metadata(), literal_binds=True + ) + + with context.begin_transaction(): + context.run_migrations() + + +def run_migrations_online(): + """Run migrations in 'online' mode. + + In this scenario we need to create an Engine + and associate a connection with the context. + + """ + + # this callback is used to prevent an auto-migration from being generated + # when there are no changes to the schema + # reference: http://alembic.zzzcomputing.com/en/latest/cookbook.html + def process_revision_directives(context, revision, directives): + if getattr(config.cmd_opts, 'autogenerate', False): + script = directives[0] + if script.upgrade_ops.is_empty(): + directives[:] = [] + logger.info('No changes in schema detected.') + + conf_args = current_app.extensions['migrate'].configure_args + if conf_args.get("process_revision_directives") is None: + conf_args["process_revision_directives"] = process_revision_directives + + connectable = get_engine() + + with connectable.connect() as connection: + context.configure( + connection=connection, + target_metadata=get_metadata(), + **conf_args + ) + + with context.begin_transaction(): + context.run_migrations() + + +if context.is_offline_mode(): + run_migrations_offline() +else: + run_migrations_online() diff --git a/migrations/script.py.mako b/migrations/script.py.mako new file mode 100644 index 0000000..2c01563 --- /dev/null +++ b/migrations/script.py.mako @@ -0,0 +1,24 @@ +"""${message} + +Revision ID: ${up_revision} +Revises: ${down_revision | comma,n} +Create Date: ${create_date} + +""" +from alembic import op +import sqlalchemy as sa +${imports if imports else ""} + +# revision identifiers, used by Alembic. 
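+# the ${...} placeholders are Mako template variables that Alembic fills in
+# when `flask db migrate` renders this template into a new revision file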
+revision = ${repr(up_revision)} +down_revision = ${repr(down_revision)} +branch_labels = ${repr(branch_labels)} +depends_on = ${repr(depends_on)} + + +def upgrade(): + ${upgrades if upgrades else "pass"} + + +def downgrade(): + ${downgrades if downgrades else "pass"} diff --git a/migrations/versions/001_baseline.py b/migrations/versions/001_baseline.py new file mode 100644 index 0000000..51df0ac --- /dev/null +++ b/migrations/versions/001_baseline.py @@ -0,0 +1,401 @@ +"""Baseline migration - complete schema for ExaFS v1.2.2 + +This is the baseline migration that creates the entire database schema. +For new installations: flask db upgrade +For existing installations: flask db stamp 001_baseline + +Revision ID: 001_baseline +Revises: +Create Date: 2026-02-13 + +""" +from alembic import op +import sqlalchemy as sa + + +# revision identifiers, used by Alembic. +revision = "001_baseline" +down_revision = None +branch_labels = None +depends_on = None + + +def upgrade(): + # --- Tables with no foreign key dependencies --- + + role_table = op.create_table( + "role", + sa.Column("id", sa.Integer(), primary_key=True), + sa.Column("name", sa.String(length=20), unique=True), + sa.Column("description", sa.String(length=260)), + ) + + organization_table = op.create_table( + "organization", + sa.Column("id", sa.Integer(), primary_key=True), + sa.Column("name", sa.String(length=150), unique=True), + sa.Column("arange", sa.Text()), + sa.Column("limit_flowspec4", sa.Integer(), default=0), + sa.Column("limit_flowspec6", sa.Integer(), default=0), + sa.Column("limit_rtbh", sa.Integer(), default=0), + ) + + rstate_table = op.create_table( + "rstate", + sa.Column("id", sa.Integer(), primary_key=True), + sa.Column("description", sa.String(length=260)), + ) + + op.create_table( + "user", + sa.Column("id", sa.Integer(), primary_key=True), + sa.Column("uuid", sa.String(length=180), unique=True), + sa.Column("comment", sa.String(length=500)), + sa.Column("email", sa.String(length=255)), + sa.Column("name", sa.String(length=255)), + sa.Column("phone", sa.String(length=255)), + ) + + op.create_table( + "as_path", + sa.Column("id", sa.Integer(), primary_key=True), + sa.Column("prefix", sa.String(length=120), unique=True), + sa.Column("as_path", sa.String(length=250)), + ) + + op.create_table( + "log", + sa.Column("id", sa.Integer(), primary_key=True), + sa.Column("time", sa.DateTime()), + sa.Column("task", sa.String(length=1000)), + sa.Column("author", sa.String(length=1000)), + sa.Column("rule_type", sa.Integer()), + sa.Column("rule_id", sa.Integer()), + sa.Column("user_id", sa.Integer()), + ) + + # --- Junction tables --- + + op.create_table( + "user_role", + sa.Column("user_id", sa.Integer(), sa.ForeignKey("user.id"), nullable=False), + sa.Column("role_id", sa.Integer(), sa.ForeignKey("role.id"), nullable=False), + sa.PrimaryKeyConstraint("user_id", "role_id"), + ) + + op.create_table( + "user_organization", + sa.Column("user_id", sa.Integer(), sa.ForeignKey("user.id"), nullable=False), + sa.Column( + "organization_id", + sa.Integer(), + sa.ForeignKey("organization.id"), + nullable=False, + ), + sa.PrimaryKeyConstraint("user_id", "organization_id"), + ) + + # --- Tables with foreign key to role --- + + action_table = op.create_table( + "action", + sa.Column("id", sa.Integer(), primary_key=True), + sa.Column("name", sa.String(length=120), unique=True), + sa.Column("command", sa.String(length=120), unique=True), + sa.Column("description", sa.String(length=260)), + sa.Column("role_id", sa.Integer(), 
sa.ForeignKey("role.id"), nullable=False), + ) + + community_table = op.create_table( + "community", + sa.Column("id", sa.Integer(), primary_key=True), + sa.Column("name", sa.String(length=120), unique=True), + sa.Column("comm", sa.String(length=2047)), + sa.Column("larcomm", sa.String(length=2047)), + sa.Column("extcomm", sa.String(length=2047)), + sa.Column("description", sa.String(length=255)), + sa.Column("as_path", sa.Boolean(), default=False), + sa.Column("role_id", sa.Integer(), sa.ForeignKey("role.id"), nullable=False), + ) + + # --- API key tables --- + + op.create_table( + "api_key", + sa.Column("id", sa.Integer(), primary_key=True), + sa.Column("machine", sa.String(length=255)), + sa.Column("key", sa.String(length=255)), + sa.Column("readonly", sa.Boolean(), default=False), + sa.Column("expires", sa.DateTime(), nullable=True), + sa.Column("comment", sa.String(length=255)), + sa.Column("user_id", sa.Integer(), sa.ForeignKey("user.id"), nullable=False), + sa.Column( + "org_id", + sa.Integer(), + sa.ForeignKey("organization.id"), + nullable=False, + ), + ) + + op.create_table( + "machine_api_key", + sa.Column("id", sa.Integer(), primary_key=True), + sa.Column("machine", sa.String(length=255)), + sa.Column("key", sa.String(length=255)), + sa.Column("readonly", sa.Boolean(), default=True), + sa.Column("expires", sa.DateTime(), nullable=True), + sa.Column("comment", sa.String(length=255)), + sa.Column("user_id", sa.Integer(), sa.ForeignKey("user.id"), nullable=False), + sa.Column( + "org_id", + sa.Integer(), + sa.ForeignKey("organization.id"), + nullable=False, + ), + ) + + # --- Rule tables --- + + op.create_table( + "flowspec4", + sa.Column("id", sa.Integer(), primary_key=True), + sa.Column("source", sa.String(length=255)), + sa.Column("source_mask", sa.Integer()), + sa.Column("source_port", sa.String(length=255)), + sa.Column("dest", sa.String(length=255)), + sa.Column("dest_mask", sa.Integer()), + sa.Column("dest_port", sa.String(length=255)), + sa.Column("protocol", sa.String(length=255)), + sa.Column("flags", sa.String(length=255)), + sa.Column("packet_len", sa.String(length=255)), + sa.Column("fragment", sa.String(length=255)), + sa.Column("comment", sa.Text()), + sa.Column("expires", sa.DateTime()), + sa.Column("created", sa.DateTime()), + sa.Column( + "action_id", sa.Integer(), sa.ForeignKey("action.id"), nullable=False + ), + sa.Column("user_id", sa.Integer(), sa.ForeignKey("user.id"), nullable=False), + sa.Column( + "org_id", + sa.Integer(), + sa.ForeignKey("organization.id"), + nullable=False, + ), + sa.Column( + "rstate_id", sa.Integer(), sa.ForeignKey("rstate.id"), nullable=False + ), + ) + + op.create_table( + "flowspec6", + sa.Column("id", sa.Integer(), primary_key=True), + sa.Column("source", sa.String(length=255)), + sa.Column("source_mask", sa.Integer()), + sa.Column("source_port", sa.String(length=255)), + sa.Column("dest", sa.String(length=255)), + sa.Column("dest_mask", sa.Integer()), + sa.Column("dest_port", sa.String(length=255)), + sa.Column("next_header", sa.String(length=255)), + sa.Column("flags", sa.String(length=255)), + sa.Column("packet_len", sa.String(length=255)), + sa.Column("comment", sa.Text()), + sa.Column("expires", sa.DateTime()), + sa.Column("created", sa.DateTime()), + sa.Column( + "action_id", sa.Integer(), sa.ForeignKey("action.id"), nullable=False + ), + sa.Column("user_id", sa.Integer(), sa.ForeignKey("user.id"), nullable=False), + sa.Column( + "org_id", + sa.Integer(), + sa.ForeignKey("organization.id"), + nullable=False, + ), + 
sa.Column( + "rstate_id", sa.Integer(), sa.ForeignKey("rstate.id"), nullable=False + ), + ) + + op.create_table( + "RTBH", + sa.Column("id", sa.Integer(), primary_key=True), + sa.Column("ipv4", sa.String(length=255)), + sa.Column("ipv4_mask", sa.Integer()), + sa.Column("ipv6", sa.String(length=255)), + sa.Column("ipv6_mask", sa.Integer()), + sa.Column( + "community_id", + sa.Integer(), + sa.ForeignKey("community.id"), + nullable=False, + ), + sa.Column("comment", sa.Text()), + sa.Column("expires", sa.DateTime()), + sa.Column("created", sa.DateTime()), + sa.Column("user_id", sa.Integer(), sa.ForeignKey("user.id"), nullable=False), + sa.Column( + "org_id", + sa.Integer(), + sa.ForeignKey("organization.id"), + nullable=False, + ), + sa.Column( + "rstate_id", sa.Integer(), sa.ForeignKey("rstate.id"), nullable=False + ), + ) + + op.create_table( + "whitelist", + sa.Column("id", sa.Integer(), primary_key=True), + sa.Column("ip", sa.String(length=255)), + sa.Column("mask", sa.Integer()), + sa.Column("comment", sa.Text()), + sa.Column("expires", sa.DateTime()), + sa.Column("created", sa.DateTime()), + sa.Column("user_id", sa.Integer(), sa.ForeignKey("user.id"), nullable=False), + sa.Column( + "org_id", + sa.Integer(), + sa.ForeignKey("organization.id"), + nullable=False, + ), + sa.Column( + "rstate_id", sa.Integer(), sa.ForeignKey("rstate.id"), nullable=False + ), + ) + + op.create_table( + "rule_whitelist_cache", + sa.Column("id", sa.Integer(), primary_key=True), + sa.Column("rid", sa.Integer()), + sa.Column("rtype", sa.Integer()), + sa.Column("rorigin", sa.Integer()), + sa.Column( + "whitelist_id", sa.Integer(), sa.ForeignKey("whitelist.id"), nullable=True + ), + ) + + # --- Seed data --- + + op.bulk_insert( + role_table, + [ + {"name": "view", "description": "just view, no edit"}, + {"name": "user", "description": "can edit"}, + {"name": "admin", "description": "admin"}, + ], + ) + + op.bulk_insert( + organization_table, + [ + { + "name": "TU Liberec", + "arange": "147.230.0.0/16\n2001:718:1c01::/48", + "limit_flowspec4": 0, + "limit_flowspec6": 0, + "limit_rtbh": 0, + }, + { + "name": "Cesnet", + "arange": "147.230.0.0/16\n2001:718:1c01::/48", + "limit_flowspec4": 0, + "limit_flowspec6": 0, + "limit_rtbh": 0, + }, + ], + ) + + op.bulk_insert( + rstate_table, + [ + {"description": "active rule"}, + {"description": "withdrawed rule"}, + {"description": "deleted rule"}, + {"description": "whitelisted rule"}, + ], + ) + + op.bulk_insert( + action_table, + [ + { + "name": "QoS 100 kbps", + "command": "rate-limit 12800", + "description": "QoS", + "role_id": 2, + }, + { + "name": "QoS 1Mbps", + "command": "rate-limit 13107200", + "description": "QoS", + "role_id": 2, + }, + { + "name": "QoS 10Mbps", + "command": "rate-limit 131072000", + "description": "QoS", + "role_id": 2, + }, + { + "name": "Discard", + "command": "discard", + "description": "Discard", + "role_id": 2, + }, + ], + ) + + op.bulk_insert( + community_table, + [ + { + "name": "65535:65283", + "comm": "65535:65283", + "larcomm": "", + "extcomm": "", + "description": "local-as", + "as_path": False, + "role_id": 2, + }, + { + "name": "64496:64511", + "comm": "64496:64511", + "larcomm": "", + "extcomm": "", + "description": "", + "as_path": False, + "role_id": 2, + }, + { + "name": "64497:64510", + "comm": "64497:64510", + "larcomm": "", + "extcomm": "", + "description": "", + "as_path": False, + "role_id": 2, + }, + ], + ) + + +def downgrade(): + op.drop_table("rule_whitelist_cache") + op.drop_table("whitelist") + op.drop_table("RTBH") + 
op.drop_table("flowspec6") + op.drop_table("flowspec4") + op.drop_table("machine_api_key") + op.drop_table("api_key") + op.drop_table("community") + op.drop_table("action") + op.drop_table("user_organization") + op.drop_table("user_role") + op.drop_table("log") + op.drop_table("as_path") + op.drop_table("user") + op.drop_table("rstate") + op.drop_table("organization") + op.drop_table("role") diff --git a/scripts/migrate_v0x_to_v1.py b/scripts/migrate_v0x_to_v1.py new file mode 100644 index 0000000..3191021 --- /dev/null +++ b/scripts/migrate_v0x_to_v1.py @@ -0,0 +1,95 @@ +""" +Optional migration helper for upgrading from ExaFS v0.x to v1.0+ + +This script handles the one-time data migration required when rules became +organization-dependent in v1.0.0. It: +1. Sets NULL organization limits to 0 +2. Assigns rules with org_id=0 to the user's organization +3. Reports users with multiple organizations that need manual assignment + +Usage: + python scripts/migrate_v0x_to_v1.py + +After running this script, stamp the baseline migration: + flask db stamp 001_baseline +""" + +from flask import Flask +from flowapp import create_app, db +from flowapp.models import Flowspec4, Flowspec6, RTBH, ApiKey, MachineApiKey +from sqlalchemy import text + + +def migrate_org_data(): + app = create_app() + + with app.app_context(): + # Step 1: Set NULL organization limits to 0 + print("Setting NULL organization limits to 0...") + update_statement = text( + """ + UPDATE organization + SET limit_flowspec4 = 0, limit_flowspec6 = 0, limit_rtbh = 0 + WHERE limit_flowspec4 IS NULL OR limit_flowspec6 IS NULL OR limit_rtbh IS NULL; + """ + ) + try: + result = db.session.execute(update_statement) + db.session.commit() + print(f" Updated organization limits.") + except Exception as e: + db.session.rollback() + print(f" Error updating organizations: {e}") + return + + # Step 2: Assign rules with org_id=0 to user's organization + print("\nAssigning rules with org_id=0 to user organizations...") + models = [Flowspec4, Flowspec6, RTBH, ApiKey, MachineApiKey] + users_with_multiple_orgs = {} + total_updated = 0 + + for model in models: + model_name = model.__name__ + data_records = model.query.filter(model.org_id == 0).all() + + if not data_records: + print(f" {model_name}: no records with org_id=0") + continue + + updated = 0 + for row in data_records: + orgs = row.user.organization.all() + if len(orgs) == 1: + row.org_id = orgs[0].id + updated += 1 + else: + users_with_multiple_orgs[row.user.email] = [ + org.name for org in orgs + ] + + try: + db.session.commit() + print(f" {model_name}: updated {updated} records") + total_updated += updated + except Exception as e: + db.session.rollback() + print(f" {model_name}: error - {e}") + + # Step 3: Report results + print(f"\nTotal records updated: {total_updated}") + + if users_with_multiple_orgs: + print("\nUsers with multiple organizations (need manual assignment):") + for email, orgs in users_with_multiple_orgs.items(): + print(f" {email}: {', '.join(orgs)}") + print( + "\nPlease manually assign org_id for rules belonging to these users." 
+ ) + else: + print("\nAll records assigned successfully.") + + print("\nNext step: flask db stamp 001_baseline") + + +if __name__ == "__main__": + migrate_org_data() From 385434f74c19849ef70d42aa5fe1c1a9f7719557 Mon Sep 17 00:00:00 2001 From: Jiri Vrany Date: Tue, 17 Feb 2026 15:38:52 +0100 Subject: [PATCH 04/13] =?UTF-8?q?Baseline=20migration=20(001=5Fbaseline.py?= =?UTF-8?q?)=20=E2=80=94=20idempotent,=20handles=20any=20ExaFS=20database?= =?UTF-8?q?=20from=20v0.4+=20to=20current=20Migration=20tracking=20in=20gi?= =?UTF-8?q?t=20=E2=80=94=20removed=20migrations/=20from=20.gitignore=20db-?= =?UTF-8?q?init.py=20rewritten=20to=20use=20flask=20db=20upgrade=20instead?= =?UTF-8?q?=20of=20db.create=5Fall()=20/admin/set-org-if-zero=20removed=20?= =?UTF-8?q?=E2=80=94=20replaced=20with=20standalone=20migrate=5Fv0x=5Fto?= =?UTF-8?q?=5Fv1.py=20Docker=20dev=20container=20=E2=80=94=20added=20PYTHO?= =?UTF-8?q?NPATH=3D/app=20for=20easier=20development=20Alembic=20env.py=20?= =?UTF-8?q?=E2=80=94=20fixed=20deprecation=20warning=20Docs=20updated=20?= =?UTF-8?q?=E2=80=94=20DB=5FMIGRATIONS.md=20and=20CHANGELOG.md?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- CHANGELOG.md | 17 + db-init.py | 11 +- docs/DB_MIGRATIONS.md | 41 +- migrations/env.py | 7 +- migrations/versions/001_baseline.py | 880 +++++++++++++++++----------- scripts/migrate_v0x_to_v1.py | 23 +- 6 files changed, 605 insertions(+), 374 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index dad22de..aa473cf 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,21 @@ All notable changes to ExaFS will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). 
+## [1.2.2] - 2026-02-16 + +### Changed +- **Database migrations now tracked in git** — `migrations/` removed from `.gitignore` +- Replaced `db-init.py` with migration-based initialization (`flask db upgrade`) +- Removed one-time `/admin/set-org-if-zero` endpoint, replaced with standalone `scripts/migrate_v0x_to_v1.py` +- Fixed Flask-SQLAlchemy deprecation warning in Alembic `env.py` +- Template URLs changed to use `url_for` helper, removed unused `rule.html` template + +### Added +- Idempotent baseline migration (`001_baseline`) that brings any ExaFS database (from v0.4+ to current) to the v1.2.2 schema +- Optional `scripts/migrate_v0x_to_v1.py` helper for v0.x to v1.0+ data migration (org_id backfill) +- `db-init.py --reset` flag for development database reset +- `PYTHONPATH` set in Docker dev container for easier development + ## [1.2.1] - 2026-01-30 ### Fixed @@ -286,6 +301,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Route Distinguisher for VRF now supported - See config example and update your `config.py` +[1.2.2]: https://github.com/CESNET/exafs/compare/v1.2.1...v1.2.2 +[1.2.1]: https://github.com/CESNET/exafs/compare/v1.2.0...v1.2.1 [1.2.0]: https://github.com/CESNET/exafs/compare/v1.1.9...v1.2.0 [1.1.9]: https://github.com/CESNET/exafs/compare/v1.1.8...v1.1.9 [1.1.8]: https://github.com/CESNET/exafs/compare/v1.1.7...v1.1.8 diff --git a/db-init.py b/db-init.py index 49ee7ff..19ff813 100644 --- a/db-init.py +++ b/db-init.py @@ -7,13 +7,22 @@ """ import sys +from os import environ from flask_migrate import upgrade from flowapp import create_app, db +import config + def init_db(reset=False): - app = create_app() + exafs_env = environ.get("EXAFS_ENV", "Production").lower() + if exafs_env in ("devel", "development"): + app = create_app(config.DevelopmentConfig) + else: + app = create_app(config.ProductionConfig) + + db.init_app(app) with app.app_context(): if reset: diff --git a/docs/DB_MIGRATIONS.md b/docs/DB_MIGRATIONS.md index 4bb8305..6b25da3 100644 --- a/docs/DB_MIGRATIONS.md +++ b/docs/DB_MIGRATIONS.md @@ -28,17 +28,44 @@ This will apply only the migrations that haven't been applied yet. ## Existing Installation (One-Time Setup) -If you already have a running ExaFS database and are adopting the migration workflow for the first time, you need to tell Alembic that your database is already at the baseline state: +If you already have a running ExaFS database from any previous version, the baseline migration is idempotent — it will create missing tables, add missing columns, and skip anything that already exists. + +### Deployments that used `flask db init` (self-managed migrations) + +Some deployments previously ran `flask db init` to create a local `migrations/` directory and auto-generated migration files. Starting with v1.2.2, migration files are tracked in git and shipped with the project. To switch to the official migrations: + +1. **Delete the local migrations directory** created by `flask db init`: + ```bash + rm -rf migrations/ + ``` + Then pull or copy the project's `migrations/` directory from git. + +2. **Stamp the database** to register the baseline revision without re-running it (your schema is already up to date): + ```bash + flask db stamp 001_baseline + ``` + +3. From now on, just run `flask db upgrade` when updating ExaFS. 
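+
+To confirm the stamp took effect, check the current revision (a quick sanity check; the exact output format can vary between Flask-Migrate versions):
+
+```bash
+flask db current
+# expected to show: 001_baseline (head)
+```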
+ +### Deployments without any migration tracking + +If your database has an `alembic_version` table from a previous migration setup but no local `migrations/` directory, clear it first: + +```sql +DELETE FROM alembic_version; +``` + +Then run the upgrade: ```bash -flask db stamp 001_baseline +flask db upgrade ``` -This writes the baseline revision ID to the `alembic_version` table without executing any SQL. From this point on, `flask db upgrade` will apply only newer migrations. +The baseline migration will inspect your database and bring it up to the current schema without affecting existing data. ## Upgrading from v0.x to v1.0+ -If you are upgrading from a pre-1.0 version, rules need to be linked to organizations. An optional helper script is provided: +If you are upgrading from a pre-1.0 version, the baseline migration will add the missing `org_id` columns and organization limit columns automatically. However, existing rules still need to be linked to organizations. An optional helper script is provided for this: ```bash python scripts/migrate_v0x_to_v1.py @@ -49,12 +76,6 @@ This script: 2. Assigns rules with `org_id=0` to the user's organization 3. Reports users with multiple organizations that need manual assignment -After running the script, stamp the baseline: - -```bash -flask db stamp 001_baseline -``` - Feel free to contact jiri.vrany@cesnet.cz if you need help with the migration. ## Creating New Migrations diff --git a/migrations/env.py b/migrations/env.py index 4c97092..fc132b9 100644 --- a/migrations/env.py +++ b/migrations/env.py @@ -16,12 +16,7 @@ def get_engine(): - try: - # this works with Flask-SQLAlchemy<3 and Alchemical - return current_app.extensions['migrate'].db.get_engine() - except (TypeError, AttributeError): - # this works with Flask-SQLAlchemy>=3 - return current_app.extensions['migrate'].db.engine + return current_app.extensions['migrate'].db.engine def get_engine_url(): diff --git a/migrations/versions/001_baseline.py b/migrations/versions/001_baseline.py index 51df0ac..76065dc 100644 --- a/migrations/versions/001_baseline.py +++ b/migrations/versions/001_baseline.py @@ -1,8 +1,13 @@ """Baseline migration - complete schema for ExaFS v1.2.2 -This is the baseline migration that creates the entire database schema. -For new installations: flask db upgrade -For existing installations: flask db stamp 001_baseline +Idempotent migration that brings any ExaFS database to the v1.2.2 schema. 
+- For new installations: creates all tables and seed data +- For existing installations: creates missing tables, adds missing columns, + skips anything that already exists + +Usage: + New install: flask db upgrade + Existing install: DELETE FROM alembic_version; flask db upgrade Revision ID: 001_baseline Revises: @@ -20,365 +25,546 @@ depends_on = None +def _table_exists(table_name): + """Check if a table exists in the current database.""" + conn = op.get_bind() + return sa.inspect(conn).has_table(table_name) + + +def _column_exists(table_name, column_name): + """Check if a column exists in a table.""" + conn = op.get_bind() + columns = [c["name"] for c in sa.inspect(conn).get_columns(table_name)] + return column_name in columns + + +def _table_has_data(table_name): + """Check if a table has any rows.""" + conn = op.get_bind() + result = conn.execute(sa.text(f"SELECT COUNT(*) FROM `{table_name}`")) + return result.scalar() > 0 + + def upgrade(): # --- Tables with no foreign key dependencies --- - role_table = op.create_table( - "role", - sa.Column("id", sa.Integer(), primary_key=True), - sa.Column("name", sa.String(length=20), unique=True), - sa.Column("description", sa.String(length=260)), - ) - - organization_table = op.create_table( - "organization", - sa.Column("id", sa.Integer(), primary_key=True), - sa.Column("name", sa.String(length=150), unique=True), - sa.Column("arange", sa.Text()), - sa.Column("limit_flowspec4", sa.Integer(), default=0), - sa.Column("limit_flowspec6", sa.Integer(), default=0), - sa.Column("limit_rtbh", sa.Integer(), default=0), - ) - - rstate_table = op.create_table( - "rstate", - sa.Column("id", sa.Integer(), primary_key=True), - sa.Column("description", sa.String(length=260)), - ) - - op.create_table( - "user", - sa.Column("id", sa.Integer(), primary_key=True), - sa.Column("uuid", sa.String(length=180), unique=True), - sa.Column("comment", sa.String(length=500)), - sa.Column("email", sa.String(length=255)), - sa.Column("name", sa.String(length=255)), - sa.Column("phone", sa.String(length=255)), - ) - - op.create_table( - "as_path", - sa.Column("id", sa.Integer(), primary_key=True), - sa.Column("prefix", sa.String(length=120), unique=True), - sa.Column("as_path", sa.String(length=250)), - ) - - op.create_table( - "log", - sa.Column("id", sa.Integer(), primary_key=True), - sa.Column("time", sa.DateTime()), - sa.Column("task", sa.String(length=1000)), - sa.Column("author", sa.String(length=1000)), - sa.Column("rule_type", sa.Integer()), - sa.Column("rule_id", sa.Integer()), - sa.Column("user_id", sa.Integer()), - ) + if not _table_exists("role"): + role_table = op.create_table( + "role", + sa.Column("id", sa.Integer(), primary_key=True), + sa.Column("name", sa.String(length=20), unique=True), + sa.Column("description", sa.String(length=260)), + ) + _seed_roles = True + else: + role_table = sa.table( + "role", + sa.column("name", sa.String), + sa.column("description", sa.String), + ) + _seed_roles = False + + if not _table_exists("organization"): + organization_table = op.create_table( + "organization", + sa.Column("id", sa.Integer(), primary_key=True), + sa.Column("name", sa.String(length=150), unique=True), + sa.Column("arange", sa.Text()), + sa.Column("limit_flowspec4", sa.Integer(), default=0), + sa.Column("limit_flowspec6", sa.Integer(), default=0), + sa.Column("limit_rtbh", sa.Integer(), default=0), + ) + _seed_orgs = True + else: + organization_table = None + _seed_orgs = False + # Add limit columns if missing (pre-v1.0 databases) + for col_name in 
("limit_flowspec4", "limit_flowspec6", "limit_rtbh"): + if not _column_exists("organization", col_name): + op.add_column( + "organization", sa.Column(col_name, sa.Integer(), default=0) + ) + + if not _table_exists("rstate"): + rstate_table = op.create_table( + "rstate", + sa.Column("id", sa.Integer(), primary_key=True), + sa.Column("description", sa.String(length=260)), + ) + _seed_rstates = True + else: + rstate_table = sa.table( + "rstate", + sa.column("description", sa.String), + ) + _seed_rstates = False + + if not _table_exists("user"): + op.create_table( + "user", + sa.Column("id", sa.Integer(), primary_key=True), + sa.Column("uuid", sa.String(length=180), unique=True), + sa.Column("comment", sa.String(length=500)), + sa.Column("email", sa.String(length=255)), + sa.Column("name", sa.String(length=255)), + sa.Column("phone", sa.String(length=255)), + ) + + if not _table_exists("as_path"): + op.create_table( + "as_path", + sa.Column("id", sa.Integer(), primary_key=True), + sa.Column("prefix", sa.String(length=120), unique=True), + sa.Column("as_path", sa.String(length=250)), + ) + + if not _table_exists("log"): + op.create_table( + "log", + sa.Column("id", sa.Integer(), primary_key=True), + sa.Column("time", sa.DateTime()), + sa.Column("task", sa.String(length=1000)), + sa.Column("author", sa.String(length=1000)), + sa.Column("rule_type", sa.Integer()), + sa.Column("rule_id", sa.Integer()), + sa.Column("user_id", sa.Integer()), + ) # --- Junction tables --- - op.create_table( - "user_role", - sa.Column("user_id", sa.Integer(), sa.ForeignKey("user.id"), nullable=False), - sa.Column("role_id", sa.Integer(), sa.ForeignKey("role.id"), nullable=False), - sa.PrimaryKeyConstraint("user_id", "role_id"), - ) - - op.create_table( - "user_organization", - sa.Column("user_id", sa.Integer(), sa.ForeignKey("user.id"), nullable=False), - sa.Column( - "organization_id", - sa.Integer(), - sa.ForeignKey("organization.id"), - nullable=False, - ), - sa.PrimaryKeyConstraint("user_id", "organization_id"), - ) + if not _table_exists("user_role"): + op.create_table( + "user_role", + sa.Column( + "user_id", sa.Integer(), sa.ForeignKey("user.id"), nullable=False + ), + sa.Column( + "role_id", sa.Integer(), sa.ForeignKey("role.id"), nullable=False + ), + sa.PrimaryKeyConstraint("user_id", "role_id"), + ) + + if not _table_exists("user_organization"): + op.create_table( + "user_organization", + sa.Column( + "user_id", sa.Integer(), sa.ForeignKey("user.id"), nullable=False + ), + sa.Column( + "organization_id", + sa.Integer(), + sa.ForeignKey("organization.id"), + nullable=False, + ), + sa.PrimaryKeyConstraint("user_id", "organization_id"), + ) # --- Tables with foreign key to role --- - action_table = op.create_table( - "action", - sa.Column("id", sa.Integer(), primary_key=True), - sa.Column("name", sa.String(length=120), unique=True), - sa.Column("command", sa.String(length=120), unique=True), - sa.Column("description", sa.String(length=260)), - sa.Column("role_id", sa.Integer(), sa.ForeignKey("role.id"), nullable=False), - ) - - community_table = op.create_table( - "community", - sa.Column("id", sa.Integer(), primary_key=True), - sa.Column("name", sa.String(length=120), unique=True), - sa.Column("comm", sa.String(length=2047)), - sa.Column("larcomm", sa.String(length=2047)), - sa.Column("extcomm", sa.String(length=2047)), - sa.Column("description", sa.String(length=255)), - sa.Column("as_path", sa.Boolean(), default=False), - sa.Column("role_id", sa.Integer(), sa.ForeignKey("role.id"), nullable=False), - ) + if 
not _table_exists("action"): + action_table = op.create_table( + "action", + sa.Column("id", sa.Integer(), primary_key=True), + sa.Column("name", sa.String(length=120), unique=True), + sa.Column("command", sa.String(length=120), unique=True), + sa.Column("description", sa.String(length=260)), + sa.Column( + "role_id", sa.Integer(), sa.ForeignKey("role.id"), nullable=False + ), + ) + _seed_actions = True + else: + action_table = sa.table( + "action", + sa.column("name", sa.String), + sa.column("command", sa.String), + sa.column("description", sa.String), + sa.column("role_id", sa.Integer), + ) + _seed_actions = False + + if not _table_exists("community"): + community_table = op.create_table( + "community", + sa.Column("id", sa.Integer(), primary_key=True), + sa.Column("name", sa.String(length=120), unique=True), + sa.Column("comm", sa.String(length=2047)), + sa.Column("larcomm", sa.String(length=2047)), + sa.Column("extcomm", sa.String(length=2047)), + sa.Column("description", sa.String(length=255)), + sa.Column("as_path", sa.Boolean(), default=False), + sa.Column( + "role_id", sa.Integer(), sa.ForeignKey("role.id"), nullable=False + ), + ) + _seed_communities = True + else: + community_table = sa.table( + "community", + sa.column("name", sa.String), + sa.column("comm", sa.String), + sa.column("larcomm", sa.String), + sa.column("extcomm", sa.String), + sa.column("description", sa.String), + sa.column("as_path", sa.Boolean), + sa.column("role_id", sa.Integer), + ) + _seed_communities = False + # Add as_path column if missing (pre-v1.1 databases) + if not _column_exists("community", "as_path"): + op.add_column( + "community", + sa.Column("as_path", sa.Boolean(), default=False), + ) # --- API key tables --- - op.create_table( - "api_key", - sa.Column("id", sa.Integer(), primary_key=True), - sa.Column("machine", sa.String(length=255)), - sa.Column("key", sa.String(length=255)), - sa.Column("readonly", sa.Boolean(), default=False), - sa.Column("expires", sa.DateTime(), nullable=True), - sa.Column("comment", sa.String(length=255)), - sa.Column("user_id", sa.Integer(), sa.ForeignKey("user.id"), nullable=False), - sa.Column( - "org_id", - sa.Integer(), - sa.ForeignKey("organization.id"), - nullable=False, - ), - ) - - op.create_table( - "machine_api_key", - sa.Column("id", sa.Integer(), primary_key=True), - sa.Column("machine", sa.String(length=255)), - sa.Column("key", sa.String(length=255)), - sa.Column("readonly", sa.Boolean(), default=True), - sa.Column("expires", sa.DateTime(), nullable=True), - sa.Column("comment", sa.String(length=255)), - sa.Column("user_id", sa.Integer(), sa.ForeignKey("user.id"), nullable=False), - sa.Column( - "org_id", - sa.Integer(), - sa.ForeignKey("organization.id"), - nullable=False, - ), - ) + if not _table_exists("api_key"): + op.create_table( + "api_key", + sa.Column("id", sa.Integer(), primary_key=True), + sa.Column("machine", sa.String(length=255)), + sa.Column("key", sa.String(length=255)), + sa.Column("readonly", sa.Boolean(), default=False), + sa.Column("expires", sa.DateTime(), nullable=True), + sa.Column("comment", sa.String(length=255)), + sa.Column( + "user_id", sa.Integer(), sa.ForeignKey("user.id"), nullable=False + ), + sa.Column( + "org_id", + sa.Integer(), + sa.ForeignKey("organization.id"), + nullable=False, + ), + ) + else: + # Add columns introduced after initial api_key creation + for col_name, col_type, col_default in [ + ("comment", sa.String(length=255), None), + ("readonly", sa.Boolean(), False), + ("expires", sa.DateTime(), None), + ]: + if 
not _column_exists("api_key", col_name): + op.add_column( + "api_key", + sa.Column(col_name, col_type, default=col_default), + ) + if not _column_exists("api_key", "org_id"): + op.add_column( + "api_key", + sa.Column( + "org_id", + sa.Integer(), + sa.ForeignKey("organization.id"), + nullable=True, + ), + ) + + if not _table_exists("machine_api_key"): + op.create_table( + "machine_api_key", + sa.Column("id", sa.Integer(), primary_key=True), + sa.Column("machine", sa.String(length=255)), + sa.Column("key", sa.String(length=255)), + sa.Column("readonly", sa.Boolean(), default=True), + sa.Column("expires", sa.DateTime(), nullable=True), + sa.Column("comment", sa.String(length=255)), + sa.Column( + "user_id", sa.Integer(), sa.ForeignKey("user.id"), nullable=False + ), + sa.Column( + "org_id", + sa.Integer(), + sa.ForeignKey("organization.id"), + nullable=False, + ), + ) # --- Rule tables --- - op.create_table( - "flowspec4", - sa.Column("id", sa.Integer(), primary_key=True), - sa.Column("source", sa.String(length=255)), - sa.Column("source_mask", sa.Integer()), - sa.Column("source_port", sa.String(length=255)), - sa.Column("dest", sa.String(length=255)), - sa.Column("dest_mask", sa.Integer()), - sa.Column("dest_port", sa.String(length=255)), - sa.Column("protocol", sa.String(length=255)), - sa.Column("flags", sa.String(length=255)), - sa.Column("packet_len", sa.String(length=255)), - sa.Column("fragment", sa.String(length=255)), - sa.Column("comment", sa.Text()), - sa.Column("expires", sa.DateTime()), - sa.Column("created", sa.DateTime()), - sa.Column( - "action_id", sa.Integer(), sa.ForeignKey("action.id"), nullable=False - ), - sa.Column("user_id", sa.Integer(), sa.ForeignKey("user.id"), nullable=False), - sa.Column( - "org_id", - sa.Integer(), - sa.ForeignKey("organization.id"), - nullable=False, - ), - sa.Column( - "rstate_id", sa.Integer(), sa.ForeignKey("rstate.id"), nullable=False - ), - ) - - op.create_table( - "flowspec6", - sa.Column("id", sa.Integer(), primary_key=True), - sa.Column("source", sa.String(length=255)), - sa.Column("source_mask", sa.Integer()), - sa.Column("source_port", sa.String(length=255)), - sa.Column("dest", sa.String(length=255)), - sa.Column("dest_mask", sa.Integer()), - sa.Column("dest_port", sa.String(length=255)), - sa.Column("next_header", sa.String(length=255)), - sa.Column("flags", sa.String(length=255)), - sa.Column("packet_len", sa.String(length=255)), - sa.Column("comment", sa.Text()), - sa.Column("expires", sa.DateTime()), - sa.Column("created", sa.DateTime()), - sa.Column( - "action_id", sa.Integer(), sa.ForeignKey("action.id"), nullable=False - ), - sa.Column("user_id", sa.Integer(), sa.ForeignKey("user.id"), nullable=False), - sa.Column( - "org_id", - sa.Integer(), - sa.ForeignKey("organization.id"), - nullable=False, - ), - sa.Column( - "rstate_id", sa.Integer(), sa.ForeignKey("rstate.id"), nullable=False - ), - ) - - op.create_table( - "RTBH", - sa.Column("id", sa.Integer(), primary_key=True), - sa.Column("ipv4", sa.String(length=255)), - sa.Column("ipv4_mask", sa.Integer()), - sa.Column("ipv6", sa.String(length=255)), - sa.Column("ipv6_mask", sa.Integer()), - sa.Column( - "community_id", - sa.Integer(), - sa.ForeignKey("community.id"), - nullable=False, - ), - sa.Column("comment", sa.Text()), - sa.Column("expires", sa.DateTime()), - sa.Column("created", sa.DateTime()), - sa.Column("user_id", sa.Integer(), sa.ForeignKey("user.id"), nullable=False), - sa.Column( - "org_id", - sa.Integer(), - sa.ForeignKey("organization.id"), - nullable=False, - ), - 
sa.Column( - "rstate_id", sa.Integer(), sa.ForeignKey("rstate.id"), nullable=False - ), - ) - - op.create_table( - "whitelist", - sa.Column("id", sa.Integer(), primary_key=True), - sa.Column("ip", sa.String(length=255)), - sa.Column("mask", sa.Integer()), - sa.Column("comment", sa.Text()), - sa.Column("expires", sa.DateTime()), - sa.Column("created", sa.DateTime()), - sa.Column("user_id", sa.Integer(), sa.ForeignKey("user.id"), nullable=False), - sa.Column( - "org_id", - sa.Integer(), - sa.ForeignKey("organization.id"), - nullable=False, - ), - sa.Column( - "rstate_id", sa.Integer(), sa.ForeignKey("rstate.id"), nullable=False - ), - ) - - op.create_table( - "rule_whitelist_cache", - sa.Column("id", sa.Integer(), primary_key=True), - sa.Column("rid", sa.Integer()), - sa.Column("rtype", sa.Integer()), - sa.Column("rorigin", sa.Integer()), - sa.Column( - "whitelist_id", sa.Integer(), sa.ForeignKey("whitelist.id"), nullable=True - ), - ) - - # --- Seed data --- - - op.bulk_insert( - role_table, - [ - {"name": "view", "description": "just view, no edit"}, - {"name": "user", "description": "can edit"}, - {"name": "admin", "description": "admin"}, - ], - ) - - op.bulk_insert( - organization_table, - [ - { - "name": "TU Liberec", - "arange": "147.230.0.0/16\n2001:718:1c01::/48", - "limit_flowspec4": 0, - "limit_flowspec6": 0, - "limit_rtbh": 0, - }, - { - "name": "Cesnet", - "arange": "147.230.0.0/16\n2001:718:1c01::/48", - "limit_flowspec4": 0, - "limit_flowspec6": 0, - "limit_rtbh": 0, - }, - ], - ) - - op.bulk_insert( - rstate_table, - [ - {"description": "active rule"}, - {"description": "withdrawed rule"}, - {"description": "deleted rule"}, - {"description": "whitelisted rule"}, - ], - ) - - op.bulk_insert( - action_table, - [ - { - "name": "QoS 100 kbps", - "command": "rate-limit 12800", - "description": "QoS", - "role_id": 2, - }, - { - "name": "QoS 1Mbps", - "command": "rate-limit 13107200", - "description": "QoS", - "role_id": 2, - }, - { - "name": "QoS 10Mbps", - "command": "rate-limit 131072000", - "description": "QoS", - "role_id": 2, - }, - { - "name": "Discard", - "command": "discard", - "description": "Discard", - "role_id": 2, - }, - ], - ) - - op.bulk_insert( - community_table, - [ - { - "name": "65535:65283", - "comm": "65535:65283", - "larcomm": "", - "extcomm": "", - "description": "local-as", - "as_path": False, - "role_id": 2, - }, - { - "name": "64496:64511", - "comm": "64496:64511", - "larcomm": "", - "extcomm": "", - "description": "", - "as_path": False, - "role_id": 2, - }, - { - "name": "64497:64510", - "comm": "64497:64510", - "larcomm": "", - "extcomm": "", - "description": "", - "as_path": False, - "role_id": 2, - }, - ], - ) + if not _table_exists("flowspec4"): + op.create_table( + "flowspec4", + sa.Column("id", sa.Integer(), primary_key=True), + sa.Column("source", sa.String(length=255)), + sa.Column("source_mask", sa.Integer()), + sa.Column("source_port", sa.String(length=255)), + sa.Column("dest", sa.String(length=255)), + sa.Column("dest_mask", sa.Integer()), + sa.Column("dest_port", sa.String(length=255)), + sa.Column("protocol", sa.String(length=255)), + sa.Column("flags", sa.String(length=255)), + sa.Column("packet_len", sa.String(length=255)), + sa.Column("fragment", sa.String(length=255)), + sa.Column("comment", sa.Text()), + sa.Column("expires", sa.DateTime()), + sa.Column("created", sa.DateTime()), + sa.Column( + "action_id", sa.Integer(), sa.ForeignKey("action.id"), nullable=False + ), + sa.Column( + "user_id", sa.Integer(), sa.ForeignKey("user.id"), 
nullable=False + ), + sa.Column( + "org_id", + sa.Integer(), + sa.ForeignKey("organization.id"), + nullable=False, + ), + sa.Column( + "rstate_id", sa.Integer(), sa.ForeignKey("rstate.id"), nullable=False + ), + ) + else: + if not _column_exists("flowspec4", "fragment"): + op.add_column( + "flowspec4", + sa.Column("fragment", sa.String(length=255)), + ) + if not _column_exists("flowspec4", "org_id"): + op.add_column( + "flowspec4", + sa.Column( + "org_id", + sa.Integer(), + sa.ForeignKey("organization.id"), + nullable=True, + ), + ) + + if not _table_exists("flowspec6"): + op.create_table( + "flowspec6", + sa.Column("id", sa.Integer(), primary_key=True), + sa.Column("source", sa.String(length=255)), + sa.Column("source_mask", sa.Integer()), + sa.Column("source_port", sa.String(length=255)), + sa.Column("dest", sa.String(length=255)), + sa.Column("dest_mask", sa.Integer()), + sa.Column("dest_port", sa.String(length=255)), + sa.Column("next_header", sa.String(length=255)), + sa.Column("flags", sa.String(length=255)), + sa.Column("packet_len", sa.String(length=255)), + sa.Column("comment", sa.Text()), + sa.Column("expires", sa.DateTime()), + sa.Column("created", sa.DateTime()), + sa.Column( + "action_id", sa.Integer(), sa.ForeignKey("action.id"), nullable=False + ), + sa.Column( + "user_id", sa.Integer(), sa.ForeignKey("user.id"), nullable=False + ), + sa.Column( + "org_id", + sa.Integer(), + sa.ForeignKey("organization.id"), + nullable=False, + ), + sa.Column( + "rstate_id", sa.Integer(), sa.ForeignKey("rstate.id"), nullable=False + ), + ) + else: + if not _column_exists("flowspec6", "org_id"): + op.add_column( + "flowspec6", + sa.Column( + "org_id", + sa.Integer(), + sa.ForeignKey("organization.id"), + nullable=True, + ), + ) + + if not _table_exists("RTBH"): + op.create_table( + "RTBH", + sa.Column("id", sa.Integer(), primary_key=True), + sa.Column("ipv4", sa.String(length=255)), + sa.Column("ipv4_mask", sa.Integer()), + sa.Column("ipv6", sa.String(length=255)), + sa.Column("ipv6_mask", sa.Integer()), + sa.Column( + "community_id", + sa.Integer(), + sa.ForeignKey("community.id"), + nullable=False, + ), + sa.Column("comment", sa.Text()), + sa.Column("expires", sa.DateTime()), + sa.Column("created", sa.DateTime()), + sa.Column( + "user_id", sa.Integer(), sa.ForeignKey("user.id"), nullable=False + ), + sa.Column( + "org_id", + sa.Integer(), + sa.ForeignKey("organization.id"), + nullable=False, + ), + sa.Column( + "rstate_id", sa.Integer(), sa.ForeignKey("rstate.id"), nullable=False + ), + ) + else: + if not _column_exists("RTBH", "org_id"): + op.add_column( + "RTBH", + sa.Column( + "org_id", + sa.Integer(), + sa.ForeignKey("organization.id"), + nullable=True, + ), + ) + + if not _table_exists("whitelist"): + op.create_table( + "whitelist", + sa.Column("id", sa.Integer(), primary_key=True), + sa.Column("ip", sa.String(length=255)), + sa.Column("mask", sa.Integer()), + sa.Column("comment", sa.Text()), + sa.Column("expires", sa.DateTime()), + sa.Column("created", sa.DateTime()), + sa.Column( + "user_id", sa.Integer(), sa.ForeignKey("user.id"), nullable=False + ), + sa.Column( + "org_id", + sa.Integer(), + sa.ForeignKey("organization.id"), + nullable=False, + ), + sa.Column( + "rstate_id", sa.Integer(), sa.ForeignKey("rstate.id"), nullable=False + ), + ) + + if not _table_exists("rule_whitelist_cache"): + op.create_table( + "rule_whitelist_cache", + sa.Column("id", sa.Integer(), primary_key=True), + sa.Column("rid", sa.Integer()), + sa.Column("rtype", sa.Integer()), + sa.Column("rorigin", 
sa.Integer()), + sa.Column( + "whitelist_id", + sa.Integer(), + sa.ForeignKey("whitelist.id"), + nullable=True, + ), + ) + + # --- Seed data (only for newly created tables) --- + + if _seed_roles and not _table_has_data("role"): + op.bulk_insert( + role_table, + [ + {"name": "view", "description": "just view, no edit"}, + {"name": "user", "description": "can edit"}, + {"name": "admin", "description": "admin"}, + ], + ) + + if _seed_orgs and organization_table is not None: + op.bulk_insert( + organization_table, + [ + { + "name": "TU Liberec", + "arange": "147.230.0.0/16\n2001:718:1c01::/48", + "limit_flowspec4": 0, + "limit_flowspec6": 0, + "limit_rtbh": 0, + }, + { + "name": "Cesnet", + "arange": "147.230.0.0/16\n2001:718:1c01::/48", + "limit_flowspec4": 0, + "limit_flowspec6": 0, + "limit_rtbh": 0, + }, + ], + ) + + if _seed_rstates and not _table_has_data("rstate"): + op.bulk_insert( + rstate_table, + [ + {"description": "active rule"}, + {"description": "withdrawed rule"}, + {"description": "deleted rule"}, + {"description": "whitelisted rule"}, + ], + ) + + if _seed_actions and not _table_has_data("action"): + op.bulk_insert( + action_table, + [ + { + "name": "QoS 100 kbps", + "command": "rate-limit 12800", + "description": "QoS", + "role_id": 2, + }, + { + "name": "QoS 1Mbps", + "command": "rate-limit 13107200", + "description": "QoS", + "role_id": 2, + }, + { + "name": "QoS 10Mbps", + "command": "rate-limit 131072000", + "description": "QoS", + "role_id": 2, + }, + { + "name": "Discard", + "command": "discard", + "description": "Discard", + "role_id": 2, + }, + ], + ) + + if _seed_communities and not _table_has_data("community"): + op.bulk_insert( + community_table, + [ + { + "name": "65535:65283", + "comm": "65535:65283", + "larcomm": "", + "extcomm": "", + "description": "local-as", + "as_path": False, + "role_id": 2, + }, + { + "name": "64496:64511", + "comm": "64496:64511", + "larcomm": "", + "extcomm": "", + "description": "", + "as_path": False, + "role_id": 2, + }, + { + "name": "64497:64510", + "comm": "64497:64510", + "larcomm": "", + "extcomm": "", + "description": "", + "as_path": False, + "role_id": 2, + }, + ], + ) def downgrade(): diff --git a/scripts/migrate_v0x_to_v1.py b/scripts/migrate_v0x_to_v1.py index 3191021..9a37511 100644 --- a/scripts/migrate_v0x_to_v1.py +++ b/scripts/migrate_v0x_to_v1.py @@ -14,14 +14,23 @@ flask db stamp 001_baseline """ -from flask import Flask +from os import environ + from flowapp import create_app, db from flowapp.models import Flowspec4, Flowspec6, RTBH, ApiKey, MachineApiKey from sqlalchemy import text +import config + def migrate_org_data(): - app = create_app() + exafs_env = environ.get("EXAFS_ENV", "Production").lower() + if exafs_env in ("devel", "development"): + app = create_app(config.DevelopmentConfig) + else: + app = create_app(config.ProductionConfig) + + db.init_app(app) with app.app_context(): # Step 1: Set NULL organization limits to 0 @@ -63,9 +72,7 @@ def migrate_org_data(): row.org_id = orgs[0].id updated += 1 else: - users_with_multiple_orgs[row.user.email] = [ - org.name for org in orgs - ] + users_with_multiple_orgs[row.user.email] = [org.name for org in orgs] try: db.session.commit() @@ -82,14 +89,10 @@ def migrate_org_data(): print("\nUsers with multiple organizations (need manual assignment):") for email, orgs in users_with_multiple_orgs.items(): print(f" {email}: {', '.join(orgs)}") - print( - "\nPlease manually assign org_id for rules belonging to these users." 
- ) + print("\nPlease manually assign org_id for rules belonging to these users.") else: print("\nAll records assigned successfully.") - print("\nNext step: flask db stamp 001_baseline") - if __name__ == "__main__": migrate_org_data() From 49b8e8c73b7a8a99359f94a27d4505805e980dbd Mon Sep 17 00:00:00 2001 From: Jiri Vrany Date: Tue, 17 Feb 2026 17:06:42 +0100 Subject: [PATCH 05/13] Move migrations/ inside flowapp package so they ship with pip install MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Migrations bundled as package data — no flask db init needed. Flask-Migrate configured to find them via package-relative path. Added community.as_path to baseline migration column checks. Updated DB_MIGRATIONS.md with upgrade paths for existing deployments. --- docs/DB_MIGRATIONS.md | 11 +++++++---- flowapp/__about__.py | 2 +- flowapp/__init__.py | 6 +++++- {migrations => flowapp/migrations}/README | 0 {migrations => flowapp/migrations}/alembic.ini | 0 {migrations => flowapp/migrations}/env.py | 0 {migrations => flowapp/migrations}/script.py.mako | 0 .../migrations}/versions/001_baseline.py | 0 pyproject.toml | 2 ++ 9 files changed, 15 insertions(+), 6 deletions(-) rename {migrations => flowapp/migrations}/README (100%) rename {migrations => flowapp/migrations}/alembic.ini (100%) rename {migrations => flowapp/migrations}/env.py (100%) rename {migrations => flowapp/migrations}/script.py.mako (100%) rename {migrations => flowapp/migrations}/versions/001_baseline.py (100%) diff --git a/docs/DB_MIGRATIONS.md b/docs/DB_MIGRATIONS.md index 6b25da3..9e3bed4 100644 --- a/docs/DB_MIGRATIONS.md +++ b/docs/DB_MIGRATIONS.md @@ -1,6 +1,6 @@ # Database Migrations -ExaFS uses [Flask-Migrate](https://flask-migrate.readthedocs.io/) (Alembic) for database schema management. All migration files are tracked in the `migrations/` directory. +ExaFS uses [Flask-Migrate](https://flask-migrate.readthedocs.io/) (Alembic) for database schema management. Migration files are shipped inside the `flowapp` package (`flowapp/migrations/`) and are found automatically — no `flask db init` is needed. ## New Installation @@ -38,9 +38,12 @@ Some deployments previously ran `flask db init` to create a local `migrations/` ```bash rm -rf migrations/ ``` - Then pull or copy the project's `migrations/` directory from git. + Migrations are now bundled inside the `flowapp` pip package — no local directory needed. -2. **Stamp the database** to register the baseline revision without re-running it (your schema is already up to date): +2. **Clear the old alembic_version** and **stamp the baseline** to register with the official migration track (your schema is already up to date): + ```sql + DELETE FROM alembic_version; + ``` ```bash flask db stamp 001_baseline ``` @@ -86,7 +89,7 @@ When you modify a database model, create a new migration: flask db migrate -m "Description of changes" ``` -Review the generated file in `migrations/versions/`, then apply it: +Review the generated file in `flowapp/migrations/versions/`, then apply it: ```bash flask db upgrade diff --git a/flowapp/__about__.py b/flowapp/__about__.py index d5b95ac..18aa589 100755 --- a/flowapp/__about__.py +++ b/flowapp/__about__.py @@ -1,4 +1,4 @@ -__version__ = "1.2.2" +__version__ = "1.2.2b2" __title__ = "ExaFS" __description__ = "Tool for creation, validation, and execution of ExaBGP messages." 
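A minimal sketch for verifying that bundled layout, assuming ExaFS is pip-installed and importable in the current environment; the expected file listing is illustrative only:

```python
# Locate the migrations directory that ships inside the installed flowapp
# package. This mirrors the path flowapp/__init__.py computes for Flask-Migrate.
import os

import flowapp

migrations_dir = os.path.join(os.path.dirname(flowapp.__file__), "migrations")
print(sorted(os.listdir(os.path.join(migrations_dir, "versions"))))
# On a fresh install this should include 001_baseline.py.
```

Because Flask-Migrate is handed this package-relative directory, `flask db upgrade` finds the baseline revision without any local `migrations/` folder.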
__author__ = "CESNET / Jiri Vrany, Petr Adamec, Josef Verich, Jakub Man" diff --git a/flowapp/__init__.py b/flowapp/__init__.py index 5bed6f8..fb78661 100644 --- a/flowapp/__init__.py +++ b/flowapp/__init__.py @@ -1,4 +1,6 @@ # -*- coding: utf-8 -*- +import os + from flask import Flask, redirect, render_template, session, url_for, flash from flask_sso import SSO @@ -13,9 +15,11 @@ from .__about__ import __version__ from .instance_config import InstanceConfig +# Migrations directory lives inside the package so it ships with pip install +_migrations_dir = os.path.join(os.path.dirname(__file__), "migrations") db = SQLAlchemy() -migrate = Migrate() +migrate = Migrate(directory=_migrations_dir) csrf = CSRFProtect() ext = SSO() sess = Session() diff --git a/migrations/README b/flowapp/migrations/README similarity index 100% rename from migrations/README rename to flowapp/migrations/README diff --git a/migrations/alembic.ini b/flowapp/migrations/alembic.ini similarity index 100% rename from migrations/alembic.ini rename to flowapp/migrations/alembic.ini diff --git a/migrations/env.py b/flowapp/migrations/env.py similarity index 100% rename from migrations/env.py rename to flowapp/migrations/env.py diff --git a/migrations/script.py.mako b/flowapp/migrations/script.py.mako similarity index 100% rename from migrations/script.py.mako rename to flowapp/migrations/script.py.mako diff --git a/migrations/versions/001_baseline.py b/flowapp/migrations/versions/001_baseline.py similarity index 100% rename from migrations/versions/001_baseline.py rename to flowapp/migrations/versions/001_baseline.py diff --git a/pyproject.toml b/pyproject.toml index 0fe354a..d52da3f 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -80,4 +80,6 @@ flowapp = [ "templates/forms/*", "templates/layouts/*", "templates/pages/*", + "migrations/*", + "migrations/versions/*", ] \ No newline at end of file From d3689b4e912400a1146625d112af1fa429032768 Mon Sep 17 00:00:00 2001 From: Jiri Vrany Date: Wed, 18 Feb 2026 17:15:27 +0100 Subject: [PATCH 06/13] Baseline migration: fix column/data checks for pre-v0.5 databases Added missing checks for very old databases (2019 era): - log.author column (added in v0.5.0) - community.comm, larcomm, extcomm columns (added ~v0.7) - rstate id=4 "whitelisted rule" seed data (added in v1.1.0) --- flowapp/migrations/versions/001_baseline.py | 27 +++++++++++++++++++++ 1 file changed, 27 insertions(+) diff --git a/flowapp/migrations/versions/001_baseline.py b/flowapp/migrations/versions/001_baseline.py index 76065dc..99f93fb 100644 --- a/flowapp/migrations/versions/001_baseline.py +++ b/flowapp/migrations/versions/001_baseline.py @@ -129,6 +129,13 @@ def upgrade(): sa.Column("rule_id", sa.Integer()), sa.Column("user_id", sa.Integer()), ) + else: + # Add author column if missing (pre-v0.5 databases) + if not _column_exists("log", "author"): + op.add_column( + "log", + sa.Column("author", sa.String(length=1000)), + ) # --- Junction tables --- @@ -210,6 +217,13 @@ def upgrade(): sa.column("role_id", sa.Integer), ) _seed_communities = False + # Add community columns if missing (pre-v0.7 databases) + for col_name in ("comm", "larcomm", "extcomm"): + if not _column_exists("community", col_name): + op.add_column( + "community", + sa.Column(col_name, sa.String(length=2047)), + ) # Add as_path column if missing (pre-v1.1 databases) if not _column_exists("community", "as_path"): op.add_column( @@ -490,6 +504,19 @@ def upgrade(): ], ) + # Ensure rstate has the "whitelisted rule" entry (id=4, added in v1.1.0) + if 
not _seed_rstates and _table_has_data("rstate"): + conn = op.get_bind() + result = conn.execute( + sa.text("SELECT COUNT(*) FROM rstate WHERE id = 4") + ) + if result.scalar() == 0: + conn.execute( + sa.text( + "INSERT INTO rstate (id, description) VALUES (4, 'whitelisted rule')" + ) + ) + if _seed_rstates and not _table_has_data("rstate"): op.bulk_insert( rstate_table, From cdaaf81d4313172ba504078f986328c15da586af Mon Sep 17 00:00:00 2001 From: Jiri Vrany Date: Wed, 18 Feb 2026 17:17:23 +0100 Subject: [PATCH 07/13] version 1.2.2 release candidate, fixes #73 --- flowapp/__about__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/flowapp/__about__.py b/flowapp/__about__.py index 18aa589..d5b95ac 100755 --- a/flowapp/__about__.py +++ b/flowapp/__about__.py @@ -1,4 +1,4 @@ -__version__ = "1.2.2b2" +__version__ = "1.2.2" __title__ = "ExaFS" __description__ = "Tool for creation, validation, and execution of ExaBGP messages." __author__ = "CESNET / Jiri Vrany, Petr Adamec, Josef Verich, Jakub Man" From f42c48084ea82fa9f50fc981381bef84deebc626 Mon Sep 17 00:00:00 2001 From: Jiri Vrany Date: Wed, 18 Feb 2026 19:58:22 +0100 Subject: [PATCH 08/13] Update docs/DB_MIGRATIONS.md Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- docs/DB_MIGRATIONS.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/DB_MIGRATIONS.md b/docs/DB_MIGRATIONS.md index 9e3bed4..204c5c9 100644 --- a/docs/DB_MIGRATIONS.md +++ b/docs/DB_MIGRATIONS.md @@ -76,8 +76,8 @@ python scripts/migrate_v0x_to_v1.py This script: 1. Sets NULL organization limits to 0 -2. Assigns rules with `org_id=0` to the user's organization -3. Reports users with multiple organizations that need manual assignment +2. Helps assign existing rules to organizations based on users' organizations +3. Reports users with multiple organizations or ambiguous rule ownership that need manual assignment Feel free to contact jiri.vrany@cesnet.cz if you need help with the migration. From 9f66cb900888baae723b90928c52f0dcbd87f542 Mon Sep 17 00:00:00 2001 From: Jiri Vrany Date: Wed, 18 Feb 2026 19:59:19 +0100 Subject: [PATCH 09/13] Update scripts/migrate_v0x_to_v1.py Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- scripts/migrate_v0x_to_v1.py | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/scripts/migrate_v0x_to_v1.py b/scripts/migrate_v0x_to_v1.py index 9a37511..229f93d 100644 --- a/scripts/migrate_v0x_to_v1.py +++ b/scripts/migrate_v0x_to_v1.py @@ -67,12 +67,18 @@ def migrate_org_data(): updated = 0 for row in data_records: - orgs = row.user.organization.all() + user = getattr(row, "user", None) + if user is None: + # Skip records that have no associated user to avoid AttributeError + # and leave them for potential manual investigation. 
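For the ambiguous records the script reports, a minimal manual-assignment sketch; the table and column names follow the baseline schema, while the database URI, `user_id`, and `org_id` values are placeholders to adjust per deployment:

```python
# One-off manual fix-up for a user the migration script flagged as belonging
# to multiple organizations: assign that user's rules to a chosen organization.
from sqlalchemy import create_engine, text

engine = create_engine("sqlite:///exafs.db")  # placeholder URI; point at your real DB
with engine.connect() as conn:
    for table in ("flowspec4", "flowspec6", "RTBH"):
        # only touch rules still unassigned (org_id 0 or NULL, depending on upgrade path)
        conn.execute(
            text(f"UPDATE {table} SET org_id = :org "
                 f"WHERE user_id = :uid AND (org_id = 0 OR org_id IS NULL)"),
            {"org": 7, "uid": 42},  # placeholder organization and user ids
        )
    conn.commit()
```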
+ print(f" {model_name}: skipping record id={getattr(row, 'id', 'unknown')} with no associated user") + continue + orgs = user.organization.all() if len(orgs) == 1: row.org_id = orgs[0].id updated += 1 else: - users_with_multiple_orgs[row.user.email] = [org.name for org in orgs] + users_with_multiple_orgs[user.email] = [org.name for org in orgs] try: db.session.commit() From 6cf92c140d8cf3cd703f09cf17e3633ffde97f7d Mon Sep 17 00:00:00 2001 From: Jiri Vrany Date: Wed, 18 Feb 2026 20:00:03 +0100 Subject: [PATCH 10/13] Update flowapp/migrations/versions/001_baseline.py Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- flowapp/migrations/versions/001_baseline.py | 1 + 1 file changed, 1 insertion(+) diff --git a/flowapp/migrations/versions/001_baseline.py b/flowapp/migrations/versions/001_baseline.py index 99f93fb..fa61ef5 100644 --- a/flowapp/migrations/versions/001_baseline.py +++ b/flowapp/migrations/versions/001_baseline.py @@ -272,6 +272,7 @@ def upgrade(): sa.Integer(), sa.ForeignKey("organization.id"), nullable=True, + server_default="0", ), ) From f70a3577a3f6bed418059379acd21810539d316b Mon Sep 17 00:00:00 2001 From: Jiri Vrany Date: Wed, 18 Feb 2026 20:03:46 +0100 Subject: [PATCH 11/13] Update flowapp/migrations/versions/001_baseline.py Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- flowapp/migrations/versions/001_baseline.py | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/flowapp/migrations/versions/001_baseline.py b/flowapp/migrations/versions/001_baseline.py index fa61ef5..c12ac7b 100644 --- a/flowapp/migrations/versions/001_baseline.py +++ b/flowapp/migrations/versions/001_baseline.py @@ -295,6 +295,18 @@ def upgrade(): nullable=False, ), ) + else: + # Ensure machine_api_key has all expected columns + if not _column_exists("machine_api_key", "org_id"): + op.add_column( + "machine_api_key", + sa.Column( + "org_id", + sa.Integer(), + sa.ForeignKey("organization.id"), + nullable=True, + ), + ) # --- Rule tables --- From 33d68ed320661950f711e4add880d180d5fcda0b Mon Sep 17 00:00:00 2001 From: Jiri Vrany Date: Wed, 18 Feb 2026 20:09:30 +0100 Subject: [PATCH 12/13] Update flowapp/migrations/versions/001_baseline.py Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- flowapp/migrations/versions/001_baseline.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/flowapp/migrations/versions/001_baseline.py b/flowapp/migrations/versions/001_baseline.py index c12ac7b..59dd32f 100644 --- a/flowapp/migrations/versions/001_baseline.py +++ b/flowapp/migrations/versions/001_baseline.py @@ -41,7 +41,9 @@ def _column_exists(table_name, column_name): def _table_has_data(table_name): """Check if a table has any rows.""" conn = op.get_bind() - result = conn.execute(sa.text(f"SELECT COUNT(*) FROM `{table_name}`")) + table_clause = sa.table(table_name) + stmt = sa.select(sa.func.count()).select_from(table_clause) + result = conn.execute(stmt) return result.scalar() > 0 From 5c16cc3f90b5b3d12fd3ecca045185c0792a3966 Mon Sep 17 00:00:00 2001 From: Jiri Vrany Date: Thu, 19 Feb 2026 09:49:13 +0100 Subject: [PATCH 13/13] add test for base migration --- flowapp/migrations/versions/001_baseline.py | 111 +- tests/test_migration.py | 1429 +++++++++++++++++++ 2 files changed, 1450 insertions(+), 90 deletions(-) create mode 100644 tests/test_migration.py diff --git a/flowapp/migrations/versions/001_baseline.py b/flowapp/migrations/versions/001_baseline.py index 59dd32f..9ad7859 100644 --- 
a/flowapp/migrations/versions/001_baseline.py +++ b/flowapp/migrations/versions/001_baseline.py @@ -14,6 +14,7 @@ Create Date: 2026-02-13 """ + from alembic import op import sqlalchemy as sa @@ -76,16 +77,12 @@ def upgrade(): sa.Column("limit_flowspec6", sa.Integer(), default=0), sa.Column("limit_rtbh", sa.Integer(), default=0), ) - _seed_orgs = True else: organization_table = None - _seed_orgs = False # Add limit columns if missing (pre-v1.0 databases) for col_name in ("limit_flowspec4", "limit_flowspec6", "limit_rtbh"): if not _column_exists("organization", col_name): - op.add_column( - "organization", sa.Column(col_name, sa.Integer(), default=0) - ) + op.add_column("organization", sa.Column(col_name, sa.Integer(), default=0)) if not _table_exists("rstate"): rstate_table = op.create_table( @@ -144,21 +141,15 @@ def upgrade(): if not _table_exists("user_role"): op.create_table( "user_role", - sa.Column( - "user_id", sa.Integer(), sa.ForeignKey("user.id"), nullable=False - ), - sa.Column( - "role_id", sa.Integer(), sa.ForeignKey("role.id"), nullable=False - ), + sa.Column("user_id", sa.Integer(), sa.ForeignKey("user.id"), nullable=False), + sa.Column("role_id", sa.Integer(), sa.ForeignKey("role.id"), nullable=False), sa.PrimaryKeyConstraint("user_id", "role_id"), ) if not _table_exists("user_organization"): op.create_table( "user_organization", - sa.Column( - "user_id", sa.Integer(), sa.ForeignKey("user.id"), nullable=False - ), + sa.Column("user_id", sa.Integer(), sa.ForeignKey("user.id"), nullable=False), sa.Column( "organization_id", sa.Integer(), @@ -177,9 +168,7 @@ def upgrade(): sa.Column("name", sa.String(length=120), unique=True), sa.Column("command", sa.String(length=120), unique=True), sa.Column("description", sa.String(length=260)), - sa.Column( - "role_id", sa.Integer(), sa.ForeignKey("role.id"), nullable=False - ), + sa.Column("role_id", sa.Integer(), sa.ForeignKey("role.id"), nullable=False), ) _seed_actions = True else: @@ -202,9 +191,7 @@ def upgrade(): sa.Column("extcomm", sa.String(length=2047)), sa.Column("description", sa.String(length=255)), sa.Column("as_path", sa.Boolean(), default=False), - sa.Column( - "role_id", sa.Integer(), sa.ForeignKey("role.id"), nullable=False - ), + sa.Column("role_id", sa.Integer(), sa.ForeignKey("role.id"), nullable=False), ) _seed_communities = True else: @@ -244,9 +231,7 @@ def upgrade(): sa.Column("readonly", sa.Boolean(), default=False), sa.Column("expires", sa.DateTime(), nullable=True), sa.Column("comment", sa.String(length=255)), - sa.Column( - "user_id", sa.Integer(), sa.ForeignKey("user.id"), nullable=False - ), + sa.Column("user_id", sa.Integer(), sa.ForeignKey("user.id"), nullable=False), sa.Column( "org_id", sa.Integer(), @@ -272,7 +257,6 @@ def upgrade(): sa.Column( "org_id", sa.Integer(), - sa.ForeignKey("organization.id"), nullable=True, server_default="0", ), @@ -287,9 +271,7 @@ def upgrade(): sa.Column("readonly", sa.Boolean(), default=True), sa.Column("expires", sa.DateTime(), nullable=True), sa.Column("comment", sa.String(length=255)), - sa.Column( - "user_id", sa.Integer(), sa.ForeignKey("user.id"), nullable=False - ), + sa.Column("user_id", sa.Integer(), sa.ForeignKey("user.id"), nullable=False), sa.Column( "org_id", sa.Integer(), @@ -305,7 +287,6 @@ def upgrade(): sa.Column( "org_id", sa.Integer(), - sa.ForeignKey("organization.id"), nullable=True, ), ) @@ -329,21 +310,15 @@ def upgrade(): sa.Column("comment", sa.Text()), sa.Column("expires", sa.DateTime()), sa.Column("created", sa.DateTime()), - sa.Column( - 
"action_id", sa.Integer(), sa.ForeignKey("action.id"), nullable=False - ), - sa.Column( - "user_id", sa.Integer(), sa.ForeignKey("user.id"), nullable=False - ), + sa.Column("action_id", sa.Integer(), sa.ForeignKey("action.id"), nullable=False), + sa.Column("user_id", sa.Integer(), sa.ForeignKey("user.id"), nullable=False), sa.Column( "org_id", sa.Integer(), sa.ForeignKey("organization.id"), nullable=False, ), - sa.Column( - "rstate_id", sa.Integer(), sa.ForeignKey("rstate.id"), nullable=False - ), + sa.Column("rstate_id", sa.Integer(), sa.ForeignKey("rstate.id"), nullable=False), ) else: if not _column_exists("flowspec4", "fragment"): @@ -357,7 +332,6 @@ def upgrade(): sa.Column( "org_id", sa.Integer(), - sa.ForeignKey("organization.id"), nullable=True, ), ) @@ -378,21 +352,15 @@ def upgrade(): sa.Column("comment", sa.Text()), sa.Column("expires", sa.DateTime()), sa.Column("created", sa.DateTime()), - sa.Column( - "action_id", sa.Integer(), sa.ForeignKey("action.id"), nullable=False - ), - sa.Column( - "user_id", sa.Integer(), sa.ForeignKey("user.id"), nullable=False - ), + sa.Column("action_id", sa.Integer(), sa.ForeignKey("action.id"), nullable=False), + sa.Column("user_id", sa.Integer(), sa.ForeignKey("user.id"), nullable=False), sa.Column( "org_id", sa.Integer(), sa.ForeignKey("organization.id"), nullable=False, ), - sa.Column( - "rstate_id", sa.Integer(), sa.ForeignKey("rstate.id"), nullable=False - ), + sa.Column("rstate_id", sa.Integer(), sa.ForeignKey("rstate.id"), nullable=False), ) else: if not _column_exists("flowspec6", "org_id"): @@ -401,7 +369,6 @@ def upgrade(): sa.Column( "org_id", sa.Integer(), - sa.ForeignKey("organization.id"), nullable=True, ), ) @@ -423,18 +390,14 @@ def upgrade(): sa.Column("comment", sa.Text()), sa.Column("expires", sa.DateTime()), sa.Column("created", sa.DateTime()), - sa.Column( - "user_id", sa.Integer(), sa.ForeignKey("user.id"), nullable=False - ), + sa.Column("user_id", sa.Integer(), sa.ForeignKey("user.id"), nullable=False), sa.Column( "org_id", sa.Integer(), sa.ForeignKey("organization.id"), nullable=False, ), - sa.Column( - "rstate_id", sa.Integer(), sa.ForeignKey("rstate.id"), nullable=False - ), + sa.Column("rstate_id", sa.Integer(), sa.ForeignKey("rstate.id"), nullable=False), ) else: if not _column_exists("RTBH", "org_id"): @@ -443,7 +406,6 @@ def upgrade(): sa.Column( "org_id", sa.Integer(), - sa.ForeignKey("organization.id"), nullable=True, ), ) @@ -457,18 +419,14 @@ def upgrade(): sa.Column("comment", sa.Text()), sa.Column("expires", sa.DateTime()), sa.Column("created", sa.DateTime()), - sa.Column( - "user_id", sa.Integer(), sa.ForeignKey("user.id"), nullable=False - ), + sa.Column("user_id", sa.Integer(), sa.ForeignKey("user.id"), nullable=False), sa.Column( "org_id", sa.Integer(), sa.ForeignKey("organization.id"), nullable=False, ), - sa.Column( - "rstate_id", sa.Integer(), sa.ForeignKey("rstate.id"), nullable=False - ), + sa.Column("rstate_id", sa.Integer(), sa.ForeignKey("rstate.id"), nullable=False), ) if not _table_exists("rule_whitelist_cache"): @@ -498,39 +456,12 @@ def upgrade(): ], ) - if _seed_orgs and organization_table is not None: - op.bulk_insert( - organization_table, - [ - { - "name": "TU Liberec", - "arange": "147.230.0.0/16\n2001:718:1c01::/48", - "limit_flowspec4": 0, - "limit_flowspec6": 0, - "limit_rtbh": 0, - }, - { - "name": "Cesnet", - "arange": "147.230.0.0/16\n2001:718:1c01::/48", - "limit_flowspec4": 0, - "limit_flowspec6": 0, - "limit_rtbh": 0, - }, - ], - ) - # Ensure rstate has the "whitelisted rule" 
entry (id=4, added in v1.1.0) if not _seed_rstates and _table_has_data("rstate"): conn = op.get_bind() - result = conn.execute( - sa.text("SELECT COUNT(*) FROM rstate WHERE id = 4") - ) + result = conn.execute(sa.text("SELECT COUNT(*) FROM rstate WHERE id = 4")) if result.scalar() == 0: - conn.execute( - sa.text( - "INSERT INTO rstate (id, description) VALUES (4, 'whitelisted rule')" - ) - ) + conn.execute(sa.text("INSERT INTO rstate (id, description) VALUES (4, 'whitelisted rule')")) if _seed_rstates and not _table_has_data("rstate"): op.bulk_insert( diff --git a/tests/test_migration.py b/tests/test_migration.py new file mode 100644 index 0000000..8f90c95 --- /dev/null +++ b/tests/test_migration.py @@ -0,0 +1,1429 @@ +""" +Tests for the baseline migration (001_baseline). + +Verifies that the idempotent migration correctly handles: +- Fresh installs (empty database) +- Running twice (idempotent behavior) +- Upgrading from v0.4 schema (pre-fragment, pre-org_id, pre-author) +- Upgrading from v0.8 schema (pre-org_id, pre-as_path) +- Upgrading from v1.0 schema (pre-as_path, pre-whitelist) +- Upgrading from real 2019 database backup (exact production schema) +- Preserving existing data during migration +""" + +import os + +import pytest +from flask import Flask +from flask_sqlalchemy import SQLAlchemy +from flask_migrate import Migrate, upgrade +from sqlalchemy import create_engine, inspect, text + +import flowapp + + +# --- Expected schema (v1.2.2) --- + +EXPECTED_TABLES = { + "role", "organization", "rstate", "user", "as_path", "log", + "user_role", "user_organization", "action", "community", + "api_key", "machine_api_key", "flowspec4", "flowspec6", + "RTBH", "whitelist", "rule_whitelist_cache", "alembic_version", +} + +EXPECTED_COLUMNS = { + "organization": {"id", "name", "arange", "limit_flowspec4", "limit_flowspec6", "limit_rtbh"}, + "community": {"id", "name", "comm", "larcomm", "extcomm", "description", "as_path", "role_id"}, + "log": {"id", "time", "task", "author", "rule_type", "rule_id", "user_id"}, + "api_key": {"id", "machine", "key", "readonly", "expires", "comment", "user_id", "org_id"}, + "flowspec4": { + "id", "source", "source_mask", "source_port", "dest", "dest_mask", + "dest_port", "protocol", "flags", "packet_len", "fragment", + "comment", "expires", "created", "action_id", "user_id", "org_id", "rstate_id", + }, + "flowspec6": { + "id", "source", "source_mask", "source_port", "dest", "dest_mask", + "dest_port", "next_header", "flags", "packet_len", + "comment", "expires", "created", "action_id", "user_id", "org_id", "rstate_id", + }, + "RTBH": { + "id", "ipv4", "ipv4_mask", "ipv6", "ipv6_mask", "community_id", + "comment", "expires", "created", "user_id", "org_id", "rstate_id", + }, +} + + +# --- Helpers --- + +def _create_app(db_uri): + """ + Create a minimal Flask app with its own SQLAlchemy and Migrate instances. + This avoids conflicts with the global db/migrate from flowapp. 
+ """ + app = Flask(__name__) + app.config.update( + SQLALCHEMY_DATABASE_URI=db_uri, + SQLALCHEMY_TRACK_MODIFICATIONS=False, + TESTING=True, + SECRET_KEY="testing", + ) + db = SQLAlchemy() + db.init_app(app) + migrate = Migrate(app, db, directory=flowapp._migrations_dir) + return app + + +def _get_tables(db_uri): + """Get set of table names in the database.""" + engine = create_engine(db_uri) + tables = set(inspect(engine).get_table_names()) + engine.dispose() + return tables + + +def _get_columns(db_uri, table_name): + """Get set of column names for a table.""" + engine = create_engine(db_uri) + cols = {c["name"] for c in inspect(engine).get_columns(table_name)} + engine.dispose() + return cols + + +def _query_scalar(db_uri, sql): + """Execute a scalar SQL query and return the result.""" + engine = create_engine(db_uri) + with engine.connect() as conn: + result = conn.execute(text(sql)).scalar() + engine.dispose() + return result + + +def _run_migration(app): + """Run flask db upgrade within app context.""" + with app.app_context(): + upgrade() + + +def _clear_alembic_version(db_uri): + """Clear alembic_version table (simulates: DELETE FROM alembic_version). + + Required before running migrations on databases that have an old + alembic_version from user-generated migrations. + """ + engine = create_engine(db_uri) + with engine.connect() as conn: + conn.execute(text("DELETE FROM alembic_version")) + conn.commit() + engine.dispose() + + +def _create_real_2019_schema(db_uri): + """ + Create tables matching the exact production schema from a 2019-02-14 backup. + Based on flowspec_db_190214.sql (MariaDB 5.5.60), with anonymized data. + + Key differences from our synthetic v0.4 schema: + - community has 'command' column (later removed), no comm/larcomm/extcomm/as_path + - log has no 'author' column + - organization has no limit_* columns + - flowspec4 has no 'fragment' or 'org_id' + - flowspec6/RTBH have no 'org_id' + - api_key has no 'readonly', 'expires', 'comment', 'org_id' + - rstate has only 3 entries (no id=4 'whitelisted rule') + - alembic_version exists with old revision '7a816ca986b3' + - Contains sample data matching the shape of the real backup + """ + engine = create_engine(db_uri) + with engine.connect() as conn: + conn.execute(text(""" + CREATE TABLE role ( + id INTEGER PRIMARY KEY, + name VARCHAR(20) UNIQUE, + description VARCHAR(260) + ) + """)) + conn.execute(text(""" + INSERT INTO role VALUES + (1,'view','just view, no edit'), + (2,'user','can edit'), + (3,'admin','admin') + """)) + + conn.execute(text(""" + CREATE TABLE organization ( + id INTEGER PRIMARY KEY, + name VARCHAR(150) UNIQUE, + arange TEXT + ) + """)) + conn.execute(text(""" + INSERT INTO organization VALUES + (1,'University Alpha','192.0.2.0/24'), + (2,'Research Net','198.51.100.0/24') + """)) + + conn.execute(text(""" + CREATE TABLE rstate ( + id INTEGER PRIMARY KEY, + description VARCHAR(260) + ) + """)) + conn.execute(text(""" + INSERT INTO rstate VALUES + (1,'active rule'), + (2,'withdrawed rule'), + (3,'deleted rule') + """)) + + conn.execute(text(""" + CREATE TABLE user ( + id INTEGER PRIMARY KEY, + uuid VARCHAR(180) UNIQUE, + comment VARCHAR(500), + email VARCHAR(255), + name VARCHAR(255), + phone VARCHAR(255) + ) + """)) + conn.execute(text(""" + INSERT INTO user VALUES + (1,'alice@example.edu','test comment','alice@example.edu','Alice Test','+1 555 0101'), + (3,'bob@example.org','Bob Admin','bob@example.org','Bob Admin','+1 555 0102'), + (4,'charlie@example.org','Charlie 
Ops','charlie@example.org','Charlie Ops','+1 555 0103') + """)) + + conn.execute(text(""" + CREATE TABLE user_role ( + user_id INTEGER NOT NULL REFERENCES user(id), + role_id INTEGER NOT NULL REFERENCES role(id), + PRIMARY KEY (user_id, role_id) + ) + """)) + conn.execute(text(""" + INSERT INTO user_role VALUES (1,3),(3,3),(4,3) + """)) + + conn.execute(text(""" + CREATE TABLE user_organization ( + user_id INTEGER NOT NULL REFERENCES user(id), + organization_id INTEGER NOT NULL REFERENCES organization(id), + PRIMARY KEY (user_id, organization_id) + ) + """)) + conn.execute(text(""" + INSERT INTO user_organization VALUES (1,1),(3,2),(4,2) + """)) + + # 2019 log table — no 'author' column + conn.execute(text(""" + CREATE TABLE log ( + id INTEGER PRIMARY KEY, + time DATETIME, + task VARCHAR(1000), + rule_type INTEGER, + rule_id INTEGER, + user_id INTEGER REFERENCES user(id) + ) + """)) + conn.execute(text(""" + INSERT INTO log VALUES + (1,'2018-03-05 17:50:39','withdraw flow route',4,45,4), + (2,'2018-03-06 09:55:01','announce flow route',4,52,3) + """)) + + conn.execute(text(""" + CREATE TABLE action ( + id INTEGER PRIMARY KEY, + name VARCHAR(120) UNIQUE, + command VARCHAR(120) UNIQUE, + description VARCHAR(260), + role_id INTEGER REFERENCES role(id) + ) + """)) + conn.execute(text(""" + INSERT INTO action VALUES + (1,'QoS 0.1 Mbps','rate-limit 12800','QoS',2), + (7,'Discard','discard','Discard',2), + (9,'Redirect to scrubber','redirect 65535:1001','Redirect',2) + """)) + + # 2019 community — has 'command' column (later removed), + # no comm/larcomm/extcomm/as_path + conn.execute(text(""" + CREATE TABLE community ( + id INTEGER PRIMARY KEY, + name VARCHAR(120) UNIQUE, + command VARCHAR(120) UNIQUE, + description VARCHAR(260), + role_id INTEGER REFERENCES role(id) + ) + """)) + conn.execute(text(""" + INSERT INTO community VALUES + (4,'RTBH IXP','65535:666','IXP RTBH community',2), + (5,'RTBH Internal','64496:9999','Internal RTBH',2) + """)) + + # 2019 api_key — no readonly, expires, comment, org_id + conn.execute(text(""" + CREATE TABLE api_key ( + id INTEGER PRIMARY KEY, + machine VARCHAR(255), + key VARCHAR(255), + user_id INTEGER REFERENCES user(id) + ) + """)) + conn.execute(text(""" + INSERT INTO api_key VALUES + (3,'192.0.2.10','a1b2c3d4e5f6a1b2c3d4e5f6a1b2c3d4',3) + """)) + + # 2019 flowspec4 — no fragment, no org_id + conn.execute(text(""" + CREATE TABLE flowspec4 ( + id INTEGER PRIMARY KEY, + source VARCHAR(255), + source_mask INTEGER, + source_port VARCHAR(255), + dest VARCHAR(255), + dest_mask INTEGER, + dest_port VARCHAR(255), + protocol VARCHAR(255), + flags VARCHAR(255), + packet_len VARCHAR(255), + comment TEXT, + expires DATETIME, + created DATETIME, + action_id INTEGER REFERENCES action(id), + user_id INTEGER REFERENCES user(id), + rstate_id INTEGER REFERENCES rstate(id) + ) + """)) + conn.execute(text(""" + INSERT INTO flowspec4 VALUES + (16,'203.0.113.0',24,'','',NULL,'22','tcp','','','','2019-02-08 12:30:00','2019-01-27 14:19:44',1,3,1), + (27,'198.51.100.1',32,'','',NULL,'','tcp','SYN','300-9000','Suspicious SYN','2019-02-10 00:00:00','2019-02-06 12:50:56',7,4,1) + """)) + + # 2019 flowspec6 — no org_id + conn.execute(text(""" + CREATE TABLE flowspec6 ( + id INTEGER PRIMARY KEY, + source VARCHAR(255), + source_mask INTEGER, + source_port VARCHAR(255), + dest VARCHAR(255), + dest_mask INTEGER, + dest_port VARCHAR(255), + next_header VARCHAR(255), + flags VARCHAR(255), + packet_len VARCHAR(255), + comment TEXT, + expires DATETIME, + created DATETIME, + action_id INTEGER 
REFERENCES action(id), + user_id INTEGER REFERENCES user(id), + rstate_id INTEGER REFERENCES rstate(id) + ) + """)) + + # 2019 RTBH — no org_id + conn.execute(text(""" + CREATE TABLE RTBH ( + id INTEGER PRIMARY KEY, + ipv4 VARCHAR(255), + ipv4_mask INTEGER, + ipv6 VARCHAR(255), + ipv6_mask INTEGER, + comment TEXT, + expires DATETIME, + created DATETIME, + user_id INTEGER REFERENCES user(id), + rstate_id INTEGER REFERENCES rstate(id), + community_id INTEGER REFERENCES community(id) + ) + """)) + + # alembic_version with old user-generated revision + conn.execute(text(""" + CREATE TABLE alembic_version ( + version_num VARCHAR(32) PRIMARY KEY + ) + """)) + conn.execute(text(""" + INSERT INTO alembic_version VALUES ('7a816ca986b3') + """)) + + conn.commit() + engine.dispose() + + +def _create_v04_schema(db_uri): + """ + Create tables matching approximately v0.4 schema. + Missing: fragment (flowspec4), org_id (all rules), author (log), + comm/larcomm/extcomm (community), as_path (community), + limit columns (organization), rstate id=4, + as_path table, whitelist, rule_whitelist_cache, machine_api_key + """ + engine = create_engine(db_uri) + with engine.connect() as conn: + conn.execute(text(""" + CREATE TABLE role ( + id INTEGER PRIMARY KEY, + name VARCHAR(20) UNIQUE, + description VARCHAR(260) + ) + """)) + conn.execute(text(""" + INSERT INTO role (id, name, description) VALUES + (1, 'view', 'just view, no edit'), + (2, 'user', 'can edit'), + (3, 'admin', 'admin') + """)) + + conn.execute(text(""" + CREATE TABLE organization ( + id INTEGER PRIMARY KEY, + name VARCHAR(150) UNIQUE, + arange TEXT + ) + """)) + conn.execute(text(""" + INSERT INTO organization (id, name, arange) VALUES + (1, 'TestOrg', '10.0.0.0/8') + """)) + + conn.execute(text(""" + CREATE TABLE rstate ( + id INTEGER PRIMARY KEY, + description VARCHAR(260) + ) + """)) + conn.execute(text(""" + INSERT INTO rstate (id, description) VALUES + (1, 'active rule'), + (2, 'withdrawed rule'), + (3, 'deleted rule') + """)) + + conn.execute(text(""" + CREATE TABLE user ( + id INTEGER PRIMARY KEY, + uuid VARCHAR(180) UNIQUE, + comment VARCHAR(500), + email VARCHAR(255), + name VARCHAR(255), + phone VARCHAR(255) + ) + """)) + conn.execute(text(""" + INSERT INTO user (id, uuid, email) VALUES (1, 'test@test.cz', 'test@test.cz') + """)) + + conn.execute(text(""" + CREATE TABLE user_role ( + user_id INTEGER NOT NULL REFERENCES user(id), + role_id INTEGER NOT NULL REFERENCES role(id), + PRIMARY KEY (user_id, role_id) + ) + """)) + conn.execute(text("INSERT INTO user_role (user_id, role_id) VALUES (1, 3)")) + + conn.execute(text(""" + CREATE TABLE user_organization ( + user_id INTEGER NOT NULL REFERENCES user(id), + organization_id INTEGER NOT NULL REFERENCES organization(id), + PRIMARY KEY (user_id, organization_id) + ) + """)) + conn.execute(text("INSERT INTO user_organization (user_id, organization_id) VALUES (1, 1)")) + + conn.execute(text(""" + CREATE TABLE log ( + id INTEGER PRIMARY KEY, + time DATETIME, + task VARCHAR(1000), + rule_type INTEGER, + rule_id INTEGER, + user_id INTEGER + ) + """)) + + conn.execute(text(""" + CREATE TABLE action ( + id INTEGER PRIMARY KEY, + name VARCHAR(120) UNIQUE, + command VARCHAR(120) UNIQUE, + description VARCHAR(260), + role_id INTEGER NOT NULL REFERENCES role(id) + ) + """)) + conn.execute(text(""" + INSERT INTO action (id, name, command, description, role_id) VALUES + (1, 'Discard', 'discard', 'Discard', 2) + """)) + + # Community without comm, larcomm, extcomm, as_path + conn.execute(text(""" + CREATE 
TABLE community ( + id INTEGER PRIMARY KEY, + name VARCHAR(120) UNIQUE, + description VARCHAR(255), + role_id INTEGER NOT NULL REFERENCES role(id) + ) + """)) + conn.execute(text(""" + INSERT INTO community (id, name, description, role_id) VALUES + (1, '65535:65283', 'local-as', 2) + """)) + + # api_key without comment, readonly, expires, org_id + conn.execute(text(""" + CREATE TABLE api_key ( + id INTEGER PRIMARY KEY, + machine VARCHAR(255), + key VARCHAR(255), + user_id INTEGER NOT NULL REFERENCES user(id) + ) + """)) + + # flowspec4 without fragment and org_id + conn.execute(text(""" + CREATE TABLE flowspec4 ( + id INTEGER PRIMARY KEY, + source VARCHAR(255), + source_mask INTEGER, + source_port VARCHAR(255), + dest VARCHAR(255), + dest_mask INTEGER, + dest_port VARCHAR(255), + protocol VARCHAR(255), + flags VARCHAR(255), + packet_len VARCHAR(255), + comment TEXT, + expires DATETIME, + created DATETIME, + action_id INTEGER NOT NULL REFERENCES action(id), + user_id INTEGER NOT NULL REFERENCES user(id), + rstate_id INTEGER NOT NULL REFERENCES rstate(id) + ) + """)) + + # flowspec6 without org_id + conn.execute(text(""" + CREATE TABLE flowspec6 ( + id INTEGER PRIMARY KEY, + source VARCHAR(255), + source_mask INTEGER, + source_port VARCHAR(255), + dest VARCHAR(255), + dest_mask INTEGER, + dest_port VARCHAR(255), + next_header VARCHAR(255), + flags VARCHAR(255), + packet_len VARCHAR(255), + comment TEXT, + expires DATETIME, + created DATETIME, + action_id INTEGER NOT NULL REFERENCES action(id), + user_id INTEGER NOT NULL REFERENCES user(id), + rstate_id INTEGER NOT NULL REFERENCES rstate(id) + ) + """)) + + # RTBH without org_id + conn.execute(text(""" + CREATE TABLE RTBH ( + id INTEGER PRIMARY KEY, + ipv4 VARCHAR(255), + ipv4_mask INTEGER, + ipv6 VARCHAR(255), + ipv6_mask INTEGER, + community_id INTEGER NOT NULL REFERENCES community(id), + comment TEXT, + expires DATETIME, + created DATETIME, + user_id INTEGER NOT NULL REFERENCES user(id), + rstate_id INTEGER NOT NULL REFERENCES rstate(id) + ) + """)) + + conn.commit() + engine.dispose() + + +def _create_v08_schema(db_uri): + """ + Create tables matching approximately v0.8 schema. + Has comm/larcomm/extcomm on community, api_key has readonly/expires. 
+ Missing: org_id (all rules + api_key), as_path (community), + limit columns (organization), rstate id=4, + as_path table, whitelist, rule_whitelist_cache, machine_api_key + """ + engine = create_engine(db_uri) + with engine.connect() as conn: + conn.execute(text(""" + CREATE TABLE role ( + id INTEGER PRIMARY KEY, + name VARCHAR(20) UNIQUE, + description VARCHAR(260) + ) + """)) + conn.execute(text(""" + INSERT INTO role (id, name, description) VALUES + (1, 'view', 'just view, no edit'), + (2, 'user', 'can edit'), + (3, 'admin', 'admin') + """)) + + conn.execute(text(""" + CREATE TABLE organization ( + id INTEGER PRIMARY KEY, + name VARCHAR(150) UNIQUE, + arange TEXT + ) + """)) + + conn.execute(text(""" + CREATE TABLE rstate ( + id INTEGER PRIMARY KEY, + description VARCHAR(260) + ) + """)) + conn.execute(text(""" + INSERT INTO rstate (id, description) VALUES + (1, 'active rule'), + (2, 'withdrawed rule'), + (3, 'deleted rule') + """)) + + conn.execute(text(""" + CREATE TABLE user ( + id INTEGER PRIMARY KEY, + uuid VARCHAR(180) UNIQUE, + comment VARCHAR(500), + email VARCHAR(255), + name VARCHAR(255), + phone VARCHAR(255) + ) + """)) + + conn.execute(text(""" + CREATE TABLE user_role ( + user_id INTEGER NOT NULL REFERENCES user(id), + role_id INTEGER NOT NULL REFERENCES role(id), + PRIMARY KEY (user_id, role_id) + ) + """)) + + conn.execute(text(""" + CREATE TABLE user_organization ( + user_id INTEGER NOT NULL REFERENCES user(id), + organization_id INTEGER NOT NULL REFERENCES organization(id), + PRIMARY KEY (user_id, organization_id) + ) + """)) + + conn.execute(text(""" + CREATE TABLE log ( + id INTEGER PRIMARY KEY, + time DATETIME, + task VARCHAR(1000), + author VARCHAR(1000), + rule_type INTEGER, + rule_id INTEGER, + user_id INTEGER + ) + """)) + + conn.execute(text(""" + CREATE TABLE action ( + id INTEGER PRIMARY KEY, + name VARCHAR(120) UNIQUE, + command VARCHAR(120) UNIQUE, + description VARCHAR(260), + role_id INTEGER NOT NULL REFERENCES role(id) + ) + """)) + + # Community with comm, larcomm, extcomm but no as_path + conn.execute(text(""" + CREATE TABLE community ( + id INTEGER PRIMARY KEY, + name VARCHAR(120) UNIQUE, + comm VARCHAR(2047), + larcomm VARCHAR(2047), + extcomm VARCHAR(2047), + description VARCHAR(255), + role_id INTEGER NOT NULL REFERENCES role(id) + ) + """)) + + # api_key with readonly and expires but no org_id or comment + conn.execute(text(""" + CREATE TABLE api_key ( + id INTEGER PRIMARY KEY, + machine VARCHAR(255), + key VARCHAR(255), + readonly BOOLEAN DEFAULT 0, + expires DATETIME, + user_id INTEGER NOT NULL REFERENCES user(id) + ) + """)) + + # flowspec4 with fragment but no org_id + conn.execute(text(""" + CREATE TABLE flowspec4 ( + id INTEGER PRIMARY KEY, + source VARCHAR(255), + source_mask INTEGER, + source_port VARCHAR(255), + dest VARCHAR(255), + dest_mask INTEGER, + dest_port VARCHAR(255), + protocol VARCHAR(255), + flags VARCHAR(255), + packet_len VARCHAR(255), + fragment VARCHAR(255), + comment TEXT, + expires DATETIME, + created DATETIME, + action_id INTEGER NOT NULL REFERENCES action(id), + user_id INTEGER NOT NULL REFERENCES user(id), + rstate_id INTEGER NOT NULL REFERENCES rstate(id) + ) + """)) + + conn.execute(text(""" + CREATE TABLE flowspec6 ( + id INTEGER PRIMARY KEY, + source VARCHAR(255), + source_mask INTEGER, + source_port VARCHAR(255), + dest VARCHAR(255), + dest_mask INTEGER, + dest_port VARCHAR(255), + next_header VARCHAR(255), + flags VARCHAR(255), + packet_len VARCHAR(255), + comment TEXT, + expires DATETIME, + created DATETIME, + 
action_id INTEGER NOT NULL REFERENCES action(id), + user_id INTEGER NOT NULL REFERENCES user(id), + rstate_id INTEGER NOT NULL REFERENCES rstate(id) + ) + """)) + + conn.execute(text(""" + CREATE TABLE RTBH ( + id INTEGER PRIMARY KEY, + ipv4 VARCHAR(255), + ipv4_mask INTEGER, + ipv6 VARCHAR(255), + ipv6_mask INTEGER, + community_id INTEGER NOT NULL REFERENCES community(id), + comment TEXT, + expires DATETIME, + created DATETIME, + user_id INTEGER NOT NULL REFERENCES user(id), + rstate_id INTEGER NOT NULL REFERENCES rstate(id) + ) + """)) + + conn.commit() + engine.dispose() + + +def _create_v10_schema(db_uri): + """ + Create tables matching approximately v1.0 schema. + Has org_id on rules, limit columns on organization. + Missing: as_path (community), rstate id=4, + as_path table, whitelist, rule_whitelist_cache + """ + engine = create_engine(db_uri) + with engine.connect() as conn: + conn.execute(text(""" + CREATE TABLE role ( + id INTEGER PRIMARY KEY, + name VARCHAR(20) UNIQUE, + description VARCHAR(260) + ) + """)) + conn.execute(text(""" + INSERT INTO role (id, name, description) VALUES + (1, 'view', 'just view, no edit'), + (2, 'user', 'can edit'), + (3, 'admin', 'admin') + """)) + + conn.execute(text(""" + CREATE TABLE organization ( + id INTEGER PRIMARY KEY, + name VARCHAR(150) UNIQUE, + arange TEXT, + limit_flowspec4 INTEGER DEFAULT 0, + limit_flowspec6 INTEGER DEFAULT 0, + limit_rtbh INTEGER DEFAULT 0 + ) + """)) + + conn.execute(text(""" + CREATE TABLE rstate ( + id INTEGER PRIMARY KEY, + description VARCHAR(260) + ) + """)) + conn.execute(text(""" + INSERT INTO rstate (id, description) VALUES + (1, 'active rule'), + (2, 'withdrawed rule'), + (3, 'deleted rule') + """)) + + conn.execute(text(""" + CREATE TABLE user ( + id INTEGER PRIMARY KEY, + uuid VARCHAR(180) UNIQUE, + comment VARCHAR(500), + email VARCHAR(255), + name VARCHAR(255), + phone VARCHAR(255) + ) + """)) + + conn.execute(text(""" + CREATE TABLE user_role ( + user_id INTEGER NOT NULL REFERENCES user(id), + role_id INTEGER NOT NULL REFERENCES role(id), + PRIMARY KEY (user_id, role_id) + ) + """)) + + conn.execute(text(""" + CREATE TABLE user_organization ( + user_id INTEGER NOT NULL REFERENCES user(id), + organization_id INTEGER NOT NULL REFERENCES organization(id), + PRIMARY KEY (user_id, organization_id) + ) + """)) + + conn.execute(text(""" + CREATE TABLE log ( + id INTEGER PRIMARY KEY, + time DATETIME, + task VARCHAR(1000), + author VARCHAR(1000), + rule_type INTEGER, + rule_id INTEGER, + user_id INTEGER + ) + """)) + + conn.execute(text(""" + CREATE TABLE action ( + id INTEGER PRIMARY KEY, + name VARCHAR(120) UNIQUE, + command VARCHAR(120) UNIQUE, + description VARCHAR(260), + role_id INTEGER NOT NULL REFERENCES role(id) + ) + """)) + + # Community with comm columns but no as_path + conn.execute(text(""" + CREATE TABLE community ( + id INTEGER PRIMARY KEY, + name VARCHAR(120) UNIQUE, + comm VARCHAR(2047), + larcomm VARCHAR(2047), + extcomm VARCHAR(2047), + description VARCHAR(255), + role_id INTEGER NOT NULL REFERENCES role(id) + ) + """)) + + conn.execute(text(""" + CREATE TABLE api_key ( + id INTEGER PRIMARY KEY, + machine VARCHAR(255), + key VARCHAR(255), + readonly BOOLEAN DEFAULT 0, + expires DATETIME, + comment VARCHAR(255), + user_id INTEGER NOT NULL REFERENCES user(id), + org_id INTEGER REFERENCES organization(id) + ) + """)) + + conn.execute(text(""" + CREATE TABLE machine_api_key ( + id INTEGER PRIMARY KEY, + machine VARCHAR(255), + key VARCHAR(255), + readonly BOOLEAN DEFAULT 1, + expires DATETIME, + 
comment VARCHAR(255), + user_id INTEGER NOT NULL REFERENCES user(id), + org_id INTEGER NOT NULL REFERENCES organization(id) + ) + """)) + + conn.execute(text(""" + CREATE TABLE flowspec4 ( + id INTEGER PRIMARY KEY, + source VARCHAR(255), + source_mask INTEGER, + source_port VARCHAR(255), + dest VARCHAR(255), + dest_mask INTEGER, + dest_port VARCHAR(255), + protocol VARCHAR(255), + flags VARCHAR(255), + packet_len VARCHAR(255), + fragment VARCHAR(255), + comment TEXT, + expires DATETIME, + created DATETIME, + action_id INTEGER NOT NULL REFERENCES action(id), + user_id INTEGER NOT NULL REFERENCES user(id), + org_id INTEGER NOT NULL REFERENCES organization(id), + rstate_id INTEGER NOT NULL REFERENCES rstate(id) + ) + """)) + + conn.execute(text(""" + CREATE TABLE flowspec6 ( + id INTEGER PRIMARY KEY, + source VARCHAR(255), + source_mask INTEGER, + source_port VARCHAR(255), + dest VARCHAR(255), + dest_mask INTEGER, + dest_port VARCHAR(255), + next_header VARCHAR(255), + flags VARCHAR(255), + packet_len VARCHAR(255), + comment TEXT, + expires DATETIME, + created DATETIME, + action_id INTEGER NOT NULL REFERENCES action(id), + user_id INTEGER NOT NULL REFERENCES user(id), + org_id INTEGER NOT NULL REFERENCES organization(id), + rstate_id INTEGER NOT NULL REFERENCES rstate(id) + ) + """)) + + conn.execute(text(""" + CREATE TABLE RTBH ( + id INTEGER PRIMARY KEY, + ipv4 VARCHAR(255), + ipv4_mask INTEGER, + ipv6 VARCHAR(255), + ipv6_mask INTEGER, + community_id INTEGER NOT NULL REFERENCES community(id), + comment TEXT, + expires DATETIME, + created DATETIME, + user_id INTEGER NOT NULL REFERENCES user(id), + org_id INTEGER NOT NULL REFERENCES organization(id), + rstate_id INTEGER NOT NULL REFERENCES rstate(id) + ) + """)) + + conn.commit() + engine.dispose() + + +# --- Fixtures --- + +@pytest.fixture +def migration_db(tmp_path): + """ + Provide a temporary database URI and app for migration testing. + Yields (app, db_uri) and cleans up after. 
+ """ + db_path = str(tmp_path / "test_migration.db") + db_uri = f"sqlite:///{db_path}" + app = _create_app(db_uri) + yield app, db_uri + + +# --- Tests --- + +class TestFreshInstall: + """Test migration on a completely empty database.""" + + def test_creates_all_tables(self, migration_db): + app, db_uri = migration_db + _run_migration(app) + tables = _get_tables(db_uri) + for table in EXPECTED_TABLES: + assert table in tables, f"Missing table: {table}" + + def test_seeds_roles(self, migration_db): + app, db_uri = migration_db + _run_migration(app) + assert _query_scalar(db_uri, "SELECT COUNT(*) FROM role") == 3 + + def test_seeds_rstates(self, migration_db): + app, db_uri = migration_db + _run_migration(app) + assert _query_scalar(db_uri, "SELECT COUNT(*) FROM rstate") == 4 + assert _query_scalar(db_uri, "SELECT description FROM rstate WHERE id = 4") == "whitelisted rule" + + def test_seeds_actions(self, migration_db): + app, db_uri = migration_db + _run_migration(app) + assert _query_scalar(db_uri, "SELECT COUNT(*) FROM action") == 4 + + def test_seeds_communities(self, migration_db): + app, db_uri = migration_db + _run_migration(app) + assert _query_scalar(db_uri, "SELECT COUNT(*) FROM community") == 3 + + def test_all_expected_columns(self, migration_db): + app, db_uri = migration_db + _run_migration(app) + for table_name, expected_cols in EXPECTED_COLUMNS.items(): + actual_cols = _get_columns(db_uri,table_name) + for col in expected_cols: + assert col in actual_cols, ( + f"Missing column {table_name}.{col}" + ) + + +class TestIdempotent: + """Test that running migration twice doesn't fail.""" + + def test_double_upgrade_succeeds(self, migration_db): + app, db_uri = migration_db + _run_migration(app) + # Second run should not raise + _run_migration(app) + + def test_double_upgrade_preserves_seed_data(self, migration_db): + app, db_uri = migration_db + _run_migration(app) + _run_migration(app) + assert _query_scalar(db_uri, "SELECT COUNT(*) FROM role") == 3 + assert _query_scalar(db_uri, "SELECT COUNT(*) FROM rstate") == 4 + assert _query_scalar(db_uri, "SELECT COUNT(*) FROM action") == 4 + assert _query_scalar(db_uri, "SELECT COUNT(*) FROM community") == 3 + + +class TestUpgradeFromV04: + """Test migration from approximately v0.4 schema.""" + + def test_adds_missing_columns(self, migration_db): + app, db_uri = migration_db + _create_v04_schema(db_uri) + _run_migration(app) + + # Check all expected columns exist after migration + for table_name, expected_cols in EXPECTED_COLUMNS.items(): + actual_cols = _get_columns(db_uri,table_name) + for col in expected_cols: + assert col in actual_cols, ( + f"Missing column {table_name}.{col} after v0.4 upgrade" + ) + + def test_adds_log_author(self, migration_db): + app, db_uri = migration_db + _create_v04_schema(db_uri) + + # Verify author is missing before migration + assert "author" not in _get_columns(db_uri,"log") + + _run_migration(app) + assert "author" in _get_columns(db_uri,"log") + + def test_adds_community_columns(self, migration_db): + app, db_uri = migration_db + _create_v04_schema(db_uri) + + # Verify columns are missing before migration + community_cols = _get_columns(db_uri,"community") + assert "comm" not in community_cols + assert "as_path" not in community_cols + + _run_migration(app) + community_cols = _get_columns(db_uri,"community") + assert "comm" in community_cols + assert "larcomm" in community_cols + assert "extcomm" in community_cols + assert "as_path" in community_cols + + def 
test_adds_flowspec4_fragment_and_org_id(self, migration_db): + app, db_uri = migration_db + _create_v04_schema(db_uri) + + cols = _get_columns(db_uri,"flowspec4") + assert "fragment" not in cols + assert "org_id" not in cols + + _run_migration(app) + cols = _get_columns(db_uri,"flowspec4") + assert "fragment" in cols + assert "org_id" in cols + + def test_adds_rstate_whitelisted(self, migration_db): + app, db_uri = migration_db + _create_v04_schema(db_uri) + + assert _query_scalar(db_uri, "SELECT COUNT(*) FROM rstate") == 3 + + _run_migration(app) + + assert _query_scalar(db_uri, "SELECT COUNT(*) FROM rstate WHERE id = 4") == 1 + + def test_creates_missing_tables(self, migration_db): + app, db_uri = migration_db + _create_v04_schema(db_uri) + + tables_before = _get_tables(db_uri) + assert "whitelist" not in tables_before + assert "as_path" not in tables_before + assert "machine_api_key" not in tables_before + assert "rule_whitelist_cache" not in tables_before + + _run_migration(app) + + tables_after = _get_tables(db_uri) + assert "whitelist" in tables_after + assert "as_path" in tables_after + assert "machine_api_key" in tables_after + assert "rule_whitelist_cache" in tables_after + + def test_adds_organization_limit_columns(self, migration_db): + app, db_uri = migration_db + _create_v04_schema(db_uri) + + cols = _get_columns(db_uri,"organization") + assert "limit_flowspec4" not in cols + + _run_migration(app) + cols = _get_columns(db_uri,"organization") + assert "limit_flowspec4" in cols + assert "limit_flowspec6" in cols + assert "limit_rtbh" in cols + + def test_adds_api_key_columns(self, migration_db): + app, db_uri = migration_db + _create_v04_schema(db_uri) + + cols = _get_columns(db_uri,"api_key") + assert "comment" not in cols + assert "readonly" not in cols + assert "org_id" not in cols + + _run_migration(app) + cols = _get_columns(db_uri,"api_key") + assert "comment" in cols + assert "readonly" in cols + assert "expires" in cols + assert "org_id" in cols + + +class TestUpgradeFromV08: + """Test migration from approximately v0.8 schema.""" + + def test_adds_org_id_to_rules(self, migration_db): + app, db_uri = migration_db + _create_v08_schema(db_uri) + + for table in ("flowspec4", "flowspec6", "RTBH"): + assert "org_id" not in _get_columns(db_uri,table) + + _run_migration(app) + + for table in ("flowspec4", "flowspec6", "RTBH"): + assert "org_id" in _get_columns(db_uri,table), ( + f"Missing org_id on {table} after v0.8 upgrade" + ) + + def test_adds_community_as_path(self, migration_db): + app, db_uri = migration_db + _create_v08_schema(db_uri) + + assert "as_path" not in _get_columns(db_uri,"community") + + _run_migration(app) + assert "as_path" in _get_columns(db_uri,"community") + + def test_adds_api_key_comment_and_org_id(self, migration_db): + app, db_uri = migration_db + _create_v08_schema(db_uri) + + cols = _get_columns(db_uri,"api_key") + assert "comment" not in cols + assert "org_id" not in cols + + _run_migration(app) + cols = _get_columns(db_uri,"api_key") + assert "comment" in cols + assert "org_id" in cols + + def test_adds_rstate_whitelisted(self, migration_db): + app, db_uri = migration_db + _create_v08_schema(db_uri) + _run_migration(app) + assert _query_scalar(db_uri, "SELECT description FROM rstate WHERE id = 4") == "whitelisted rule" + + +class TestUpgradeFromV10: + """Test migration from approximately v1.0 schema.""" + + def test_adds_community_as_path(self, migration_db): + app, db_uri = migration_db + _create_v10_schema(db_uri) + assert "as_path" not in 
+
+
+class TestUpgradeFromV10:
+    """Test migration from an approximate v1.0 schema."""
+
+    def test_adds_community_as_path(self, migration_db):
+        app, db_uri = migration_db
+        _create_v10_schema(db_uri)
+        assert "as_path" not in _get_columns(db_uri, "community")
+        _run_migration(app)
+        assert "as_path" in _get_columns(db_uri, "community")
+
+    def test_creates_missing_tables(self, migration_db):
+        app, db_uri = migration_db
+        _create_v10_schema(db_uri)
+        tables = _get_tables(db_uri)
+        assert "whitelist" not in tables
+        assert "as_path" not in tables
+        assert "rule_whitelist_cache" not in tables
+        _run_migration(app)
+        tables = _get_tables(db_uri)
+        assert "whitelist" in tables
+        assert "as_path" in tables
+        assert "rule_whitelist_cache" in tables
+
+    def test_adds_rstate_whitelisted(self, migration_db):
+        app, db_uri = migration_db
+        _create_v10_schema(db_uri)
+        _run_migration(app)
+        assert _query_scalar(db_uri, "SELECT description FROM rstate WHERE id = 4") == "whitelisted rule"
+
+    def test_existing_schema_unchanged(self, migration_db):
+        """Tables that already have all columns should not be modified."""
+        app, db_uri = migration_db
+        _create_v10_schema(db_uri)
+        flowspec4_cols_before = _get_columns(db_uri, "flowspec4")
+        assert "org_id" in flowspec4_cols_before
+        assert "fragment" in flowspec4_cols_before
+        _run_migration(app)
+        flowspec4_cols_after = _get_columns(db_uri, "flowspec4")
+        assert "org_id" in flowspec4_cols_after
+        assert "fragment" in flowspec4_cols_after
+
+
+class TestDataPreservation:
+    """Test that existing data survives the migration."""
+
+    def test_preserves_existing_users(self, migration_db):
+        app, db_uri = migration_db
+        _create_v04_schema(db_uri)
+        _run_migration(app)
+        assert _query_scalar(db_uri, "SELECT uuid FROM user WHERE id = 1") == "test@test.cz"
+
+    def test_preserves_existing_organizations(self, migration_db):
+        app, db_uri = migration_db
+        _create_v04_schema(db_uri)
+        _run_migration(app)
+        assert _query_scalar(db_uri, "SELECT name FROM organization WHERE id = 1") == "TestOrg"
+
+    def test_preserves_existing_community(self, migration_db):
+        app, db_uri = migration_db
+        _create_v04_schema(db_uri)
+        _run_migration(app)
+        assert _query_scalar(db_uri, "SELECT name FROM community WHERE id = 1") == "65535:65283"
+
+    def test_preserves_existing_roles(self, migration_db):
+        """Existing roles should not be duplicated."""
+        app, db_uri = migration_db
+        _create_v04_schema(db_uri)
+        _run_migration(app)
+        assert _query_scalar(db_uri, "SELECT COUNT(*) FROM role") == 3
+
+    def test_does_not_duplicate_seed_on_existing(self, migration_db):
+        """Seed data should not be inserted when tables already have data."""
+        app, db_uri = migration_db
+        _create_v04_schema(db_uri)
+        _run_migration(app)
+        # Actions had 1 row from the v0.4 setup, should not get 4 more
+        assert _query_scalar(db_uri, "SELECT COUNT(*) FROM action") == 1
+        # Communities had 1 row from the v0.4 setup, should not get 3 more
+        assert _query_scalar(db_uri, "SELECT COUNT(*) FROM community") == 1
+
+
+class TestUpgradeFromRealBackup:
+    """Test migration against the exact schema from a 2019-02-14 production backup.
+
+    This uses the real DDL extracted from flowspec_db_190214.sql to ensure
+    the migration handles actual production databases, not just our
+    synthetic test schemas.
+
+    The backup includes an alembic_version table with the old revision
+    '7a816ca986b3' from user-generated migrations. Per the documented
+    upgrade procedure, this must be cleared before running flask db upgrade.
+    """
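+
+    # In operator terms, the procedure referenced above is roughly the
+    # following (a sketch; docs/DB_MIGRATIONS.md is the authoritative
+    # reference):
+    #
+    #     -- in the database: drop the stale user-generated revision
+    #     DELETE FROM alembic_version;
+    #     -- then, from the application environment:
+    #     flask db upgrade    (or: python db-init.py)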
+ """ + + def _setup_and_migrate(self, migration_db): + """Create 2019 schema, clear old alembic_version, run migration.""" + app, db_uri = migration_db + _create_real_2019_schema(db_uri) + _clear_alembic_version(db_uri) + _run_migration(app) + return app, db_uri + + def test_migration_succeeds(self, migration_db): + self._setup_and_migrate(migration_db) + + def test_all_tables_created(self, migration_db): + _, db_uri = self._setup_and_migrate(migration_db) + tables = _get_tables(db_uri) + for table in EXPECTED_TABLES: + assert table in tables, f"Missing table: {table}" + + def test_all_expected_columns(self, migration_db): + _, db_uri = self._setup_and_migrate(migration_db) + for table_name, expected_cols in EXPECTED_COLUMNS.items(): + actual_cols = _get_columns(db_uri, table_name) + for col in expected_cols: + assert col in actual_cols, ( + f"Missing column {table_name}.{col} after 2019 backup upgrade" + ) + + def test_adds_missing_log_author(self, migration_db): + app, db_uri = migration_db + _create_real_2019_schema(db_uri) + assert "author" not in _get_columns(db_uri, "log") + _clear_alembic_version(db_uri) + _run_migration(app) + assert "author" in _get_columns(db_uri, "log") + + def test_adds_missing_community_columns(self, migration_db): + """2019 community had 'command' column but no comm/larcomm/extcomm/as_path.""" + app, db_uri = migration_db + _create_real_2019_schema(db_uri) + cols = _get_columns(db_uri, "community") + assert "command" in cols # old column present + assert "comm" not in cols + assert "as_path" not in cols + _clear_alembic_version(db_uri) + _run_migration(app) + cols = _get_columns(db_uri, "community") + assert "command" in cols # old column still present (not dropped) + assert "comm" in cols + assert "larcomm" in cols + assert "extcomm" in cols + assert "as_path" in cols + + def test_adds_missing_flowspec4_columns(self, migration_db): + app, db_uri = migration_db + _create_real_2019_schema(db_uri) + cols = _get_columns(db_uri, "flowspec4") + assert "fragment" not in cols + assert "org_id" not in cols + _clear_alembic_version(db_uri) + _run_migration(app) + cols = _get_columns(db_uri, "flowspec4") + assert "fragment" in cols + assert "org_id" in cols + + def test_adds_missing_org_id_to_all_rules(self, migration_db): + app, db_uri = migration_db + _create_real_2019_schema(db_uri) + for table in ("flowspec4", "flowspec6", "RTBH"): + assert "org_id" not in _get_columns(db_uri, table) + _clear_alembic_version(db_uri) + _run_migration(app) + for table in ("flowspec4", "flowspec6", "RTBH"): + assert "org_id" in _get_columns(db_uri, table), ( + f"Missing org_id on {table} after 2019 backup upgrade" + ) + + def test_adds_missing_api_key_columns(self, migration_db): + app, db_uri = migration_db + _create_real_2019_schema(db_uri) + cols = _get_columns(db_uri, "api_key") + assert "readonly" not in cols + assert "org_id" not in cols + _clear_alembic_version(db_uri) + _run_migration(app) + cols = _get_columns(db_uri, "api_key") + assert "readonly" in cols + assert "expires" in cols + assert "comment" in cols + assert "org_id" in cols + + def test_adds_organization_limits(self, migration_db): + app, db_uri = migration_db + _create_real_2019_schema(db_uri) + assert "limit_flowspec4" not in _get_columns(db_uri, "organization") + _clear_alembic_version(db_uri) + _run_migration(app) + cols = _get_columns(db_uri, "organization") + assert "limit_flowspec4" in cols + assert "limit_flowspec6" in cols + assert "limit_rtbh" in cols + + def test_adds_rstate_whitelisted(self, 
+
+    def test_adds_rstate_whitelisted(self, migration_db):
+        app, db_uri = migration_db
+        _create_real_2019_schema(db_uri)
+        assert _query_scalar(db_uri, "SELECT COUNT(*) FROM rstate") == 3
+        _clear_alembic_version(db_uri)
+        _run_migration(app)
+        assert _query_scalar(db_uri, "SELECT COUNT(*) FROM rstate") == 4
+        assert _query_scalar(
+            db_uri, "SELECT description FROM rstate WHERE id = 4"
+        ) == "whitelisted rule"
+
+    def test_preserves_existing_users(self, migration_db):
+        _, db_uri = self._setup_and_migrate(migration_db)
+        assert _query_scalar(db_uri, "SELECT COUNT(*) FROM user") == 3
+        assert _query_scalar(
+            db_uri, "SELECT uuid FROM user WHERE id = 1"
+        ) == "alice@example.edu"
+
+    def test_preserves_existing_rules(self, migration_db):
+        _, db_uri = self._setup_and_migrate(migration_db)
+        assert _query_scalar(db_uri, "SELECT COUNT(*) FROM flowspec4") == 2
+        assert _query_scalar(
+            db_uri, "SELECT source FROM flowspec4 WHERE id = 16"
+        ) == "203.0.113.0"
+
+    def test_preserves_existing_logs(self, migration_db):
+        _, db_uri = self._setup_and_migrate(migration_db)
+        assert _query_scalar(db_uri, "SELECT COUNT(*) FROM log") == 2
+
+    def test_preserves_existing_communities(self, migration_db):
+        """Existing communities should not be overwritten or duplicated."""
+        _, db_uri = self._setup_and_migrate(migration_db)
+        assert _query_scalar(db_uri, "SELECT COUNT(*) FROM community") == 2
+        assert _query_scalar(
+            db_uri, "SELECT name FROM community WHERE id = 4"
+        ) == "RTBH IXP"
+
+    def test_does_not_duplicate_seed_data(self, migration_db):
+        """Seed data should not be inserted when tables already have data."""
+        _, db_uri = self._setup_and_migrate(migration_db)
+        # Roles: had 3 from the backup, should still have 3
+        assert _query_scalar(db_uri, "SELECT COUNT(*) FROM role") == 3
+        # Actions: had 3 from the backup, should still have 3
+        assert _query_scalar(db_uri, "SELECT COUNT(*) FROM action") == 3
+        # Communities: had 2 from the backup, should still have 2
+        assert _query_scalar(db_uri, "SELECT COUNT(*) FROM community") == 2
+
+    def test_alembic_version_updated(self, migration_db):
+        """After clearing the old revision and upgrading, the version should be the baseline."""
+        _, db_uri = self._setup_and_migrate(migration_db)
+        assert _query_scalar(
+            db_uri, "SELECT version_num FROM alembic_version"
+        ) == "001_baseline"
+
+    def test_fails_without_clearing_old_revision(self, migration_db):
+        """Migration should fail if the old alembic_version is not cleared first."""
+        app, db_uri = migration_db
+        _create_real_2019_schema(db_uri)
+        # Do NOT clear alembic_version; the migration should fail
+        with pytest.raises(SystemExit):
+            _run_migration(app)
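+
+# Note on the SystemExit expectation in the last test: _run_migration (defined
+# earlier in this file) is assumed to drive Alembic the same way db-init.py
+# does, i.e. through Flask-Migrate. Flask-Migrate's module-level upgrade()
+# helper logs a CommandError (such as an unresolvable stored revision) and
+# exits rather than raising. A minimal sketch of such a helper:
+#
+#     from flask_migrate import upgrade
+#
+#     def _run_migration(app):
+#         with app.app_context():
+#             upgrade()  # sys.exit(1) on CommandError, hence SystemExit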