diff --git a/.gitignore b/.gitignore index b8b14126..3813b9ec 100644 --- a/.gitignore +++ b/.gitignore @@ -1,7 +1,6 @@ config.py instance_config_override.py run.py -migrations/ # PyPi .pypirc diff --git a/CHANGELOG.md b/CHANGELOG.md index dad22def..aa473cf4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,21 @@ All notable changes to ExaFS will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). +## [1.2.2] - 2026-02-16 + +### Changed +- **Database migrations now tracked in git** — `migrations/` removed from `.gitignore` +- Rewrote `db-init.py` to initialize the database via migrations (`flask db upgrade`) instead of `db.create_all()` +- Removed one-time `/admin/set-org-if-zero` endpoint, replaced with standalone `scripts/migrate_v0x_to_v1.py` +- Fixed Flask-SQLAlchemy deprecation warning in Alembic `env.py` +- Template URLs changed to use `url_for` helper, removed unused `rule.html` template + +### Added +- Idempotent baseline migration (`001_baseline`) that brings any ExaFS database (from v0.4+ to current) to the v1.2.2 schema +- Optional `scripts/migrate_v0x_to_v1.py` helper for v0.x to v1.0+ data migration (org_id backfill) +- `db-init.py --reset` flag for development database reset +- `PYTHONPATH` set in Docker dev container for easier development + ## [1.2.1] - 2026-01-30 ### Fixed @@ -286,6 +301,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Route Distinguisher for VRF now supported - See config example and update your `config.py` +[1.2.2]: https://github.com/CESNET/exafs/compare/v1.2.1...v1.2.2 +[1.2.1]: https://github.com/CESNET/exafs/compare/v1.2.0...v1.2.1 [1.2.0]: https://github.com/CESNET/exafs/compare/v1.1.9...v1.2.0 [1.1.9]: https://github.com/CESNET/exafs/compare/v1.1.8...v1.1.9 [1.1.8]: https://github.com/CESNET/exafs/compare/v1.1.7...v1.1.8 diff --git a/CLAUDE.md b/CLAUDE.md index 89728a23..1873abef 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -115,7 +115,9 @@ exafs/ ├── config.example.py # Configuration template ├── instance_config_override.example.py # Dashboard override template ├── run.example.py # Application run script template -├── db-init.py # Database initialization script +├── db-init.py # Database initialization (runs flask db upgrade) +├── scripts/ +│ └── migrate_v0x_to_v1.py # Optional v0.x to v1.0+ migration helper ├── pyproject.toml # Project metadata and dependencies ├── setup.cfg # Setup configuration ├── CHANGELOG.md # Version history @@ -283,7 +285,7 @@ cp run.example.py run.py # Edit config.py with database credentials and settings -# Initialize database +# Initialize database (runs flask db upgrade) python db-init.py # Run tests @@ -295,8 +297,10 @@ python run.py ### Database Migrations +Migration files are tracked in `flowapp/migrations/versions/` and committed to git. + ```bash -# Create a new migration +# Create a new migration after model changes flask db migrate -m "Description of changes" # Apply migrations @@ -304,6 +308,9 @@ flask db upgrade # Rollback migration flask db downgrade + +# For existing databases adopting migrations for the first time +flask db stamp 001_baseline ``` ### Running Tests @@ -788,7 +795,9 @@ flask db upgrade # Apply migrations flake8 . 
# Lint code # Database -python db-init.py # Initialize database +python db-init.py # Initialize database (runs migrations) +python db-init.py --reset # Drop all tables and recreate (dev only) +flask db stamp 001_baseline # Mark existing DB as baseline flask db current # Show current migration flask db history # Show migration history @@ -804,7 +813,7 @@ supervisorctl status # Check status When working with this codebase: 1. **Always run tests** after making changes: `pytest` -2. **Create migrations** for model changes: `flask db migrate` +2. **Create migrations** for model changes: `flask db migrate` — commit migration files to git 3. **Follow the service layer pattern** - business logic goes in services, not views 4. **Use existing validators** in `flowapp/validators.py` for validation 5. **Check authentication** - most routes need `@auth_required` decorator diff --git a/db-init.py b/db-init.py index 2c8fd680..19ff813b 100644 --- a/db-init.py +++ b/db-init.py @@ -1,39 +1,55 @@ +""" +Initialize the ExaFS database using Alembic migrations. -from flask import Flask -from flowapp import db -from flowapp.models import * +Usage: + python db-init.py # Create database from baseline migration + python db-init.py --reset # Drop all tables first, then recreate (DESTRUCTIVE) +""" -import config +import sys from os import environ +from flask_migrate import upgrade +from flowapp import create_app, db + +import config -def create_app(): - app = Flask('FlowSpecDB init') - # Configurations - try: - env = environ['USERNAME'] - except KeyError as e: - env = 'Production' - if env == 'albert': - print("DEVEL") - app.config.from_object(config.DevelopmentConfig) +def init_db(reset=False): + exafs_env = environ.get("EXAFS_ENV", "Production").lower() + if exafs_env in ("devel", "development"): + app = create_app(config.DevelopmentConfig) else: - print("PRODUCTION") - app.config.from_object(config.ProductionConfig) + app = create_app(config.ProductionConfig) db.init_app(app) with app.app_context(): - print("#: cleaning database") - db.reflect() - db.drop_all() - print("#: creating tables") - db.create_all() - - - return app - - -if __name__ == '__main__': - create_app().app_context().push() + if reset: + print("#: WARNING - dropping all tables") + db.reflect() + db.drop_all() + # Also remove alembic_version if it exists + from sqlalchemy import text + + try: + db.session.execute(text("DROP TABLE IF EXISTS alembic_version")) + db.session.commit() + except Exception: + db.session.rollback() + + print("#: running migrations (flask db upgrade)") + upgrade() + print("#: database initialized successfully") + + +if __name__ == "__main__": + reset = "--reset" in sys.argv + if reset: + print("Reset mode: all existing data will be DESTROYED.") + confirm = input("Are you sure? (yes/no): ") + if confirm.lower() != "yes": + print("Aborted.") + sys.exit(0) + + init_db(reset=reset) diff --git a/docs/DB_MIGRATIONS.md b/docs/DB_MIGRATIONS.md index 93b8de3d..204c5c9f 100644 --- a/docs/DB_MIGRATIONS.md +++ b/docs/DB_MIGRATIONS.md @@ -1,35 +1,108 @@ -# How to Upgrade the Database +# Database Migrations -## General Guidelines -Migrations can be inconsistent. To avoid issues, we removed migrations from git repostory. To start the migration on your server, it is recomended reset the migration state on the server and run the migration based on the updated database models when switching application versions via Git. +ExaFS uses [Flask-Migrate](https://flask-migrate.readthedocs.io/) (Alembic) for database schema management. 
Migration files are shipped inside the `flowapp` package (`flowapp/migrations/`) and are found automatically — no `flask db init` is needed. + +## New Installation + +For a fresh database, run the migrations to create all tables and seed data: ```bash -rm -rf migrations/ +flask db upgrade ``` -```SQL -DROP TABLE alembic_version; +Or use the init script: + +```bash +python db-init.py +``` + +## Upgrading Between Versions + +When upgrading ExaFS to a new version, apply any new migrations: + +```bash +flask db upgrade +``` + +Only migrations that have not been applied yet are run. + +## Existing Installation (One-Time Setup) + +If you already have a running ExaFS database from any previous version, the baseline migration is idempotent — it will create missing tables, add missing columns, and skip anything that already exists. + +### Deployments that used `flask db init` (self-managed migrations) + +Some deployments previously ran `flask db init` to create a local `migrations/` directory and auto-generated migration files. Starting with v1.2.2, migration files are tracked in git and shipped with the project. To switch to the official migrations: + +1. **Delete the local migrations directory** created by `flask db init`: + ```bash + rm -rf migrations/ + ``` + Migrations are now bundled inside the `flowapp` pip package — no local directory needed. + +2. **Clear the old alembic_version** and **stamp the baseline** to register with the official migration track (your schema is already up to date): + ```sql + DELETE FROM alembic_version; + ``` + ```bash + flask db stamp 001_baseline + ``` + +3. From now on, just run `flask db upgrade` when updating ExaFS. + +### Deployments with a stale `alembic_version` table + +If your database still has an `alembic_version` table from a previous migration setup but no local `migrations/` directory, clear it first: + +```sql +DELETE FROM alembic_version; ``` + +Then run the upgrade: + +```bash -flask db init -flask db migrate -m "Initial migration based on current DB state" flask db upgrade ``` -## Steps for Upgrading to v1.0.x -Limits for number of rules were introduced. Some database engines (Mariadb 10.x for example) have issue to set Non Null foreigin key to 0 and automatic migrations fail. The solution may be in diferent version (Mariadb 11.x works fine), or to set limits in db manually later. +The baseline migration will inspect your database and bring it up to the current schema without affecting existing data. -To set the limit to 0 for existing organizations run +## Upgrading from v0.x to v1.0+ -```SQL -UPDATE organization -SET limit_flowspec4 = 0, limit_flowspec6 = 0, limit_rtbh = 0 -WHERE limit_flowspec4 IS NULL OR limit_flowspec6 IS NULL OR limit_rtbh IS NULL; +If you are upgrading from a pre-1.0 version, the baseline migration will add the missing `org_id` columns and organization limit columns automatically. However, existing rules still need to be linked to organizations. An optional helper script is provided for this: + +```bash +python scripts/migrate_v0x_to_v1.py ``` -In all cases we need later assign rules to organizations. There's an admin endpoint for this: +This script: +1. Sets NULL organization limits to 0 +2. Helps assign existing rules to organizations based on users' organizations +3. Reports users with multiple organizations or ambiguous rule ownership that need manual assignment + +Feel free to contact jiri.vrany@cesnet.cz if you need help with the migration. 
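+
+For reference, the core of that backfill is sketched below. This is an illustrative sketch only, not the shipped script: the authoritative implementation is `scripts/migrate_v0x_to_v1.py`. Table and column names are taken from the baseline migration; the function name, the raw-SQL approach, and the MariaDB/MySQL dialect are assumptions made for illustration.
+
+```python
+from sqlalchemy import text
+
+
+def backfill_org_ids(engine):
+    """Sketch of the v0.x -> v1.0+ data backfill (illustrative only)."""
+    with engine.begin() as conn:
+        # 1. NULL organization limits become 0 (the same fix the old
+        #    v1.0.x upgrade notes applied by hand).
+        conn.execute(
+            text(
+                "UPDATE organization "
+                "SET limit_flowspec4 = 0, limit_flowspec6 = 0, limit_rtbh = 0 "
+                "WHERE limit_flowspec4 IS NULL OR limit_flowspec6 IS NULL "
+                "OR limit_rtbh IS NULL"
+            )
+        )
+        # 2. Link unassigned rules to the author's organization, but only
+        #    when the author belongs to exactly one organization. Rules of
+        #    multi-organization users are left untouched for manual review.
+        for table in ("flowspec4", "flowspec6", "RTBH"):
+            conn.execute(
+                text(
+                    f"UPDATE {table} r SET r.org_id = "
+                    "(SELECT MIN(uo.organization_id) FROM user_organization uo "
+                    "WHERE uo.user_id = r.user_id) "
+                    "WHERE (r.org_id IS NULL OR r.org_id = 0) "
+                    "AND (SELECT COUNT(*) FROM user_organization uo "
+                    "WHERE uo.user_id = r.user_id) = 1"
+                )
+            )
+```
+
+Call it with the application's engine, for example `backfill_org_ids(db.engine)` inside an application context.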
+ +## Creating New Migrations + +When you modify a database model, create a new migration: + +```bash +flask db migrate -m "Description of changes" +``` -`https://yourexafs.url/admin/set-org-if-zero` +Review the generated file in `flowapp/migrations/versions/`, then apply it: + +```bash +flask db upgrade +``` + +Commit the migration file to git so other deployments can apply it. + +## Development Reset + +To completely reset the database during development: + +```bash +python db-init.py --reset +``` -Or you can start with clean database and manually migrate data by SQL dump later. Feel free to contact jiri.vrany@cesnet.cz if you need help with the DB migration to 1.0.x. +This drops all tables and recreates them from scratch. **Do not use in production.** diff --git a/flowapp/__about__.py b/flowapp/__about__.py index 6cbfe4c3..d5b95ac1 100755 --- a/flowapp/__about__.py +++ b/flowapp/__about__.py @@ -1,4 +1,4 @@ -__version__ = "1.2.1" +__version__ = "1.2.2" __title__ = "ExaFS" __description__ = "Tool for creation, validation, and execution of ExaBGP messages." __author__ = "CESNET / Jiri Vrany, Petr Adamec, Josef Verich, Jakub Man" diff --git a/flowapp/__init__.py b/flowapp/__init__.py index 5bed6f8d..fb786615 100644 --- a/flowapp/__init__.py +++ b/flowapp/__init__.py @@ -1,4 +1,6 @@ # -*- coding: utf-8 -*- +import os + from flask import Flask, redirect, render_template, session, url_for, flash from flask_sso import SSO @@ -13,9 +15,11 @@ from .__about__ import __version__ from .instance_config import InstanceConfig +# Migrations directory lives inside the package so it ships with pip install +_migrations_dir = os.path.join(os.path.dirname(__file__), "migrations") db = SQLAlchemy() -migrate = Migrate() +migrate = Migrate(directory=_migrations_dir) csrf = CSRFProtect() ext = SSO() sess = Session() diff --git a/flowapp/migrations/README b/flowapp/migrations/README new file mode 100644 index 00000000..0e048441 --- /dev/null +++ b/flowapp/migrations/README @@ -0,0 +1 @@ +Single-database configuration for Flask. diff --git a/flowapp/migrations/alembic.ini b/flowapp/migrations/alembic.ini new file mode 100644 index 00000000..ec9d45c2 --- /dev/null +++ b/flowapp/migrations/alembic.ini @@ -0,0 +1,50 @@ +# A generic, single database configuration. + +[alembic] +# template used to generate migration files +# file_template = %%(rev)s_%%(slug)s + +# set to 'true' to run the environment during +# the 'revision' command, regardless of autogenerate +# revision_environment = false + + +# Logging configuration +[loggers] +keys = root,sqlalchemy,alembic,flask_migrate + +[handlers] +keys = console + +[formatters] +keys = generic + +[logger_root] +level = WARN +handlers = console +qualname = + +[logger_sqlalchemy] +level = WARN +handlers = +qualname = sqlalchemy.engine + +[logger_alembic] +level = INFO +handlers = +qualname = alembic + +[logger_flask_migrate] +level = INFO +handlers = +qualname = flask_migrate + +[handler_console] +class = StreamHandler +args = (sys.stderr,) +level = NOTSET +formatter = generic + +[formatter_generic] +format = %(levelname)-5.5s [%(name)s] %(message)s +datefmt = %H:%M:%S diff --git a/flowapp/migrations/env.py b/flowapp/migrations/env.py new file mode 100644 index 00000000..fc132b98 --- /dev/null +++ b/flowapp/migrations/env.py @@ -0,0 +1,108 @@ +import logging +from logging.config import fileConfig + +from flask import current_app + +from alembic import context + +# this is the Alembic Config object, which provides +# access to the values within the .ini file in use. 
+config = context.config + +# Interpret the config file for Python logging. +# This line sets up loggers basically. +fileConfig(config.config_file_name) +logger = logging.getLogger('alembic.env') + + +def get_engine(): + return current_app.extensions['migrate'].db.engine + + +def get_engine_url(): + try: + return get_engine().url.render_as_string(hide_password=False).replace( + '%', '%%') + except AttributeError: + return str(get_engine().url).replace('%', '%%') + + +# add your model's MetaData object here +# for 'autogenerate' support +# from myapp import mymodel +# target_metadata = mymodel.Base.metadata +config.set_main_option('sqlalchemy.url', get_engine_url()) +target_db = current_app.extensions['migrate'].db + +# other values from the config, defined by the needs of env.py, +# can be acquired: +# my_important_option = config.get_main_option("my_important_option") +# ... etc. + + +def get_metadata(): + if hasattr(target_db, 'metadatas'): + return target_db.metadatas[None] + return target_db.metadata + + +def run_migrations_offline(): + """Run migrations in 'offline' mode. + + This configures the context with just a URL + and not an Engine, though an Engine is acceptable + here as well. By skipping the Engine creation + we don't even need a DBAPI to be available. + + Calls to context.execute() here emit the given string to the + script output. + + """ + url = config.get_main_option("sqlalchemy.url") + context.configure( + url=url, target_metadata=get_metadata(), literal_binds=True + ) + + with context.begin_transaction(): + context.run_migrations() + + +def run_migrations_online(): + """Run migrations in 'online' mode. + + In this scenario we need to create an Engine + and associate a connection with the context. + + """ + + # this callback is used to prevent an auto-migration from being generated + # when there are no changes to the schema + # reference: http://alembic.zzzcomputing.com/en/latest/cookbook.html + def process_revision_directives(context, revision, directives): + if getattr(config.cmd_opts, 'autogenerate', False): + script = directives[0] + if script.upgrade_ops.is_empty(): + directives[:] = [] + logger.info('No changes in schema detected.') + + conf_args = current_app.extensions['migrate'].configure_args + if conf_args.get("process_revision_directives") is None: + conf_args["process_revision_directives"] = process_revision_directives + + connectable = get_engine() + + with connectable.connect() as connection: + context.configure( + connection=connection, + target_metadata=get_metadata(), + **conf_args + ) + + with context.begin_transaction(): + context.run_migrations() + + +if context.is_offline_mode(): + run_migrations_offline() +else: + run_migrations_online() diff --git a/flowapp/migrations/script.py.mako b/flowapp/migrations/script.py.mako new file mode 100644 index 00000000..2c015630 --- /dev/null +++ b/flowapp/migrations/script.py.mako @@ -0,0 +1,24 @@ +"""${message} + +Revision ID: ${up_revision} +Revises: ${down_revision | comma,n} +Create Date: ${create_date} + +""" +from alembic import op +import sqlalchemy as sa +${imports if imports else ""} + +# revision identifiers, used by Alembic. 
+revision = ${repr(up_revision)} +down_revision = ${repr(down_revision)} +branch_labels = ${repr(branch_labels)} +depends_on = ${repr(depends_on)} + + +def upgrade(): + ${upgrades if upgrades else "pass"} + + +def downgrade(): + ${downgrades if downgrades else "pass"} diff --git a/flowapp/migrations/versions/001_baseline.py b/flowapp/migrations/versions/001_baseline.py new file mode 100644 index 00000000..9ad7859c --- /dev/null +++ b/flowapp/migrations/versions/001_baseline.py @@ -0,0 +1,560 @@ +"""Baseline migration - complete schema for ExaFS v1.2.2 + +Idempotent migration that brings any ExaFS database to the v1.2.2 schema. +- For new installations: creates all tables and seed data +- For existing installations: creates missing tables, adds missing columns, + skips anything that already exists + +Usage: + New install: flask db upgrade + Existing install: DELETE FROM alembic_version; flask db upgrade + +Revision ID: 001_baseline +Revises: +Create Date: 2026-02-13 + +""" + +from alembic import op +import sqlalchemy as sa + + +# revision identifiers, used by Alembic. +revision = "001_baseline" +down_revision = None +branch_labels = None +depends_on = None + + +def _table_exists(table_name): + """Check if a table exists in the current database.""" + conn = op.get_bind() + return sa.inspect(conn).has_table(table_name) + + +def _column_exists(table_name, column_name): + """Check if a column exists in a table.""" + conn = op.get_bind() + columns = [c["name"] for c in sa.inspect(conn).get_columns(table_name)] + return column_name in columns + + +def _table_has_data(table_name): + """Check if a table has any rows.""" + conn = op.get_bind() + table_clause = sa.table(table_name) + stmt = sa.select(sa.func.count()).select_from(table_clause) + result = conn.execute(stmt) + return result.scalar() > 0 + + +def upgrade(): + # --- Tables with no foreign key dependencies --- + + if not _table_exists("role"): + role_table = op.create_table( + "role", + sa.Column("id", sa.Integer(), primary_key=True), + sa.Column("name", sa.String(length=20), unique=True), + sa.Column("description", sa.String(length=260)), + ) + _seed_roles = True + else: + role_table = sa.table( + "role", + sa.column("name", sa.String), + sa.column("description", sa.String), + ) + _seed_roles = False + + if not _table_exists("organization"): + organization_table = op.create_table( + "organization", + sa.Column("id", sa.Integer(), primary_key=True), + sa.Column("name", sa.String(length=150), unique=True), + sa.Column("arange", sa.Text()), + sa.Column("limit_flowspec4", sa.Integer(), default=0), + sa.Column("limit_flowspec6", sa.Integer(), default=0), + sa.Column("limit_rtbh", sa.Integer(), default=0), + ) + else: + organization_table = None + # Add limit columns if missing (pre-v1.0 databases) + for col_name in ("limit_flowspec4", "limit_flowspec6", "limit_rtbh"): + if not _column_exists("organization", col_name): + op.add_column("organization", sa.Column(col_name, sa.Integer(), default=0)) + + if not _table_exists("rstate"): + rstate_table = op.create_table( + "rstate", + sa.Column("id", sa.Integer(), primary_key=True), + sa.Column("description", sa.String(length=260)), + ) + _seed_rstates = True + else: + rstate_table = sa.table( + "rstate", + sa.column("description", sa.String), + ) + _seed_rstates = False + + if not _table_exists("user"): + op.create_table( + "user", + sa.Column("id", sa.Integer(), primary_key=True), + sa.Column("uuid", sa.String(length=180), unique=True), + sa.Column("comment", sa.String(length=500)), + 
sa.Column("email", sa.String(length=255)), + sa.Column("name", sa.String(length=255)), + sa.Column("phone", sa.String(length=255)), + ) + + if not _table_exists("as_path"): + op.create_table( + "as_path", + sa.Column("id", sa.Integer(), primary_key=True), + sa.Column("prefix", sa.String(length=120), unique=True), + sa.Column("as_path", sa.String(length=250)), + ) + + if not _table_exists("log"): + op.create_table( + "log", + sa.Column("id", sa.Integer(), primary_key=True), + sa.Column("time", sa.DateTime()), + sa.Column("task", sa.String(length=1000)), + sa.Column("author", sa.String(length=1000)), + sa.Column("rule_type", sa.Integer()), + sa.Column("rule_id", sa.Integer()), + sa.Column("user_id", sa.Integer()), + ) + else: + # Add author column if missing (pre-v0.5 databases) + if not _column_exists("log", "author"): + op.add_column( + "log", + sa.Column("author", sa.String(length=1000)), + ) + + # --- Junction tables --- + + if not _table_exists("user_role"): + op.create_table( + "user_role", + sa.Column("user_id", sa.Integer(), sa.ForeignKey("user.id"), nullable=False), + sa.Column("role_id", sa.Integer(), sa.ForeignKey("role.id"), nullable=False), + sa.PrimaryKeyConstraint("user_id", "role_id"), + ) + + if not _table_exists("user_organization"): + op.create_table( + "user_organization", + sa.Column("user_id", sa.Integer(), sa.ForeignKey("user.id"), nullable=False), + sa.Column( + "organization_id", + sa.Integer(), + sa.ForeignKey("organization.id"), + nullable=False, + ), + sa.PrimaryKeyConstraint("user_id", "organization_id"), + ) + + # --- Tables with foreign key to role --- + + if not _table_exists("action"): + action_table = op.create_table( + "action", + sa.Column("id", sa.Integer(), primary_key=True), + sa.Column("name", sa.String(length=120), unique=True), + sa.Column("command", sa.String(length=120), unique=True), + sa.Column("description", sa.String(length=260)), + sa.Column("role_id", sa.Integer(), sa.ForeignKey("role.id"), nullable=False), + ) + _seed_actions = True + else: + action_table = sa.table( + "action", + sa.column("name", sa.String), + sa.column("command", sa.String), + sa.column("description", sa.String), + sa.column("role_id", sa.Integer), + ) + _seed_actions = False + + if not _table_exists("community"): + community_table = op.create_table( + "community", + sa.Column("id", sa.Integer(), primary_key=True), + sa.Column("name", sa.String(length=120), unique=True), + sa.Column("comm", sa.String(length=2047)), + sa.Column("larcomm", sa.String(length=2047)), + sa.Column("extcomm", sa.String(length=2047)), + sa.Column("description", sa.String(length=255)), + sa.Column("as_path", sa.Boolean(), default=False), + sa.Column("role_id", sa.Integer(), sa.ForeignKey("role.id"), nullable=False), + ) + _seed_communities = True + else: + community_table = sa.table( + "community", + sa.column("name", sa.String), + sa.column("comm", sa.String), + sa.column("larcomm", sa.String), + sa.column("extcomm", sa.String), + sa.column("description", sa.String), + sa.column("as_path", sa.Boolean), + sa.column("role_id", sa.Integer), + ) + _seed_communities = False + # Add community columns if missing (pre-v0.7 databases) + for col_name in ("comm", "larcomm", "extcomm"): + if not _column_exists("community", col_name): + op.add_column( + "community", + sa.Column(col_name, sa.String(length=2047)), + ) + # Add as_path column if missing (pre-v1.1 databases) + if not _column_exists("community", "as_path"): + op.add_column( + "community", + sa.Column("as_path", sa.Boolean(), default=False), + ) + + 
# --- API key tables --- + + if not _table_exists("api_key"): + op.create_table( + "api_key", + sa.Column("id", sa.Integer(), primary_key=True), + sa.Column("machine", sa.String(length=255)), + sa.Column("key", sa.String(length=255)), + sa.Column("readonly", sa.Boolean(), default=False), + sa.Column("expires", sa.DateTime(), nullable=True), + sa.Column("comment", sa.String(length=255)), + sa.Column("user_id", sa.Integer(), sa.ForeignKey("user.id"), nullable=False), + sa.Column( + "org_id", + sa.Integer(), + sa.ForeignKey("organization.id"), + nullable=False, + ), + ) + else: + # Add columns introduced after initial api_key creation + for col_name, col_type, col_default in [ + ("comment", sa.String(length=255), None), + ("readonly", sa.Boolean(), False), + ("expires", sa.DateTime(), None), + ]: + if not _column_exists("api_key", col_name): + op.add_column( + "api_key", + sa.Column(col_name, col_type, default=col_default), + ) + if not _column_exists("api_key", "org_id"): + op.add_column( + "api_key", + sa.Column( + "org_id", + sa.Integer(), + nullable=True, + server_default="0", + ), + ) + + if not _table_exists("machine_api_key"): + op.create_table( + "machine_api_key", + sa.Column("id", sa.Integer(), primary_key=True), + sa.Column("machine", sa.String(length=255)), + sa.Column("key", sa.String(length=255)), + sa.Column("readonly", sa.Boolean(), default=True), + sa.Column("expires", sa.DateTime(), nullable=True), + sa.Column("comment", sa.String(length=255)), + sa.Column("user_id", sa.Integer(), sa.ForeignKey("user.id"), nullable=False), + sa.Column( + "org_id", + sa.Integer(), + sa.ForeignKey("organization.id"), + nullable=False, + ), + ) + else: + # Ensure machine_api_key has all expected columns + if not _column_exists("machine_api_key", "org_id"): + op.add_column( + "machine_api_key", + sa.Column( + "org_id", + sa.Integer(), + nullable=True, + ), + ) + + # --- Rule tables --- + + if not _table_exists("flowspec4"): + op.create_table( + "flowspec4", + sa.Column("id", sa.Integer(), primary_key=True), + sa.Column("source", sa.String(length=255)), + sa.Column("source_mask", sa.Integer()), + sa.Column("source_port", sa.String(length=255)), + sa.Column("dest", sa.String(length=255)), + sa.Column("dest_mask", sa.Integer()), + sa.Column("dest_port", sa.String(length=255)), + sa.Column("protocol", sa.String(length=255)), + sa.Column("flags", sa.String(length=255)), + sa.Column("packet_len", sa.String(length=255)), + sa.Column("fragment", sa.String(length=255)), + sa.Column("comment", sa.Text()), + sa.Column("expires", sa.DateTime()), + sa.Column("created", sa.DateTime()), + sa.Column("action_id", sa.Integer(), sa.ForeignKey("action.id"), nullable=False), + sa.Column("user_id", sa.Integer(), sa.ForeignKey("user.id"), nullable=False), + sa.Column( + "org_id", + sa.Integer(), + sa.ForeignKey("organization.id"), + nullable=False, + ), + sa.Column("rstate_id", sa.Integer(), sa.ForeignKey("rstate.id"), nullable=False), + ) + else: + if not _column_exists("flowspec4", "fragment"): + op.add_column( + "flowspec4", + sa.Column("fragment", sa.String(length=255)), + ) + if not _column_exists("flowspec4", "org_id"): + op.add_column( + "flowspec4", + sa.Column( + "org_id", + sa.Integer(), + nullable=True, + ), + ) + + if not _table_exists("flowspec6"): + op.create_table( + "flowspec6", + sa.Column("id", sa.Integer(), primary_key=True), + sa.Column("source", sa.String(length=255)), + sa.Column("source_mask", sa.Integer()), + sa.Column("source_port", sa.String(length=255)), + sa.Column("dest", 
sa.String(length=255)), + sa.Column("dest_mask", sa.Integer()), + sa.Column("dest_port", sa.String(length=255)), + sa.Column("next_header", sa.String(length=255)), + sa.Column("flags", sa.String(length=255)), + sa.Column("packet_len", sa.String(length=255)), + sa.Column("comment", sa.Text()), + sa.Column("expires", sa.DateTime()), + sa.Column("created", sa.DateTime()), + sa.Column("action_id", sa.Integer(), sa.ForeignKey("action.id"), nullable=False), + sa.Column("user_id", sa.Integer(), sa.ForeignKey("user.id"), nullable=False), + sa.Column( + "org_id", + sa.Integer(), + sa.ForeignKey("organization.id"), + nullable=False, + ), + sa.Column("rstate_id", sa.Integer(), sa.ForeignKey("rstate.id"), nullable=False), + ) + else: + if not _column_exists("flowspec6", "org_id"): + op.add_column( + "flowspec6", + sa.Column( + "org_id", + sa.Integer(), + nullable=True, + ), + ) + + if not _table_exists("RTBH"): + op.create_table( + "RTBH", + sa.Column("id", sa.Integer(), primary_key=True), + sa.Column("ipv4", sa.String(length=255)), + sa.Column("ipv4_mask", sa.Integer()), + sa.Column("ipv6", sa.String(length=255)), + sa.Column("ipv6_mask", sa.Integer()), + sa.Column( + "community_id", + sa.Integer(), + sa.ForeignKey("community.id"), + nullable=False, + ), + sa.Column("comment", sa.Text()), + sa.Column("expires", sa.DateTime()), + sa.Column("created", sa.DateTime()), + sa.Column("user_id", sa.Integer(), sa.ForeignKey("user.id"), nullable=False), + sa.Column( + "org_id", + sa.Integer(), + sa.ForeignKey("organization.id"), + nullable=False, + ), + sa.Column("rstate_id", sa.Integer(), sa.ForeignKey("rstate.id"), nullable=False), + ) + else: + if not _column_exists("RTBH", "org_id"): + op.add_column( + "RTBH", + sa.Column( + "org_id", + sa.Integer(), + nullable=True, + ), + ) + + if not _table_exists("whitelist"): + op.create_table( + "whitelist", + sa.Column("id", sa.Integer(), primary_key=True), + sa.Column("ip", sa.String(length=255)), + sa.Column("mask", sa.Integer()), + sa.Column("comment", sa.Text()), + sa.Column("expires", sa.DateTime()), + sa.Column("created", sa.DateTime()), + sa.Column("user_id", sa.Integer(), sa.ForeignKey("user.id"), nullable=False), + sa.Column( + "org_id", + sa.Integer(), + sa.ForeignKey("organization.id"), + nullable=False, + ), + sa.Column("rstate_id", sa.Integer(), sa.ForeignKey("rstate.id"), nullable=False), + ) + + if not _table_exists("rule_whitelist_cache"): + op.create_table( + "rule_whitelist_cache", + sa.Column("id", sa.Integer(), primary_key=True), + sa.Column("rid", sa.Integer()), + sa.Column("rtype", sa.Integer()), + sa.Column("rorigin", sa.Integer()), + sa.Column( + "whitelist_id", + sa.Integer(), + sa.ForeignKey("whitelist.id"), + nullable=True, + ), + ) + + # --- Seed data (only for newly created tables) --- + + if _seed_roles and not _table_has_data("role"): + op.bulk_insert( + role_table, + [ + {"name": "view", "description": "just view, no edit"}, + {"name": "user", "description": "can edit"}, + {"name": "admin", "description": "admin"}, + ], + ) + + # Ensure rstate has the "whitelisted rule" entry (id=4, added in v1.1.0) + if not _seed_rstates and _table_has_data("rstate"): + conn = op.get_bind() + result = conn.execute(sa.text("SELECT COUNT(*) FROM rstate WHERE id = 4")) + if result.scalar() == 0: + conn.execute(sa.text("INSERT INTO rstate (id, description) VALUES (4, 'whitelisted rule')")) + + if _seed_rstates and not _table_has_data("rstate"): + op.bulk_insert( + rstate_table, + [ + {"description": "active rule"}, + {"description": "withdrawed rule"}, + 
{"description": "deleted rule"}, + {"description": "whitelisted rule"}, + ], + ) + + if _seed_actions and not _table_has_data("action"): + op.bulk_insert( + action_table, + [ + { + "name": "QoS 100 kbps", + "command": "rate-limit 12800", + "description": "QoS", + "role_id": 2, + }, + { + "name": "QoS 1Mbps", + "command": "rate-limit 13107200", + "description": "QoS", + "role_id": 2, + }, + { + "name": "QoS 10Mbps", + "command": "rate-limit 131072000", + "description": "QoS", + "role_id": 2, + }, + { + "name": "Discard", + "command": "discard", + "description": "Discard", + "role_id": 2, + }, + ], + ) + + if _seed_communities and not _table_has_data("community"): + op.bulk_insert( + community_table, + [ + { + "name": "65535:65283", + "comm": "65535:65283", + "larcomm": "", + "extcomm": "", + "description": "local-as", + "as_path": False, + "role_id": 2, + }, + { + "name": "64496:64511", + "comm": "64496:64511", + "larcomm": "", + "extcomm": "", + "description": "", + "as_path": False, + "role_id": 2, + }, + { + "name": "64497:64510", + "comm": "64497:64510", + "larcomm": "", + "extcomm": "", + "description": "", + "as_path": False, + "role_id": 2, + }, + ], + ) + + +def downgrade(): + op.drop_table("rule_whitelist_cache") + op.drop_table("whitelist") + op.drop_table("RTBH") + op.drop_table("flowspec6") + op.drop_table("flowspec4") + op.drop_table("machine_api_key") + op.drop_table("api_key") + op.drop_table("community") + op.drop_table("action") + op.drop_table("user_organization") + op.drop_table("user_role") + op.drop_table("log") + op.drop_table("as_path") + op.drop_table("user") + op.drop_table("rstate") + op.drop_table("organization") + op.drop_table("role") diff --git a/flowapp/models/community.py b/flowapp/models/community.py index 880a837a..5df43102 100644 --- a/flowapp/models/community.py +++ b/flowapp/models/community.py @@ -45,6 +45,7 @@ class ASPath(db.Model): # Methods and initializer +# Note: seed data is also defined in migrations/versions/001_baseline.py - keep in sync @event.listens_for(Community.__table__, "after_create") def insert_initial_communities(table, conn, *args, **kwargs): conn.execute( diff --git a/flowapp/models/organization.py b/flowapp/models/organization.py index baf0ec1f..67db8c5c 100644 --- a/flowapp/models/organization.py +++ b/flowapp/models/organization.py @@ -29,6 +29,7 @@ def get_users(self): # Event listeners for Organization +# Note: seed data is also defined in migrations/versions/001_baseline.py - keep in sync @event.listens_for(Organization.__table__, "after_create") def insert_initial_organizations(table, conn, *args, **kwargs): conn.execute(table.insert().values(name="TU Liberec", arange="147.230.0.0/16\n2001:718:1c01::/48")) diff --git a/flowapp/models/rules/base.py b/flowapp/models/rules/base.py index 22fbc089..cbe889d1 100644 --- a/flowapp/models/rules/base.py +++ b/flowapp/models/rules/base.py @@ -32,6 +32,7 @@ def __init__(self, name, command, description, role_id=2): # Event listeners for Rstate +# Note: seed data is also defined in migrations/versions/001_baseline.py - keep in sync @event.listens_for(Rstate.__table__, "after_create") def insert_initial_rulestates(table, conn, *args, **kwargs): conn.execute(table.insert().values(description="active rule")) @@ -40,6 +41,7 @@ def insert_initial_rulestates(table, conn, *args, **kwargs): conn.execute(table.insert().values(description="whitelisted rule")) +# Note: seed data is also defined in migrations/versions/001_baseline.py - keep in sync @event.listens_for(Action.__table__, 
"after_create") def insert_initial_actions(table, conn, *args, **kwargs): conn.execute( diff --git a/flowapp/models/user.py b/flowapp/models/user.py index dcb2d7eb..78a028d4 100644 --- a/flowapp/models/user.py +++ b/flowapp/models/user.py @@ -72,6 +72,7 @@ def __repr__(self): # Event listeners for Role +# Note: seed data is also defined in migrations/versions/001_baseline.py - keep in sync @event.listens_for(Role.__table__, "after_create") def insert_initial_roles(table, conn, *args, **kwargs): conn.execute(table.insert().values(name="view", description="just view, no edit")) diff --git a/flowapp/templates/forms/rule.html b/flowapp/templates/forms/rule.html deleted file mode 100644 index 1a1c94bb..00000000 --- a/flowapp/templates/forms/rule.html +++ /dev/null @@ -1,105 +0,0 @@ -{% extends 'layouts/default.html' %} -{% block title %}Add IPv4 rule{% endblock %} -{% block content %} -
- -{% endblock %} \ No newline at end of file diff --git a/flowapp/templates/layouts/default.html b/flowapp/templates/layouts/default.html index 4ecbecd0..402f5669 100644 --- a/flowapp/templates/layouts/default.html +++ b/flowapp/templates/layouts/default.html @@ -18,7 +18,7 @@ - + {% block head %}{% endblock %} @@ -55,7 +55,7 @@ {% endfor %}
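The contributor guidance in CLAUDE.md ("Create migrations for model changes ... commit migration files to git") can also be checked mechanically in CI. Below is a possible guard test, offered as a sketch under assumptions: it mirrors the `create_app` plus `db.init_app` pattern that `db-init.py` uses, `compare_metadata` is Alembic's public autogenerate helper, and the test name and choice of config class are hypothetical.

```python
from alembic.autogenerate import compare_metadata
from alembic.migration import MigrationContext
from flask_migrate import upgrade

from flowapp import create_app, db
import config


def test_models_match_committed_migrations():
    """Fail when a model change has no committed migration."""
    app = create_app(config.DevelopmentConfig)  # point this at a disposable test DB
    db.init_app(app)
    with app.app_context():
        upgrade()  # apply every committed migration, ending at the current head
        with db.engine.connect() as conn:
            diff = compare_metadata(MigrationContext.configure(conn), db.metadata)
    # An empty diff means the committed migrations fully describe the models.
    assert diff == [], f"Models drifted from committed migrations: {diff}"
```

If the test fails, running `flask db migrate -m "..."` and committing the generated file in `flowapp/migrations/versions/` restores the invariant.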