diff --git a/.github/workflows/python-package.yml b/.github/workflows/python-package.yml index df9bee3..f4328ce 100644 --- a/.github/workflows/python-package.yml +++ b/.github/workflows/python-package.yml @@ -17,7 +17,7 @@ jobs: strategy: fail-fast: false matrix: - python-version: ["3.9", "3.10", "3.11", "3.12", "3.13"] + python-version: ["3.10", "3.11", "3.12", "3.13", "3.14"] steps: - uses: actions/checkout@v4 diff --git a/docs/configuring.md b/docs/configuring.md index 9e50683..645362c 100644 --- a/docs/configuring.md +++ b/docs/configuring.md @@ -79,16 +79,16 @@ table, or a child table. ### Data types By default, the data type defined in the database table for each column is based on a mapping between the data type -indicated in the XSD and a corresponding `sqlalchemy` type implemented in the following three functions: +indicated in the XSD and a corresponding `sqlalchemy` type implemented in the following three methods: -??? info "Default: `types_mapping_default`" - ::: xml2db.table.column.types_mapping_default +??? info "Default: `DatabaseDialect.column_type`" + ::: xml2db.dialect.base.DatabaseDialect.column_type -??? info "MySQL: `types_mapping_mysql`" - ::: xml2db.table.column.types_mapping_mysql +??? info "MySQL: `MySQLDialect.column_type`" + ::: xml2db.dialect.mysql.MySQLDialect.column_type -??? info "MSSQL: `types_mapping_mssql`" - ::: xml2db.table.column.types_mapping_mssql +??? info "MSSQL: `MSSQLDialect.column_type`" + ::: xml2db.dialect.mssql.MSSQLDialect.column_type You may override this mapping by specifying a column type for any field in the model config. Custom column types are defined as `sqlalchemy` types and will be passed to the `sqlalchemy.Column` constructor as is. 
diff --git a/pyproject.toml b/pyproject.toml index 7d38d7f..02b3ce6 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,13 +4,13 @@ build-backend = "setuptools.build_meta" [project] name = "xml2db" -version = "0.12.6" +version = "0.13.0" authors = [ { name="Commission de régulation de l'énergie", email="opensource@cre.fr" }, ] description = "Import complex XML files to a relational database" readme = "README.md" -requires-python = ">=3.9" +requires-python = ">=3.10" classifiers = [ "Programming Language :: Python :: 3", "License :: OSI Approved :: MIT License", diff --git a/src/xml2db/dialect/__init__.py b/src/xml2db/dialect/__init__.py new file mode 100644 index 0000000..c783f3a --- /dev/null +++ b/src/xml2db/dialect/__init__.py @@ -0,0 +1,65 @@ +"""Backend-specific dialect classes for xml2db. + +This package centralises all database-backend-specific behaviour that was +previously scattered across the codebase as ``if db_type == "..."`` +conditionals. Each supported backend has a dedicated subclass of +:class:`~xml2db.dialect.base.DatabaseDialect`. Unknown backends fall back to +the base class, which provides safe, generic defaults. 
+ +Usage:: + + from xml2db.dialect import get_dialect + + dialect = get_dialect("postgresql") + physical_name = dialect.db_identifier("some_very_long_xsd_derived_name") + +The registry is a plain dict so that third-party code (or tests) can register +custom dialects without subclassing anything in xml2db:: + + from xml2db.dialect import DIALECT_REGISTRY + from mypackage import OracleDialect + + DIALECT_REGISTRY["oracle"] = OracleDialect +""" + +from .base import DatabaseDialect +from .duckdb import DuckDBDialect +from .mssql import MSSQLDialect +from .mysql import MySQLDialect +from .postgresql import PostgreSQLDialect + +__all__ = [ + "DatabaseDialect", + "DuckDBDialect", + "MSSQLDialect", + "MySQLDialect", + "PostgreSQLDialect", + "DIALECT_REGISTRY", + "get_dialect", +] + +# Maps the SQLAlchemy dialect name (as returned by engine.dialect.name) to +# the corresponding DatabaseDialect subclass. +DIALECT_REGISTRY: dict[str, type[DatabaseDialect]] = { + "postgresql": PostgreSQLDialect, + "mssql": MSSQLDialect, + "mysql": MySQLDialect, + "mariadb": MySQLDialect, # SQLAlchemy reports MariaDB as "mariadb" + "duckdb": DuckDBDialect, +} + + +def get_dialect(db_type: str | None) -> DatabaseDialect: + """Return a :class:`DatabaseDialect` instance for the given backend name. + + Args: + db_type: The SQLAlchemy dialect name, e.g. ``"postgresql"``, + ``"mssql"``, ``"mysql"``, ``"duckdb"``. ``None`` or any + unrecognised string falls back to the base + :class:`DatabaseDialect`, which uses safe generic defaults. + + Returns: + An instantiated :class:`DatabaseDialect` (or subclass) ready for use. 
+ """ + cls = DIALECT_REGISTRY.get(db_type, DatabaseDialect) + return cls() diff --git a/src/xml2db/dialect/base.py b/src/xml2db/dialect/base.py new file mode 100644 index 0000000..6789784 --- /dev/null +++ b/src/xml2db/dialect/base.py @@ -0,0 +1,315 @@ +import hashlib +import logging +from typing import Any, TYPE_CHECKING + +from sqlalchemy import ( + Column, + Integer, + PrimaryKeyConstraint, + Index, + String, + Double, + DateTime, + Boolean, + SmallInteger, + BigInteger, + LargeBinary, +) +from sqlalchemy import inspect as sqlalchemy_inspect +import sqlalchemy.schema + +if TYPE_CHECKING: + from ..table.column import DataModelColumn + +logger = logging.getLogger(__name__) + + +class DatabaseDialect: + """Encapsulates all backend-specific behaviour for xml2db. + + The base implementation provides safe, backend-agnostic defaults that work + correctly for most SQL databases. Subclasses override only the methods that + require backend-specific logic. + + Attributes: + MAX_IDENTIFIER_LENGTH: Maximum number of characters allowed in a table + or column name by this backend. Used by :meth:`db_identifier` to + decide whether truncation is needed. + """ + + MAX_IDENTIFIER_LENGTH: int = 63 # conservative default; matches PostgreSQL + + # ------------------------------------------------------------------ + # Identifier handling + # ------------------------------------------------------------------ + + def db_identifier(self, logical_name: str, temp_prefix: bool = False) -> str: + """Return the physical database identifier for a logical name. + + Names longer than :attr:`MAX_IDENTIFIER_LENGTH` are truncated using a + 7-character MD5 hash suffix, guaranteeing both uniqueness and stability + across runs. Names within the limit are returned unchanged. + + Args: + logical_name: The full logical name used inside the Python model + (e.g. ``"very_long_table_name_derived_from_xsd"``). + temp_prefix: Should we save 14 more characters for the temp prefix? 
+ + Returns: + A string that is safe to use as a database identifier for this + backend. Guaranteed to be stable across calls with the same input. + """ + max_len = self.MAX_IDENTIFIER_LENGTH + if temp_prefix: + max_len += 14 + + if len(logical_name) <= max_len: + return logical_name + suffix = "_" + hashlib.md5(logical_name.encode()).hexdigest()[:7] + return logical_name[: max_len - len(suffix)] + suffix + + def fk_ref(self, table_logical: str, col_logical: str) -> str: + """Return a ``"table.column"`` string for use in a ``ForeignKey(...)`` call. + + SQLAlchemy resolves the table part of a ForeignKey string against + ``metadata.tables``, which is indexed by the physical table name (the + first argument to ``Table()``). The column part is resolved via + ``table.c.get()``, which uses the column *key* (the logical name when + ``key=`` is set). So the table name must be physical and the column + name must be logical. + + Args: + table_logical: Logical name of the referenced table. + col_logical: Logical name of the referenced column. + + Returns: + A ``"physical_table.logical_col"`` string ready for use in a + ``ForeignKey(...)`` call. + """ + return f"{self.db_identifier(table_logical)}.{col_logical}" + + # ------------------------------------------------------------------ + # Column type mapping + # ------------------------------------------------------------------ + + def column_type(self, col: "DataModelColumn", temp: bool) -> Any: + """Return the SQLAlchemy type for a given column. + + The base implementation provides backend-agnostic defaults. Subclasses + may override this to provide backend-specific type mappings. + + Args: + col: The :class:`~xml2db.table.column.DataModelColumn` whose type + is being resolved. + temp: ``True`` when building the temporary staging table, ``False`` + for the target table. Some backends (e.g. MSSQL) use a + different type in temp tables to work around insertion issues. + + Returns: + A SQLAlchemy type class or instance. 
+ """ + if col.occurs[1] != 1: + return String(8000) + if col.data_type in ["decimal", "float", "double"]: + return Double + if col.data_type == "dateTime": + return DateTime(timezone=True) + if col.data_type in [ + "integer", + "int", + "nonPositiveInteger", + "nonNegativeInteger", + "positiveInteger", + "negativeInteger", + ]: + return Integer + if col.data_type == "boolean": + return Boolean + if col.data_type in ["short", "byte"]: + return SmallInteger + if col.data_type == "long": + return BigInteger + if col.data_type == "date": + return String(16) + if col.data_type == "time": + return String(18) + if col.data_type in ["string", "NMTOKEN", "duration", "token"]: + if col.max_length is None: + return String(1000) + min_length = 0 if col.min_length is None else col.min_length + if min_length >= col.max_length - 1 and not col.allow_empty: + return String(col.max_length) + return String(col.max_length) + if col.data_type == "binary": + return LargeBinary(col.max_length) + logger.warning( + f"unknown type '{col.data_type}' for column '{col.name}', defaulting to VARCHAR(1000) " + f"(this can be overridden by providing a field type in the configuration)" + ) + return String(1000) + + # ------------------------------------------------------------------ + # DDL: primary key + # ------------------------------------------------------------------ + + def pk_column(self, table_name: str) -> Column: + """Return the primary key ``Column`` for a target table. + + The base implementation uses ``autoincrement=True``, which is + supported by all major backends. DuckDB requires a ``Sequence``-based + workaround and overrides this method. + + Args: + table_name: The *logical* table name, used to build the column + name (``pk_``). Pass the logical name here; callers + that need the physical name for the ``Column`` constructor will + apply :meth:`db_identifier` separately in step 7. + + Returns: + A SQLAlchemy :class:`~sqlalchemy.Column` configured as the + primary key. 
+ """ + logical = f"pk_{table_name}" + return Column( + self.db_identifier(logical), + Integer, + key=logical, + primary_key=True, + autoincrement=True, + ) + + def pk_constraint(self, table_name: str, **kwargs: Any) -> PrimaryKeyConstraint: + """Return the ``PrimaryKeyConstraint`` for a target table. + + Extra keyword arguments are passed through to the + ``PrimaryKeyConstraint`` constructor, allowing callers to supply + backend-specific dialect options (e.g. ``mssql_clustered``) without + this method needing to know about them. + + Args: + table_name: The *logical* table name, used to build the constraint + name (``cx_pk_``). + **kwargs: Additional keyword arguments forwarded to + ``PrimaryKeyConstraint``. + + Returns: + A :class:`~sqlalchemy.PrimaryKeyConstraint` with a deterministic + name. + """ + return PrimaryKeyConstraint(name=f"cx_pk_{table_name}", **kwargs) + + # ------------------------------------------------------------------ + # DDL: extra indexes + # ------------------------------------------------------------------ + + def extra_indexes(self, table_name: str, config: dict) -> list[Index]: + """Return any backend-specific indexes to append to a table. + + The base implementation returns an empty list. The MSSQL dialect + overrides this to return a clustered columnstore index when + ``config["as_columnstore"]`` is ``True``. + + Args: + table_name: The *logical* table name. + config: The validated per-table configuration dict (as returned + by :meth:`validate_table_config`). + + Returns: + A (possibly empty) list of SQLAlchemy :class:`~sqlalchemy.Index` + objects to be appended to the table via + ``table.append_constraint(...)``. + """ + return [] + + def relation_extra_indexes( + self, rel_table_name: str, fk_self_col: str, fk_other_col: str, config: dict + ) -> tuple: + """Return any backend-specific indexes to append to a relation table. + + The base implementation returns an empty tuple. 
The MSSQL dialect + overrides this to return a clustered index on the FK columns. + + Args: + rel_table_name: The *logical* relation table name. + fk_self_col: The logical name of the FK column referencing the parent table. + fk_other_col: The logical name of the FK column referencing the other table. + config: The validated per-table configuration dict. + + Returns: + A (possibly empty) tuple of SQLAlchemy :class:`~sqlalchemy.Index` objects. + """ + return tuple() + + # ------------------------------------------------------------------ + # DDL: schema management + # ------------------------------------------------------------------ + + def create_schema(self, engine: Any, schema_name: str) -> None: + """Create a database schema if it does not already exist. + + The base implementation uses SQLAlchemy's ``inspect`` to check for + schema existence before issuing ``CREATE SCHEMA``, which works for + PostgreSQL, MSSQL, and MySQL. DuckDB overrides this with a + try/except approach because its inspector does not reliably list + schemas before they are created. + + Args: + engine: The bound SQLAlchemy engine. + schema_name: Name of the schema to create. + """ + + def do_create() -> None: + with engine.connect() as conn: + conn.execute(sqlalchemy.schema.CreateSchema(schema_name)) + conn.commit() + + inspector = sqlalchemy_inspect(engine) + if schema_name not in inspector.get_schema_names(): + do_create() + + # ------------------------------------------------------------------ + # Config validation + # ------------------------------------------------------------------ + + def validate_table_config(self, config: dict) -> dict: + """Strip or warn about config keys unsupported by this backend. + + The base implementation disables ``as_columnstore`` with a warning, + since clustered columnstore indexes are an MSSQL-only feature. + ``MSSQLDialect`` overrides this to allow the option through. 
+ + Args: + config: The raw per-table config dict, already parsed by + :meth:`~xml2db.table.table.DataModelTable._validate_config`. + + Returns: + The config dict, potentially with ``as_columnstore`` set to + ``False``. + """ + if config.get("as_columnstore"): + config["as_columnstore"] = False + logger.warning( + "Clustered columnstore indexes are only supported with MS SQL Server database" + ) + return config + + def validate_model_config(self, config: dict) -> dict: + """Strip or warn about model-level config keys unsupported by this backend. + + Mirrors :meth:`validate_table_config` but operates on the top-level + model config dict. The base implementation disables ``as_columnstore`` + with an informational log message. + + Args: + config: The raw model-level config dict, already parsed by + :meth:`~xml2db.model.DataModel._validate_config`. + + Returns: + The config dict, potentially modified. + """ + if config.get("as_columnstore"): + config["as_columnstore"] = False + logger.info( + "Clustered columnstore indexes are only supported with MS SQL Server database, noop" + ) + return config diff --git a/src/xml2db/dialect/duckdb.py b/src/xml2db/dialect/duckdb.py new file mode 100644 index 0000000..a4efa78 --- /dev/null +++ b/src/xml2db/dialect/duckdb.py @@ -0,0 +1,50 @@ +from typing import Any + +from sqlalchemy import Column, Integer, Sequence +from sqlalchemy.exc import ProgrammingError +import sqlalchemy.schema + +from .base import DatabaseDialect + + +class DuckDBDialect(DatabaseDialect): + """Dialect for DuckDB. + + DuckDB supports very long identifiers (effectively unlimited in practice; + the SQLAlchemy integration caps them at 63 characters here). It requires two workarounds: + + - **Primary key columns**: DuckDB does not support ``autoincrement`` in the + same way as other backends. A ``Sequence`` object is used instead. 
+ - **Schema creation**: DuckDB's inspector does not reliably list schemas + before they exist, so the existence check is replaced with a try/except + around ``CREATE SCHEMA``. + """ + + # this limit comes from the implementation with SQLAlchemy and not a constraint of duckdb per se + MAX_IDENTIFIER_LENGTH: int = 63 + + def pk_column(self, table_name: str) -> Column: + """Return a Sequence-based primary key column for DuckDB.""" + logical = f"pk_{table_name}" + pk_sequence = Sequence(self.db_identifier(f"pk_sequ_{table_name}")) + return Column( + self.db_identifier(logical), + Integer, + pk_sequence, + server_default=pk_sequence.next_value(), + primary_key=True, + key=logical, + ) + + def create_schema(self, engine: Any, schema_name: str) -> None: + """Create a schema using try/except, as required by DuckDB.""" + + def do_create() -> None: + with engine.connect() as conn: + conn.execute(sqlalchemy.schema.CreateSchema(schema_name)) + conn.commit() + + try: + do_create() + except ProgrammingError: + pass diff --git a/src/xml2db/dialect/mssql.py b/src/xml2db/dialect/mssql.py new file mode 100644 index 0000000..d5657c2 --- /dev/null +++ b/src/xml2db/dialect/mssql.py @@ -0,0 +1,75 @@ +from typing import Any, List, TYPE_CHECKING + +from sqlalchemy import Index +from sqlalchemy.dialects import mssql as mssql_dialect + +from .base import DatabaseDialect + +if TYPE_CHECKING: + from ..table.column import DataModelColumn + + +class MSSQLDialect(DatabaseDialect): + """Dialect for Microsoft SQL Server. + + MSSQL supports identifiers up to 128 characters, so no truncation is + needed. Columnstore index support and MSSQL-specific type mappings are + handled in this class. 
+ """ + + MAX_IDENTIFIER_LENGTH: int = 128 + + def validate_table_config(self, config: dict) -> dict: + """Allow ``as_columnstore`` through unchanged for MSSQL.""" + return config + + def validate_model_config(self, config: dict) -> dict: + """Allow ``as_columnstore`` through unchanged for MSSQL.""" + return config + + def column_type(self, col: "DataModelColumn", temp: bool) -> Any: + if col.occurs[1] != 1: + return mssql_dialect.VARCHAR(8000) + if col.data_type == "dateTime": + # using DATETIMEOFFSET directly in the temporary table caused issues when inserting data + # INSERT INTO SELECT converts datetime VARCHAR to DATETIMEOFFSET without errors + return mssql_dialect.VARCHAR(100) if temp else mssql_dialect.DATETIMEOFFSET + if col.data_type == "date": + return mssql_dialect.VARCHAR(16) + if col.data_type == "time": + return mssql_dialect.VARCHAR(18) + if col.data_type in ["string", "NMTOKEN", "duration", "token"]: + if col.max_length is None: + return mssql_dialect.VARCHAR(1000) + min_length = 0 if col.min_length is None else col.min_length + if min_length >= col.max_length - 1 and not col.allow_empty: + return mssql_dialect.CHAR(col.max_length) + return mssql_dialect.VARCHAR(col.max_length) + if col.data_type == "binary": + if col.max_length == col.min_length: + return mssql_dialect.BINARY(col.max_length) + return mssql_dialect.VARBINARY(col.max_length) + return super().column_type(col, temp) + + def extra_indexes(self, table_name: str, config: dict) -> List[Index]: + if config.get("as_columnstore"): + return [ + Index( + self.db_identifier(f"idx_{table_name}_columnstore"), + mssql_clustered=True, + mssql_columnstore=True, + ) + ] + return [] + + def relation_extra_indexes( + self, rel_table_name: str, fk_self_col: str, fk_other_col: str, config: dict + ) -> tuple: + return ( + Index( + self.db_identifier(f"ix_fk_{rel_table_name}"), + self.db_identifier(fk_self_col), + self.db_identifier(fk_other_col), + mssql_clustered=True, + ), + ) diff --git 
a/src/xml2db/dialect/mysql.py b/src/xml2db/dialect/mysql.py new file mode 100644 index 0000000..c5d20fe --- /dev/null +++ b/src/xml2db/dialect/mysql.py @@ -0,0 +1,31 @@ +from typing import Any, TYPE_CHECKING + +from sqlalchemy import String +from sqlalchemy.dialects import mysql as mysql_dialect + +from .base import DatabaseDialect + +if TYPE_CHECKING: + from ..table.column import DataModelColumn + + +class MySQLDialect(DatabaseDialect): + """Dialect for MySQL / MariaDB. + + MySQL enforces a 64-character limit on identifiers. + """ + + # further reducing the max length because SQL Alchemy adds suffixes to foreign key names + MAX_IDENTIFIER_LENGTH: int = 56 + + def column_type(self, col: "DataModelColumn", temp: bool) -> Any: + if col.occurs[1] != 1: + return String(4000) + if col.data_type in ["string", "NMTOKEN", "duration", "token"]: + if col.max_length is None: + return String(255) + if col.data_type == "binary": + if col.max_length == col.min_length: + return mysql_dialect.BINARY(col.max_length) + return mysql_dialect.VARBINARY(col.max_length) + return super().column_type(col, temp) diff --git a/src/xml2db/dialect/postgresql.py b/src/xml2db/dialect/postgresql.py new file mode 100644 index 0000000..23a9cfe --- /dev/null +++ b/src/xml2db/dialect/postgresql.py @@ -0,0 +1,13 @@ +from .base import DatabaseDialect + + +class PostgreSQLDialect(DatabaseDialect): + """Dialect for PostgreSQL. + + PostgreSQL enforces a 63-character limit on identifiers. Names exceeding + this limit are truncated with a hash suffix by the base + :meth:`~DatabaseDialect.db_identifier` implementation, which uses + :attr:`MAX_IDENTIFIER_LENGTH` to decide when to truncate. 
+ """ + + MAX_IDENTIFIER_LENGTH: int = 63 diff --git a/src/xml2db/model.py b/src/xml2db/model.py index 9c84f50..b2939a4 100644 --- a/src/xml2db/model.py +++ b/src/xml2db/model.py @@ -9,11 +9,11 @@ import xmlschema import sqlalchemy from lxml import etree -from sqlalchemy import MetaData, create_engine, inspect +from sqlalchemy import MetaData, create_engine from sqlalchemy.sql.ddl import CreateIndex, CreateTable -from sqlalchemy.exc import ProgrammingError from graphlib import TopologicalSorter +from .dialect import get_dialect from .document import Document from .exceptions import DataModelConfigError, check_type from .table import ( @@ -54,6 +54,7 @@ class DataModel: lxml_schema: The `lxml.etree.XMLSchema` object associated with this data model data_flow_name: A short identifier used for the data model (`short_name` argument value) data_flow_long_name: A longer for the data model (`long_name` argument value) + dialect: A dialect class to manage db-specific behaviours db_schema: A database schema name to store the database tables source_tree: A text representation of the source data model tree target_tree: A text representation of the simplified data model tree which will be used to create target tables @@ -117,6 +118,8 @@ def __init__( ) self.db_type = self.engine.dialect.name + self.dialect = get_dialect(self.db_type) + self.model_config = self.dialect.validate_model_config(self.model_config) self.db_schema = db_schema self.temp_prefix = str(uuid4())[:8] if temp_prefix is None else temp_prefix @@ -151,12 +154,6 @@ def _validate_config(self, cfg): ("metadata_columns", list, []), ] } - if model_config["as_columnstore"] and self.db_type == "mssql": - model_config["as_columnstore"] = False - logger.info( - "Clustered columnstore indexes are only supported with MS SQL Server database, noop" - ) - return model_config @property @@ -659,23 +656,8 @@ def create_db_schema(self) -> None: You do not have to call this method explicitly when using 
[`Document.insert_into_target_tables()`](document.md#xml2db.document.Document.insert_into_target_tables). """ - - def do_create_schema(): - with self.engine.connect() as conn: - conn.execute(sqlalchemy.schema.CreateSchema(self.db_schema)) - conn.commit() - if self.db_schema is not None: - if self.db_type == "duckdb": - try: - do_create_schema() - except ProgrammingError: - pass - else: - inspector = inspect(self.engine) - if self.db_schema not in inspector.get_schema_names(): - do_create_schema() - + self.dialect.create_schema(self.engine, self.db_schema) logger.info(f"Created schema: {self.db_schema}") def drop_all_tables(self): diff --git a/src/xml2db/table/column.py b/src/xml2db/table/column.py index 3b78577..7504c11 100644 --- a/src/xml2db/table/column.py +++ b/src/xml2db/table/column.py @@ -1,18 +1,7 @@ import logging from typing import List, Iterable, Any, Union, TYPE_CHECKING -from sqlalchemy import ( - Integer, - Double, - Boolean, - BigInteger, - SmallInteger, - Column, - DateTime, - String, - LargeBinary, -) -from sqlalchemy.dialects import mssql, mysql +from sqlalchemy import Column if TYPE_CHECKING: from ..model import DataModel @@ -20,114 +9,6 @@ logger = logging.getLogger(__name__) -def types_mapping_default(temp: bool, col: "DataModelColumn") -> Any: - """Defines the sqlalchemy type to use for given column properties in target tables - - Args: - temp: are we targeting the temporary tables schema or the final tables? 
- col: an object representing a column of a table for which we are determining the SQL type to define - - Returns: - a sqlalchemy class representing the data type to be used - """ - if col.occurs[1] != 1: - return String(8000) - if col.data_type in ["decimal", "float", "double"]: - return Double - if col.data_type == "dateTime": - return DateTime(timezone=True) - if col.data_type in [ - "integer", - "int", - "nonPositiveInteger", - "nonNegativeInteger", - "positiveInteger", - "negativeInteger", - ]: - return Integer - if col.data_type == "boolean": - return Boolean - if col.data_type in ["short", "byte"]: - return SmallInteger - if col.data_type == "long": - return BigInteger - if col.data_type == "date": - return String(16) - if col.data_type == "time": - return String(18) - if col.data_type in ["string", "NMTOKEN", "duration", "token"]: - if col.max_length is None: - return String(1000) - min_length = 0 if col.min_length is None else col.min_length - if min_length >= col.max_length - 1 and not col.allow_empty: - return String(col.max_length) - return String(col.max_length) - if col.data_type == "binary": - return LargeBinary(col.max_length) - else: - logger.warning( - f"unknown type '{col.data_type}' for column '{col.name}', defaulting to VARCHAR(1000) " - f"(this can be overridden by providing a field type in the configuration)" - ) - return String(1000) - - -def types_mapping_mssql(temp: bool, col: "DataModelColumn") -> Any: - """Defines the MSSQL type to use for given column properties in target tables - - Args: - temp: are we targeting the temporary tables schema or the final tables? 
- col: an object representing a column of a table for which we are determining the SQL type to define - - Returns: - a sqlalchemy class representing the data type to be used - """ - if col.occurs[1] != 1: - return mssql.VARCHAR(8000) - if col.data_type == "dateTime": - # using the DATETIMEOFFSET directly in the temporary table caused issues when inserting data in the target - # table with INSERT INTO SELECT converts datetime VARCHAR to DATETIMEOFFSET without errors - return mssql.VARCHAR(100) if temp else mssql.DATETIMEOFFSET - if col.data_type == "date": - return mssql.VARCHAR(16) - if col.data_type == "time": - return mssql.VARCHAR(18) - if col.data_type in ["string", "NMTOKEN", "duration", "token"]: - if col.max_length is None: - return mssql.VARCHAR(1000) - min_length = 0 if col.min_length is None else col.min_length - if min_length >= col.max_length - 1 and not col.allow_empty: - return mssql.CHAR(col.max_length) - return mssql.VARCHAR(col.max_length) - if col.data_type == "binary": - if col.max_length == col.min_length: - return mssql.BINARY(col.max_length) - return mssql.VARBINARY(col.max_length) - return types_mapping_default(temp, col) - - -def types_mapping_mysql(temp: bool, col: "DataModelColumn") -> Any: - """Defines the MySQL/sqlalchemy type to use for given column properties in target tables - - Args: - temp: are we targeting the temporary tables schema or the final tables? 
- col: an object representing a column of a table for which we are determining the SQL type to define - - Returns: - a sqlalchemy class representing the data type to be used - """ - if col.occurs[1] != 1: - return String(4000) - if col.data_type in ["string", "NMTOKEN", "duration", "token"]: - if col.max_length is None: - return String(255) - if col.data_type == "binary": - if col.max_length == col.min_length: - return mysql.BINARY(col.max_length) - return mysql.VARBINARY(col.max_length) - return types_mapping_default(temp, col) - - class DataModelColumn: """A class representing a column of a table @@ -181,15 +62,6 @@ def __init__( self.model_config = model_config self.data_model = data_model self.other_table = None # just to avoid a linting warning - self.types_mapping = ( - types_mapping_mssql - if data_model.db_type == "mssql" - else ( - types_mapping_mysql - if data_model.db_type == "mysql" - else types_mapping_default - ) - ) @property def can_join_values_as_string(self): @@ -228,6 +100,6 @@ def get_sqlalchemy_column(self, temp: bool = False) -> Iterable[Column]: # use type specified in config if exists column_type = self.model_config.get("fields", {}).get(self.name, {}).get( "type" - ) or self.types_mapping(temp, self) - - yield Column(self.name, column_type) + ) or self.data_model.dialect.column_type(self, temp) + db_col = self.data_model.dialect.db_identifier(self.name) + yield Column(db_col, column_type, key=self.name) diff --git a/src/xml2db/table/duplicated_table.py b/src/xml2db/table/duplicated_table.py index 72a5b03..e67ca7e 100644 --- a/src/xml2db/table/duplicated_table.py +++ b/src/xml2db/table/duplicated_table.py @@ -5,11 +5,9 @@ Integer, ForeignKey, PrimaryKeyConstraint, - Index, Boolean, select, and_, - Sequence, ) from .transformed_table import DataModelTableTransformed @@ -41,6 +39,7 @@ def build_sqlalchemy_tables(self) -> None: return prefix = f"temp_{self.temp_prefix}_" + d = self.data_model.dialect def get_col(temp=False) -> Iterable[Column]: 
"""Generator function to build sqlalchemy Column objects @@ -48,23 +47,30 @@ def get_col(temp=False) -> Iterable[Column]: Args: temp: are we targeting temp or target table? """ + # temp primary key which is used also in the final table to update back target pk if temp or self.referenced_as_fk: + logical = f"temp_pk_{self.name}" yield Column( - f"temp_pk_{self.name}", + d.db_identifier(logical), Integer, primary_key=temp, autoincrement=False, + key=logical, ) # foreign key column to link with parent if temp: - yield Column(f"temp_fk_parent_{self.parent.name}", Integer) - yield Column(f"fk_parent_{self.parent.name}", Integer) + logical_tmp = f"temp_fk_parent_{self.parent.name}" + yield Column(d.db_identifier(logical_tmp), Integer, key=logical_tmp) + logical_fk = f"fk_parent_{self.parent.name}" + yield Column(d.db_identifier(logical_fk), Integer, key=logical_fk) else: + logical_fk = f"fk_parent_{self.parent.name}" yield Column( - f"fk_parent_{self.parent.name}", + d.db_identifier(logical_fk), Integer, - ForeignKey(f"{self.parent.name}.pk_{self.parent.name}"), + ForeignKey(d.fk_ref(self.parent.name, f"pk_{self.parent.name}")), + key=logical_fk, ) # row_number if needed if self.data_model.model_config["row_numbers"]: @@ -84,46 +90,29 @@ def get_col(temp=False) -> Iterable[Column]: if callable(self.config.get("extra_args", [])) else self.config.get("extra_args", []) ) - if self.data_model.db_type == "duckdb": - pk_sequence = Sequence(f"pk_sequ_{self.name}") - pk_col = Column( - f"pk_{self.name}", - Integer, - pk_sequence, - server_default=pk_sequence.next_value(), - primary_key=True, - ) - else: - pk_col = Column( - f"pk_{self.name}", Integer, primary_key=True, autoincrement=True - ) + pk_col = self.data_model.dialect.pk_column(self.name) self.table = Table( - self.name, + d.db_identifier(self.name), self.metadata, pk_col, PrimaryKeyConstraint( - name=f"cx_pk_{self.name}", + name=d.db_identifier(f"cx_pk_{self.name}"), mssql_clustered=not self.config["as_columnstore"], ), 
*get_col(), *extra_args, ) - # set columnstore index - if self.config["as_columnstore"]: - self.table.append_constraint( - Index( - f"idx_{self.name}_columnstore", - mssql_clustered=True, - mssql_columnstore=True, - ) - ) + # set backend-specific extra indexes (e.g. columnstore) + for idx in self.data_model.dialect.extra_indexes(self.name, self.config): + self.table.append_constraint(idx) # build temporary table + logical_pk = f"pk_{self.name}" self.temp_table = Table( - f"{prefix}{self.name}", + d.db_identifier(f"{prefix}{self.name}"), self.metadata, - Column(f"pk_{self.name}", Integer), + Column(d.db_identifier(logical_pk), Integer, key=logical_pk), *get_col(temp=True), Column("temp_exists", Boolean, default=False), ) diff --git a/src/xml2db/table/relations.py b/src/xml2db/table/relations.py index 96245d1..fe33738 100644 --- a/src/xml2db/table/relations.py +++ b/src/xml2db/table/relations.py @@ -1,5 +1,5 @@ import sqlalchemy.engine -from sqlalchemy import Table, Column, ForeignKey, Integer, Index, select +from sqlalchemy import Table, Column, ForeignKey, Integer, select from typing import TYPE_CHECKING, List, Iterable, Any, Union if TYPE_CHECKING: @@ -59,14 +59,19 @@ def get_sqlalchemy_column(self, temp: bool = False): if not self.name.endswith(self.other_table.name) else f"fk_{self.name}" ) + d = self.data_model.dialect if temp: - yield Column(f"temp_{self.field_name}", Integer) - yield Column(self.field_name, Integer) + temp_logical = f"temp_{self.field_name}" + yield Column(d.db_identifier(temp_logical), Integer, key=temp_logical) + yield Column(d.db_identifier(self.field_name), Integer, key=self.field_name) else: yield Column( - self.field_name, + d.db_identifier(self.field_name), Integer, - ForeignKey(f"{self.other_table.name}.pk_{self.other_table.name}"), + ForeignKey( + d.fk_ref(self.other_table.name, f"pk_{self.other_table.name}") + ), + key=self.field_name, ) def get_merge_temp_records_statements(self) -> Iterable[Any]: @@ -101,13 +106,31 @@ def 
build_relation_tables(self) -> None: ) prefix = f"temp_{self.table.temp_prefix}_" if self.other_table.is_reused: + d = self.data_model.dialect + fk_self_logical = f"fk_{self.table.name}" + fk_other_logical = f"fk_{self.other_table.name}" + temp_fk_self_logical = f"temp_fk_{self.table.name}" + temp_fk_other_logical = f"temp_fk_{self.other_table.name}" + self.temp_rel_table = Table( - f"{prefix}{self.rel_table_name}", + d.db_identifier(f"{prefix}{self.rel_table_name}"), self.table.metadata, - Column(f"temp_fk_{self.table.name}", Integer, nullable=False), - Column(f"fk_{self.table.name}", Integer), - Column(f"temp_fk_{self.other_table.name}", Integer, nullable=False), - Column(f"fk_{self.other_table.name}", Integer), + Column( + d.db_identifier(temp_fk_self_logical), + Integer, + nullable=False, + key=temp_fk_self_logical, + ), + Column(d.db_identifier(fk_self_logical), Integer, key=fk_self_logical), + Column( + d.db_identifier(temp_fk_other_logical), + Integer, + nullable=False, + key=temp_fk_other_logical, + ), + Column( + d.db_identifier(fk_other_logical), Integer, key=fk_other_logical + ), *( ( Column( @@ -120,34 +143,35 @@ def build_relation_tables(self) -> None: else () ), ) - cl_index = tuple() - if self.data_model.db_type == "mssql": - # n-n relation tables don't have a primary key, so we define a clustered index on the first FK - cl_index = ( - Index( - f"ix_fk_{self.rel_table_name}", - f"fk_{self.table.name}", - f"fk_{self.other_table.name}", - mssql_clustered=True, - ), - ) - self.rel_table = Table( + # n-n relation tables don't have a primary key; get backend-specific clustered index + cl_index = d.relation_extra_indexes( self.rel_table_name, + fk_self_logical, + fk_other_logical, + self.table.config, + ) + + self.rel_table = Table( + d.db_identifier(self.rel_table_name), self.table.metadata, Column( - f"fk_{self.table.name}", + d.db_identifier(fk_self_logical), Integer, - ForeignKey(f"{self.table.name}.pk_{self.table.name}"), + 
ForeignKey(d.fk_ref(self.table.name, f"pk_{self.table.name}")), nullable=False, - index=(cl_index == tuple()), + index=(len(cl_index) == 0), + key=fk_self_logical, ), Column( - f"fk_{self.other_table.name}", + d.db_identifier(fk_other_logical), Integer, - ForeignKey(f"{self.other_table.name}.pk_{self.other_table.name}"), + ForeignKey( + d.fk_ref(self.other_table.name, f"pk_{self.other_table.name}") + ), nullable=False, index=True, + key=fk_other_logical, ), *( ( diff --git a/src/xml2db/table/reused_table.py b/src/xml2db/table/reused_table.py index 3c09851..754cc38 100644 --- a/src/xml2db/table/reused_table.py +++ b/src/xml2db/table/reused_table.py @@ -4,12 +4,10 @@ Table, Column, Integer, - Index, PrimaryKeyConstraint, UniqueConstraint, Boolean, select, - Sequence, ) from .column import DataModelColumn @@ -48,6 +46,7 @@ def build_sqlalchemy_tables(self): return prefix = f"temp_{self.temp_prefix}_" + d = self.data_model.dialect # build target table and n-n relations tables def get_col(temp=False): @@ -98,49 +97,37 @@ def get_col(temp=False): else self.config.get("extra_args", []) ) - if self.data_model.db_type == "duckdb": - pk_sequence = Sequence(f"pk_sequ_{self.name}") - pk_col = Column( - f"pk_{self.name}", - Integer, - pk_sequence, - server_default=pk_sequence.next_value(), - primary_key=True, - ) - else: - pk_col = Column( - f"pk_{self.name}", Integer, primary_key=True, autoincrement=True - ) + pk_col = d.pk_column(self.name) self.table = Table( - self.name, + d.db_identifier(self.name), self.metadata, pk_col, PrimaryKeyConstraint( - name=f"cx_pk_{self.name}", + name=d.db_identifier(f"cx_pk_{self.name}"), mssql_clustered=not self.config["as_columnstore"], ), *get_col(), *extra_args, ) - # set columnstore index - if self.config["as_columnstore"]: - self.table.append_constraint( - Index( - f"idx_{self.name}_columnstore", - mssql_clustered=True, - mssql_columnstore=True, - ) - ) + # set backend-specific extra indexes (e.g. 
columnstore) + for idx in self.data_model.dialect.extra_indexes(self.name, self.config): + self.table.append_constraint(idx) # build temporary table + logical_pk = f"pk_{self.name}" + logical_temp_pk = f"temp_pk_{self.name}" self.temp_table = Table( - f"{prefix}{self.name}", + d.db_identifier(f"{prefix}{self.name}"), self.metadata, - Column(f"pk_{self.name}", Integer), + Column(d.db_identifier(logical_pk), Integer, key=logical_pk), Column( - f"temp_pk_{self.name}", Integer, primary_key=True, autoincrement=False + d.db_identifier(logical_temp_pk), + Integer, + primary_key=True, + autoincrement=False, + key=logical_temp_pk, ), *get_col(temp=True), Column("temp_exists", Boolean, default=False), diff --git a/src/xml2db/table/table.py b/src/xml2db/table/table.py index 6e11a24..501c79d 100644 --- a/src/xml2db/table/table.py +++ b/src/xml2db/table/table.py @@ -62,7 +62,8 @@ def __init__( self.is_root_table = is_root_table self.is_virtual_node = is_virtual_node self.model_group = "sequence" - self.config = self._validate_config(config, data_model.db_type) + self.data_model = data_model + self.config = self._validate_config(config) self.db_schema = db_schema self.temp_prefix = temp_prefix @@ -90,9 +91,8 @@ def __init__( self.metadata = metadata self.table = None self.temp_table = None - self.data_model = data_model - def _validate_config(self, cfg, db_type): + def _validate_config(self, cfg): if cfg is None: cfg = {} @@ -112,13 +112,8 @@ def _validate_config(self, cfg, db_type): cfg, "choice_transform", bool, False ) - if config["as_columnstore"] and not db_type == "mssql": - config["as_columnstore"] = False - logger.warning( - "Clustered columnstore indexes are only supported with MS SQL Server database" - ) - config["fields"] = cfg.get("fields", {}) + config = self.data_model.dialect.validate_table_config(config) return config diff --git a/tests/sample_models/orders/orders.xsd b/tests/sample_models/orders/orders.xsd index 68914ae..0752850 100644 --- 
a/tests/sample_models/orders/orders.xsd +++ b/tests/sample_models/orders/orders.xsd @@ -36,6 +36,7 @@ + @@ -56,7 +57,7 @@ - + diff --git a/tests/sample_models/orders/orders_ddl_mssql_version0.sql b/tests/sample_models/orders/orders_ddl_mssql_version0.sql index 51662d8..b8f088a 100644 --- a/tests/sample_models/orders/orders_ddl_mssql_version0.sql +++ b/tests/sample_models/orders/orders_ddl_mssql_version0.sql @@ -13,19 +13,20 @@ CREATE TABLE orderperson ( [companyId_type] CHAR(3) NULL, [companyId_value] VARCHAR(1000) NULL, coordinates VARCHAR(1000) NULL, + a_very_long_field_type_that_makes_col_name_exceeds_max_identifier_length VARCHAR(1000) NULL, record_hash BINARY(20) NULL, CONSTRAINT cx_pk_orderperson PRIMARY KEY CLUSTERED (pk_orderperson), CONSTRAINT orderperson_xml2db_record_hash UNIQUE (record_hash) ) -CREATE TABLE intfeature ( - pk_intfeature INTEGER NOT NULL IDENTITY, +CREATE TABLE intfeature_with_peculiarly_long_suffix_which_overflow_max_length ( + pk_intfeature_with_peculiarly_long_suffix_which_overflow_max_length INTEGER NOT NULL IDENTITY, id VARCHAR(1000) NULL, value INTEGER NULL, record_hash BINARY(20) NULL, - CONSTRAINT cx_pk_intfeature PRIMARY KEY CLUSTERED (pk_intfeature), - CONSTRAINT intfeature_xml2db_record_hash UNIQUE (record_hash) + CONSTRAINT cx_pk_intfeature_with_peculiarly_long_suffix_which_overflow_max_length PRIMARY KEY CLUSTERED (pk_intfeature_with_peculiarly_long_suffix_which_overflow_max_length), + CONSTRAINT intfeature_with_peculia_0c087_xml2db_record_hash UNIQUE (record_hash) ) @@ -53,11 +54,11 @@ CREATE TABLE item ( ) -CREATE TABLE item_product_features_intfeature ( +CREATE TABLE item_product_features_intfeature_with_peculiarly_long_suffix_which_overflow_max_length ( fk_item INTEGER NOT NULL, - fk_intfeature INTEGER NOT NULL, + fk_intfeature_with_peculiarly_long_suffix_which_overflow_max_length INTEGER NOT NULL, FOREIGN KEY(fk_item) REFERENCES item (pk_item), - FOREIGN KEY(fk_intfeature) REFERENCES intfeature (pk_intfeature) + FOREIGN 
KEY(fk_intfeature_with_peculiarly_long_suffix_which_overflow_max_length) REFERENCES intfeature_with_peculiarly_long_suffix_which_overflow_max_length (pk_intfeature_with_peculiarly_long_suffix_which_overflow_max_length) ) @@ -109,9 +110,9 @@ CREATE TABLE orders_shiporder ( FOREIGN KEY(fk_shiporder) REFERENCES shiporder (pk_shiporder) ) -CREATE CLUSTERED INDEX ix_fk_item_product_features_intfeature ON item_product_features_intfeature (fk_item, fk_intfeature) +CREATE CLUSTERED INDEX ix_fk_item_product_features_intfeature_with_peculiarly_long_suffix_which_overflow_max_length ON item_product_features_intfeature_with_peculiarly_long_suffix_which_overflow_max_length (fk_item, fk_intfeature_with_peculiarly_long_suffix_which_overflow_max_length) -CREATE INDEX ix_item_product_features_intfeature_fk_intfeature ON item_product_features_intfeature (fk_intfeature) +CREATE INDEX ix_item_product_features_intfeature_with_peculiarly_long_suffix_which_overflow_max_length_fk_intfeature_with_peculiarly__7aff ON item_product_features_intfeature_with_peculiarly_long_suffix_which_overflow_max_length (fk_intfeature_with_peculiarly_long_suffix_which_overflow_max_length) CREATE CLUSTERED INDEX ix_fk_item_product_features_stringfeature ON item_product_features_stringfeature (fk_item, fk_stringfeature) diff --git a/tests/sample_models/orders/orders_ddl_mssql_version1.sql b/tests/sample_models/orders/orders_ddl_mssql_version1.sql index a7840aa..d65bc44 100644 --- a/tests/sample_models/orders/orders_ddl_mssql_version1.sql +++ b/tests/sample_models/orders/orders_ddl_mssql_version1.sql @@ -14,19 +14,20 @@ CREATE TABLE orderperson ( [companyId_bic] VARCHAR(1000) NULL, [companyId_lei] VARCHAR(1000) NULL, coordinates VARCHAR(1000) NULL, + a_very_long_field_type_that_makes_col_name_exceeds_max_identifier_length VARCHAR(1000) NULL, record_hash BINARY(16) NULL, CONSTRAINT cx_pk_orderperson PRIMARY KEY CLUSTERED (pk_orderperson), CONSTRAINT orderperson_xml2db_record_hash UNIQUE (record_hash) ) -CREATE 
TABLE intfeature ( - pk_intfeature INTEGER NOT NULL IDENTITY, +CREATE TABLE intfeature_with_peculiarly_long_suffix_which_overflow_max_length ( + pk_intfeature_with_peculiarly_long_suffix_which_overflow_max_length INTEGER NOT NULL IDENTITY, id VARCHAR(1000) NULL, value INTEGER NULL, record_hash BINARY(16) NULL, - CONSTRAINT cx_pk_intfeature PRIMARY KEY CLUSTERED (pk_intfeature), - CONSTRAINT intfeature_xml2db_record_hash UNIQUE (record_hash) + CONSTRAINT cx_pk_intfeature_with_peculiarly_long_suffix_which_overflow_max_length PRIMARY KEY CLUSTERED (pk_intfeature_with_peculiarly_long_suffix_which_overflow_max_length), + CONSTRAINT intfeature_with_peculia_0c087_xml2db_record_hash UNIQUE (record_hash) ) @@ -91,12 +92,12 @@ CREATE TABLE item ( ) -CREATE TABLE item_product_features_intfeature ( +CREATE TABLE item_product_features_intfeature_with_peculiarly_long_suffix_which_overflow_max_length ( fk_item INTEGER NOT NULL, - fk_intfeature INTEGER NOT NULL, + fk_intfeature_with_peculiarly_long_suffix_which_overflow_max_length INTEGER NOT NULL, xml2db_row_number INTEGER NOT NULL, FOREIGN KEY(fk_item) REFERENCES item (pk_item), - FOREIGN KEY(fk_intfeature) REFERENCES intfeature (pk_intfeature) + FOREIGN KEY(fk_intfeature_with_peculiarly_long_suffix_which_overflow_max_length) REFERENCES intfeature_with_peculiarly_long_suffix_which_overflow_max_length (pk_intfeature_with_peculiarly_long_suffix_which_overflow_max_length) ) @@ -112,9 +113,9 @@ CREATE CLUSTERED INDEX ix_fk_orders_shiporder ON orders_shiporder (fk_orders, fk CREATE INDEX ix_orders_shiporder_fk_shiporder ON orders_shiporder (fk_shiporder) -CREATE CLUSTERED INDEX ix_fk_item_product_features_intfeature ON item_product_features_intfeature (fk_item, fk_intfeature) +CREATE CLUSTERED INDEX ix_fk_item_product_features_intfeature_with_peculiarly_long_suffix_which_overflow_max_length ON item_product_features_intfeature_with_peculiarly_long_suffix_which_overflow_max_length (fk_item, 
fk_intfeature_with_peculiarly_long_suffix_which_overflow_max_length) -CREATE INDEX ix_item_product_features_intfeature_fk_intfeature ON item_product_features_intfeature (fk_intfeature) +CREATE INDEX ix_item_product_features_intfeature_with_peculiarly_long_suffix_which_overflow_max_length_fk_intfeature_with_peculiarly__7aff ON item_product_features_intfeature_with_peculiarly_long_suffix_which_overflow_max_length (fk_intfeature_with_peculiarly_long_suffix_which_overflow_max_length) CREATE CLUSTERED INDEX ix_fk_item_product_features_stringfeature ON item_product_features_stringfeature (fk_item, fk_stringfeature) diff --git a/tests/sample_models/orders/orders_ddl_mssql_version2.sql b/tests/sample_models/orders/orders_ddl_mssql_version2.sql index d09e427..15c7406 100644 --- a/tests/sample_models/orders/orders_ddl_mssql_version2.sql +++ b/tests/sample_models/orders/orders_ddl_mssql_version2.sql @@ -24,19 +24,20 @@ CREATE TABLE orderperson ( [companyId_type] CHAR(3) NULL, [companyId_value] VARCHAR(1000) NULL, coordinates VARCHAR(1000) NULL, + a_very_long_field_type_that_makes_col_name_exceeds_max_identifier_length VARCHAR(1000) NULL, xml2db_record_hash BINARY(20) NULL, CONSTRAINT cx_pk_orderperson PRIMARY KEY CLUSTERED (pk_orderperson), CONSTRAINT orderperson_xml2db_record_hash UNIQUE (xml2db_record_hash) ) -CREATE TABLE intfeature ( - pk_intfeature INTEGER NOT NULL IDENTITY, +CREATE TABLE intfeature_with_peculiarly_long_suffix_which_overflow_max_length ( + pk_intfeature_with_peculiarly_long_suffix_which_overflow_max_length INTEGER NOT NULL IDENTITY, id VARCHAR(1000) NULL, value INTEGER NULL, xml2db_record_hash BINARY(20) NULL, - CONSTRAINT cx_pk_intfeature PRIMARY KEY CLUSTERED (pk_intfeature), - CONSTRAINT intfeature_xml2db_record_hash UNIQUE (xml2db_record_hash) + CONSTRAINT cx_pk_intfeature_with_peculiarly_long_suffix_which_overflow_max_length PRIMARY KEY CLUSTERED (pk_intfeature_with_peculiarly_long_suffix_which_overflow_max_length), + CONSTRAINT 
intfeature_with_peculia_0c087_xml2db_record_hash UNIQUE (xml2db_record_hash) ) @@ -60,11 +61,11 @@ CREATE TABLE product ( ) -CREATE TABLE product_features_intfeature ( +CREATE TABLE product_features_intfeature_with_peculiarly_long_suffix_which_overflow_max_length ( fk_product INTEGER NOT NULL, - fk_intfeature INTEGER NOT NULL, + fk_intfeature_with_peculiarly_long_suffix_which_overflow_max_length INTEGER NOT NULL, FOREIGN KEY(fk_product) REFERENCES product (pk_product), - FOREIGN KEY(fk_intfeature) REFERENCES intfeature (pk_intfeature) + FOREIGN KEY(fk_intfeature_with_peculiarly_long_suffix_which_overflow_max_length) REFERENCES intfeature_with_peculiarly_long_suffix_which_overflow_max_length (pk_intfeature_with_peculiarly_long_suffix_which_overflow_max_length) ) @@ -108,6 +109,7 @@ CREATE TABLE shiporder ( [orderperson_companyId_type] CHAR(3) NULL, [orderperson_companyId_value] VARCHAR(1000) NULL, orderperson_coordinates VARCHAR(1000) NULL, + orderperson_a_very_long_field_type_that_makes_col_name_exceeds_max_identifier_length VARCHAR(1000) NULL, shipto_fk_orderperson INTEGER NULL, CONSTRAINT cx_pk_shiporder PRIMARY KEY CLUSTERED (pk_shiporder), FOREIGN KEY(fk_parent_orders) REFERENCES orders (pk_orders), @@ -122,9 +124,9 @@ CREATE TABLE shiporder_item ( FOREIGN KEY(fk_item) REFERENCES item (pk_item) ) -CREATE CLUSTERED INDEX ix_fk_product_features_intfeature ON product_features_intfeature (fk_product, fk_intfeature) +CREATE CLUSTERED INDEX ix_fk_product_features_intfeature_with_peculiarly_long_suffix_which_overflow_max_length ON product_features_intfeature_with_peculiarly_long_suffix_which_overflow_max_length (fk_product, fk_intfeature_with_peculiarly_long_suffix_which_overflow_max_length) -CREATE INDEX ix_product_features_intfeature_fk_intfeature ON product_features_intfeature (fk_intfeature) +CREATE INDEX ix_product_features_intfeature_with_peculiarly_long_suffix_which_overflow_max_length_fk_intfeature_with_peculiarly_long__3ab3 ON 
product_features_intfeature_with_peculiarly_long_suffix_which_overflow_max_length (fk_intfeature_with_peculiarly_long_suffix_which_overflow_max_length) CREATE CLUSTERED INDEX ix_fk_product_features_stringfeature ON product_features_stringfeature (fk_product, fk_stringfeature) diff --git a/tests/sample_models/orders/orders_ddl_mysql_version0.sql b/tests/sample_models/orders/orders_ddl_mysql_version0.sql index 45347ff..c4a3eb5 100644 --- a/tests/sample_models/orders/orders_ddl_mysql_version0.sql +++ b/tests/sample_models/orders/orders_ddl_mysql_version0.sql @@ -13,19 +13,20 @@ CREATE TABLE orderperson ( `companyId_type` VARCHAR(3), `companyId_value` VARCHAR(255), coordinates VARCHAR(255), + a_very_long_field_type_that_makes_col_name_excee_223ada0 VARCHAR(255), record_hash BINARY(20), CONSTRAINT cx_pk_orderperson PRIMARY KEY (pk_orderperson), CONSTRAINT orderperson_xml2db_record_hash UNIQUE (record_hash) ) -CREATE TABLE intfeature ( - pk_intfeature INTEGER NOT NULL AUTO_INCREMENT, +CREATE TABLE intfeature_with_peculiarly_long_suffix_which_ove_5868736 ( + pk_intfeature_with_peculiarly_long_suffix_which__85b659b INTEGER NOT NULL AUTO_INCREMENT, id VARCHAR(255), value INTEGER, record_hash BINARY(20), - CONSTRAINT cx_pk_intfeature PRIMARY KEY (pk_intfeature), - CONSTRAINT intfeature_xml2db_record_hash UNIQUE (record_hash) + CONSTRAINT cx_pk_intfeature_with_peculiarly_long_suffix_whi_ecb17be PRIMARY KEY (pk_intfeature_with_peculiarly_long_suffix_which__85b659b), + CONSTRAINT intfeature_with_peculia_0c087_xml2db_record_hash UNIQUE (record_hash) ) @@ -53,11 +54,11 @@ CREATE TABLE item ( ) -CREATE TABLE item_product_features_intfeature ( +CREATE TABLE item_product_features_intfeature_with_peculiarly_779d1ac ( fk_item INTEGER NOT NULL, - fk_intfeature INTEGER NOT NULL, + fk_intfeature_with_peculiarly_long_suffix_which__00590e9 INTEGER NOT NULL, FOREIGN KEY(fk_item) REFERENCES item (pk_item), - FOREIGN KEY(fk_intfeature) REFERENCES intfeature (pk_intfeature) + FOREIGN 
KEY(fk_intfeature_with_peculiarly_long_suffix_which__00590e9) REFERENCES intfeature_with_peculiarly_long_suffix_which_ove_5868736 (pk_intfeature_with_peculiarly_long_suffix_which__85b659b) ) @@ -109,9 +110,9 @@ CREATE TABLE orders_shiporder ( FOREIGN KEY(fk_shiporder) REFERENCES shiporder (pk_shiporder) ) -CREATE INDEX ix_item_product_features_intfeature_fk_intfeature ON item_product_features_intfeature (fk_intfeature) +CREATE INDEX ix_item_product_features_intfeature_with_peculiarly_779d_b099 ON item_product_features_intfeature_with_peculiarly_779d1ac (fk_intfeature_with_peculiarly_long_suffix_which__00590e9) -CREATE INDEX ix_item_product_features_intfeature_fk_item ON item_product_features_intfeature (fk_item) +CREATE INDEX ix_item_product_features_intfeature_with_peculiarly_779d_4520 ON item_product_features_intfeature_with_peculiarly_779d1ac (fk_item) CREATE INDEX ix_item_product_features_stringfeature_fk_item ON item_product_features_stringfeature (fk_item) diff --git a/tests/sample_models/orders/orders_ddl_mysql_version1.sql b/tests/sample_models/orders/orders_ddl_mysql_version1.sql index 2d6626b..8a2139f 100644 --- a/tests/sample_models/orders/orders_ddl_mysql_version1.sql +++ b/tests/sample_models/orders/orders_ddl_mysql_version1.sql @@ -14,19 +14,20 @@ CREATE TABLE orderperson ( `companyId_bic` VARCHAR(255), `companyId_lei` VARCHAR(255), coordinates VARCHAR(255), + a_very_long_field_type_that_makes_col_name_excee_223ada0 VARCHAR(255), record_hash BINARY(16), CONSTRAINT cx_pk_orderperson PRIMARY KEY (pk_orderperson), CONSTRAINT orderperson_xml2db_record_hash UNIQUE (record_hash) ) -CREATE TABLE intfeature ( - pk_intfeature INTEGER NOT NULL AUTO_INCREMENT, +CREATE TABLE intfeature_with_peculiarly_long_suffix_which_ove_5868736 ( + pk_intfeature_with_peculiarly_long_suffix_which__85b659b INTEGER NOT NULL AUTO_INCREMENT, id VARCHAR(255), value INTEGER, record_hash BINARY(16), - CONSTRAINT cx_pk_intfeature PRIMARY KEY (pk_intfeature), - CONSTRAINT 
intfeature_xml2db_record_hash UNIQUE (record_hash) + CONSTRAINT cx_pk_intfeature_with_peculiarly_long_suffix_whi_ecb17be PRIMARY KEY (pk_intfeature_with_peculiarly_long_suffix_which__85b659b), + CONSTRAINT intfeature_with_peculia_0c087_xml2db_record_hash UNIQUE (record_hash) ) @@ -91,12 +92,12 @@ CREATE TABLE item ( ) -CREATE TABLE item_product_features_intfeature ( +CREATE TABLE item_product_features_intfeature_with_peculiarly_779d1ac ( fk_item INTEGER NOT NULL, - fk_intfeature INTEGER NOT NULL, + fk_intfeature_with_peculiarly_long_suffix_which__00590e9 INTEGER NOT NULL, xml2db_row_number INTEGER NOT NULL, FOREIGN KEY(fk_item) REFERENCES item (pk_item), - FOREIGN KEY(fk_intfeature) REFERENCES intfeature (pk_intfeature) + FOREIGN KEY(fk_intfeature_with_peculiarly_long_suffix_which__00590e9) REFERENCES intfeature_with_peculiarly_long_suffix_which_ove_5868736 (pk_intfeature_with_peculiarly_long_suffix_which__85b659b) ) @@ -112,9 +113,9 @@ CREATE INDEX ix_orders_shiporder_fk_orders ON orders_shiporder (fk_orders) CREATE INDEX ix_orders_shiporder_fk_shiporder ON orders_shiporder (fk_shiporder) -CREATE INDEX ix_item_product_features_intfeature_fk_intfeature ON item_product_features_intfeature (fk_intfeature) +CREATE INDEX ix_item_product_features_intfeature_with_peculiarly_779d_b099 ON item_product_features_intfeature_with_peculiarly_779d1ac (fk_intfeature_with_peculiarly_long_suffix_which__00590e9) -CREATE INDEX ix_item_product_features_intfeature_fk_item ON item_product_features_intfeature (fk_item) +CREATE INDEX ix_item_product_features_intfeature_with_peculiarly_779d_4520 ON item_product_features_intfeature_with_peculiarly_779d1ac (fk_item) CREATE INDEX ix_item_product_features_stringfeature_fk_item ON item_product_features_stringfeature (fk_item) diff --git a/tests/sample_models/orders/orders_ddl_mysql_version2.sql b/tests/sample_models/orders/orders_ddl_mysql_version2.sql index c3ab769..a556153 100644 --- a/tests/sample_models/orders/orders_ddl_mysql_version2.sql 
+++ b/tests/sample_models/orders/orders_ddl_mysql_version2.sql @@ -24,19 +24,20 @@ CREATE TABLE orderperson ( `companyId_type` VARCHAR(3), `companyId_value` VARCHAR(255), coordinates VARCHAR(255), + a_very_long_field_type_that_makes_col_name_excee_223ada0 VARCHAR(255), xml2db_record_hash BINARY(20), CONSTRAINT cx_pk_orderperson PRIMARY KEY (pk_orderperson), CONSTRAINT orderperson_xml2db_record_hash UNIQUE (xml2db_record_hash) ) -CREATE TABLE intfeature ( - pk_intfeature INTEGER NOT NULL AUTO_INCREMENT, +CREATE TABLE intfeature_with_peculiarly_long_suffix_which_ove_5868736 ( + pk_intfeature_with_peculiarly_long_suffix_which__85b659b INTEGER NOT NULL AUTO_INCREMENT, id VARCHAR(255), value INTEGER, xml2db_record_hash BINARY(20), - CONSTRAINT cx_pk_intfeature PRIMARY KEY (pk_intfeature), - CONSTRAINT intfeature_xml2db_record_hash UNIQUE (xml2db_record_hash) + CONSTRAINT cx_pk_intfeature_with_peculiarly_long_suffix_whi_ecb17be PRIMARY KEY (pk_intfeature_with_peculiarly_long_suffix_which__85b659b), + CONSTRAINT intfeature_with_peculia_0c087_xml2db_record_hash UNIQUE (xml2db_record_hash) ) @@ -60,11 +61,11 @@ CREATE TABLE product ( ) -CREATE TABLE product_features_intfeature ( +CREATE TABLE product_features_intfeature_with_peculiarly_long_82a4847 ( fk_product INTEGER NOT NULL, - fk_intfeature INTEGER NOT NULL, + fk_intfeature_with_peculiarly_long_suffix_which__00590e9 INTEGER NOT NULL, FOREIGN KEY(fk_product) REFERENCES product (pk_product), - FOREIGN KEY(fk_intfeature) REFERENCES intfeature (pk_intfeature) + FOREIGN KEY(fk_intfeature_with_peculiarly_long_suffix_which__00590e9) REFERENCES intfeature_with_peculiarly_long_suffix_which_ove_5868736 (pk_intfeature_with_peculiarly_long_suffix_which__85b659b) ) @@ -108,6 +109,7 @@ CREATE TABLE shiporder ( `orderperson_companyId_type` VARCHAR(3), `orderperson_companyId_value` VARCHAR(255), orderperson_coordinates VARCHAR(255), + orderperson_a_very_long_field_type_that_makes_co_ee3c2ee VARCHAR(255), shipto_fk_orderperson INTEGER, 
CONSTRAINT cx_pk_shiporder PRIMARY KEY (pk_shiporder), FOREIGN KEY(fk_parent_orders) REFERENCES orders (pk_orders), @@ -122,9 +124,9 @@ CREATE TABLE shiporder_item ( FOREIGN KEY(fk_item) REFERENCES item (pk_item) ) -CREATE INDEX ix_product_features_intfeature_fk_intfeature ON product_features_intfeature (fk_intfeature) +CREATE INDEX ix_product_features_intfeature_with_peculiarly_long_82a4_da9b ON product_features_intfeature_with_peculiarly_long_82a4847 (fk_intfeature_with_peculiarly_long_suffix_which__00590e9) -CREATE INDEX ix_product_features_intfeature_fk_product ON product_features_intfeature (fk_product) +CREATE INDEX ix_product_features_intfeature_with_peculiarly_long_82a4_6910 ON product_features_intfeature_with_peculiarly_long_82a4847 (fk_product) CREATE INDEX ix_product_features_stringfeature_fk_product ON product_features_stringfeature (fk_product) diff --git a/tests/sample_models/orders/orders_ddl_postgresql_version0.sql b/tests/sample_models/orders/orders_ddl_postgresql_version0.sql index 204cd70..9c2c92a 100644 --- a/tests/sample_models/orders/orders_ddl_postgresql_version0.sql +++ b/tests/sample_models/orders/orders_ddl_postgresql_version0.sql @@ -13,19 +13,20 @@ CREATE TABLE orderperson ( "companyId_type" VARCHAR(3), "companyId_value" VARCHAR(1000), coordinates VARCHAR(1000), + a_very_long_field_type_that_makes_col_name_exceeds_max__223ada0 VARCHAR(1000), record_hash BYTEA, CONSTRAINT cx_pk_orderperson PRIMARY KEY (pk_orderperson), CONSTRAINT orderperson_xml2db_record_hash UNIQUE (record_hash) ) -CREATE TABLE intfeature ( - pk_intfeature SERIAL NOT NULL, +CREATE TABLE intfeature_with_peculiarly_long_suffix_which_overflow_m_5868736 ( + pk_intfeature_with_peculiarly_long_suffix_which_overflo_85b659b SERIAL NOT NULL, id VARCHAR(1000), value INTEGER, record_hash BYTEA, - CONSTRAINT cx_pk_intfeature PRIMARY KEY (pk_intfeature), - CONSTRAINT intfeature_xml2db_record_hash UNIQUE (record_hash) + CONSTRAINT 
cx_pk_intfeature_with_peculiarly_long_suffix_which_over_ecb17be PRIMARY KEY (pk_intfeature_with_peculiarly_long_suffix_which_overflo_85b659b), + CONSTRAINT intfeature_with_peculia_0c087_xml2db_record_hash UNIQUE (record_hash) ) @@ -53,11 +54,11 @@ CREATE TABLE item ( ) -CREATE TABLE item_product_features_intfeature ( +CREATE TABLE item_product_features_intfeature_with_peculiarly_long_s_779d1ac ( fk_item INTEGER NOT NULL, - fk_intfeature INTEGER NOT NULL, + fk_intfeature_with_peculiarly_long_suffix_which_overflo_00590e9 INTEGER NOT NULL, FOREIGN KEY(fk_item) REFERENCES item (pk_item), - FOREIGN KEY(fk_intfeature) REFERENCES intfeature (pk_intfeature) + FOREIGN KEY(fk_intfeature_with_peculiarly_long_suffix_which_overflo_00590e9) REFERENCES intfeature_with_peculiarly_long_suffix_which_overflow_m_5868736 (pk_intfeature_with_peculiarly_long_suffix_which_overflo_85b659b) ) @@ -109,9 +110,9 @@ CREATE TABLE orders_shiporder ( FOREIGN KEY(fk_shiporder) REFERENCES shiporder (pk_shiporder) ) -CREATE INDEX ix_item_product_features_intfeature_fk_intfeature ON item_product_features_intfeature (fk_intfeature) +CREATE INDEX ix_item_product_features_intfeature_with_peculiarly_lon_36ea ON item_product_features_intfeature_with_peculiarly_long_s_779d1ac (fk_intfeature_with_peculiarly_long_suffix_which_overflo_00590e9) -CREATE INDEX ix_item_product_features_intfeature_fk_item ON item_product_features_intfeature (fk_item) +CREATE INDEX ix_item_product_features_intfeature_with_peculiarly_lon_124e ON item_product_features_intfeature_with_peculiarly_long_s_779d1ac (fk_item) CREATE INDEX ix_item_product_features_stringfeature_fk_item ON item_product_features_stringfeature (fk_item) diff --git a/tests/sample_models/orders/orders_ddl_postgresql_version1.sql b/tests/sample_models/orders/orders_ddl_postgresql_version1.sql index 7955a4b..0abbb86 100644 --- a/tests/sample_models/orders/orders_ddl_postgresql_version1.sql +++ b/tests/sample_models/orders/orders_ddl_postgresql_version1.sql @@ -14,19 
+14,20 @@ CREATE TABLE orderperson ( "companyId_bic" VARCHAR(1000), "companyId_lei" VARCHAR(1000), coordinates VARCHAR(1000), + a_very_long_field_type_that_makes_col_name_exceeds_max__223ada0 VARCHAR(1000), record_hash BYTEA, CONSTRAINT cx_pk_orderperson PRIMARY KEY (pk_orderperson), CONSTRAINT orderperson_xml2db_record_hash UNIQUE (record_hash) ) -CREATE TABLE intfeature ( - pk_intfeature SERIAL NOT NULL, +CREATE TABLE intfeature_with_peculiarly_long_suffix_which_overflow_m_5868736 ( + pk_intfeature_with_peculiarly_long_suffix_which_overflo_85b659b SERIAL NOT NULL, id VARCHAR(1000), value INTEGER, record_hash BYTEA, - CONSTRAINT cx_pk_intfeature PRIMARY KEY (pk_intfeature), - CONSTRAINT intfeature_xml2db_record_hash UNIQUE (record_hash) + CONSTRAINT cx_pk_intfeature_with_peculiarly_long_suffix_which_over_ecb17be PRIMARY KEY (pk_intfeature_with_peculiarly_long_suffix_which_overflo_85b659b), + CONSTRAINT intfeature_with_peculia_0c087_xml2db_record_hash UNIQUE (record_hash) ) @@ -91,12 +92,12 @@ CREATE TABLE item ( ) -CREATE TABLE item_product_features_intfeature ( +CREATE TABLE item_product_features_intfeature_with_peculiarly_long_s_779d1ac ( fk_item INTEGER NOT NULL, - fk_intfeature INTEGER NOT NULL, + fk_intfeature_with_peculiarly_long_suffix_which_overflo_00590e9 INTEGER NOT NULL, xml2db_row_number INTEGER NOT NULL, FOREIGN KEY(fk_item) REFERENCES item (pk_item), - FOREIGN KEY(fk_intfeature) REFERENCES intfeature (pk_intfeature) + FOREIGN KEY(fk_intfeature_with_peculiarly_long_suffix_which_overflo_00590e9) REFERENCES intfeature_with_peculiarly_long_suffix_which_overflow_m_5868736 (pk_intfeature_with_peculiarly_long_suffix_which_overflo_85b659b) ) @@ -112,9 +113,9 @@ CREATE INDEX ix_orders_shiporder_fk_orders ON orders_shiporder (fk_orders) CREATE INDEX ix_orders_shiporder_fk_shiporder ON orders_shiporder (fk_shiporder) -CREATE INDEX ix_item_product_features_intfeature_fk_intfeature ON item_product_features_intfeature (fk_intfeature) +CREATE INDEX 
ix_item_product_features_intfeature_with_peculiarly_lon_36ea ON item_product_features_intfeature_with_peculiarly_long_s_779d1ac (fk_intfeature_with_peculiarly_long_suffix_which_overflo_00590e9) -CREATE INDEX ix_item_product_features_intfeature_fk_item ON item_product_features_intfeature (fk_item) +CREATE INDEX ix_item_product_features_intfeature_with_peculiarly_lon_124e ON item_product_features_intfeature_with_peculiarly_long_s_779d1ac (fk_item) CREATE INDEX ix_item_product_features_stringfeature_fk_item ON item_product_features_stringfeature (fk_item) diff --git a/tests/sample_models/orders/orders_ddl_postgresql_version2.sql b/tests/sample_models/orders/orders_ddl_postgresql_version2.sql index ed9c440..03591a5 100644 --- a/tests/sample_models/orders/orders_ddl_postgresql_version2.sql +++ b/tests/sample_models/orders/orders_ddl_postgresql_version2.sql @@ -24,19 +24,20 @@ CREATE TABLE orderperson ( "companyId_type" VARCHAR(3), "companyId_value" VARCHAR(1000), coordinates VARCHAR(1000), + a_very_long_field_type_that_makes_col_name_exceeds_max__223ada0 VARCHAR(1000), xml2db_record_hash BYTEA, CONSTRAINT cx_pk_orderperson PRIMARY KEY (pk_orderperson), CONSTRAINT orderperson_xml2db_record_hash UNIQUE (xml2db_record_hash) ) -CREATE TABLE intfeature ( - pk_intfeature SERIAL NOT NULL, +CREATE TABLE intfeature_with_peculiarly_long_suffix_which_overflow_m_5868736 ( + pk_intfeature_with_peculiarly_long_suffix_which_overflo_85b659b SERIAL NOT NULL, id VARCHAR(1000), value INTEGER, xml2db_record_hash BYTEA, - CONSTRAINT cx_pk_intfeature PRIMARY KEY (pk_intfeature), - CONSTRAINT intfeature_xml2db_record_hash UNIQUE (xml2db_record_hash) + CONSTRAINT cx_pk_intfeature_with_peculiarly_long_suffix_which_over_ecb17be PRIMARY KEY (pk_intfeature_with_peculiarly_long_suffix_which_overflo_85b659b), + CONSTRAINT intfeature_with_peculia_0c087_xml2db_record_hash UNIQUE (xml2db_record_hash) ) @@ -60,11 +61,11 @@ CREATE TABLE product ( ) -CREATE TABLE product_features_intfeature ( +CREATE 
TABLE product_features_intfeature_with_peculiarly_long_suffix_82a4847 ( fk_product INTEGER NOT NULL, - fk_intfeature INTEGER NOT NULL, + fk_intfeature_with_peculiarly_long_suffix_which_overflo_00590e9 INTEGER NOT NULL, FOREIGN KEY(fk_product) REFERENCES product (pk_product), - FOREIGN KEY(fk_intfeature) REFERENCES intfeature (pk_intfeature) + FOREIGN KEY(fk_intfeature_with_peculiarly_long_suffix_which_overflo_00590e9) REFERENCES intfeature_with_peculiarly_long_suffix_which_overflow_m_5868736 (pk_intfeature_with_peculiarly_long_suffix_which_overflo_85b659b) ) @@ -108,6 +109,7 @@ CREATE TABLE shiporder ( "orderperson_companyId_type" VARCHAR(3), "orderperson_companyId_value" VARCHAR(1000), orderperson_coordinates VARCHAR(1000), + orderperson_a_very_long_field_type_that_makes_col_name__ee3c2ee VARCHAR(1000), shipto_fk_orderperson INTEGER, CONSTRAINT cx_pk_shiporder PRIMARY KEY (pk_shiporder), FOREIGN KEY(fk_parent_orders) REFERENCES orders (pk_orders), @@ -122,9 +124,9 @@ CREATE TABLE shiporder_item ( FOREIGN KEY(fk_item) REFERENCES item (pk_item) ) -CREATE INDEX ix_product_features_intfeature_fk_intfeature ON product_features_intfeature (fk_intfeature) +CREATE INDEX ix_product_features_intfeature_with_peculiarly_long_suf_63f4 ON product_features_intfeature_with_peculiarly_long_suffix_82a4847 (fk_intfeature_with_peculiarly_long_suffix_which_overflo_00590e9) -CREATE INDEX ix_product_features_intfeature_fk_product ON product_features_intfeature (fk_product) +CREATE INDEX ix_product_features_intfeature_with_peculiarly_long_suf_0375 ON product_features_intfeature_with_peculiarly_long_suffix_82a4847 (fk_product) CREATE INDEX ix_product_features_stringfeature_fk_product ON product_features_stringfeature (fk_product) diff --git a/tests/sample_models/orders/orders_erd_version0.md b/tests/sample_models/orders/orders_erd_version0.md index 83291d8..6c9d33d 100644 --- a/tests/sample_models/orders/orders_erd_version0.md +++ b/tests/sample_models/orders/orders_erd_version0.md @@ 
-12,7 +12,7 @@ erDiagram string orderid dateTime processed_at } - item ||--o{ intfeature : "product_features_intfeature*" + item ||--o{ intfeature_with_peculiarly_long_suffix_which_overflow_max_length : "product_features_intfeature_with_peculiarly_long_suffix_which_overflow_max_length*" item ||--o{ stringfeature : "product_features_stringfeature*" item { string product_name @@ -26,7 +26,7 @@ erDiagram string id string value } - intfeature { + intfeature_with_peculiarly_long_suffix_which_overflow_max_length { string id integer value } @@ -43,5 +43,6 @@ erDiagram string companyId_type string companyId_value string coordinates + string a_very_long_field_type_that_makes_col_name_exceeds_max_identifier_length } ``` \ No newline at end of file diff --git a/tests/sample_models/orders/orders_erd_version1.md b/tests/sample_models/orders/orders_erd_version1.md index 2261d1c..b2eb1ff 100644 --- a/tests/sample_models/orders/orders_erd_version1.md +++ b/tests/sample_models/orders/orders_erd_version1.md @@ -1,6 +1,6 @@ ```mermaid erDiagram - item ||--o{ intfeature : "product_features_intfeature*" + item ||--o{ intfeature_with_peculiarly_long_suffix_which_overflow_max_length : "product_features_intfeature_with_peculiarly_long_suffix_which_overflow_max_length*" item ||--o{ stringfeature : "product_features_stringfeature*" item { string product_name @@ -26,7 +26,7 @@ erDiagram string id string value } - intfeature { + intfeature_with_peculiarly_long_suffix_which_overflow_max_length { string id integer value } @@ -44,5 +44,6 @@ erDiagram string companyId_bic string companyId_lei string coordinates + string a_very_long_field_type_that_makes_col_name_exceeds_max_identifier_length } ``` \ No newline at end of file diff --git a/tests/sample_models/orders/orders_erd_version2.md b/tests/sample_models/orders/orders_erd_version2.md index fa07d52..c453517 100644 --- a/tests/sample_models/orders/orders_erd_version2.md +++ b/tests/sample_models/orders/orders_erd_version2.md @@ -17,6 +17,7 @@ 
erDiagram string orderperson_companyId_type string orderperson_companyId_value string orderperson_coordinates + string orderperson_a_very_long_field_type_that_makes_col_name_exceeds_max_identifier_length } item ||--|| product : "product" item { @@ -25,7 +26,7 @@ erDiagram decimal price string currency } - product ||--o{ intfeature : "features_intfeature*" + product ||--o{ intfeature_with_peculiarly_long_suffix_which_overflow_max_length : "features_intfeature_with_peculiarly_long_suffix_which_overflow_max_length*" product ||--o{ stringfeature : "features_stringfeature*" product { string name @@ -35,7 +36,7 @@ erDiagram string id string value } - intfeature { + intfeature_with_peculiarly_long_suffix_which_overflow_max_length { string id integer value } @@ -52,6 +53,7 @@ erDiagram string companyId_type string companyId_value string coordinates + string a_very_long_field_type_that_makes_col_name_exceeds_max_identifier_length } orders ||--o{ shiporder : "shiporder" orders { diff --git a/tests/sample_models/orders/orders_source_tree_version0.txt b/tests/sample_models/orders/orders_source_tree_version0.txt index a5004db..870012b 100644 --- a/tests/sample_models/orders/orders_source_tree_version0.txt +++ b/tests/sample_models/orders/orders_source_tree_version0.txt @@ -21,6 +21,7 @@ orders: lei[0, 1]: string coordinates[0, 1]: string extra[0, 1]: + a_very_long_field_type_that_makes_col_name_exceeds_max_identifier_length[0, 1]: string shipto[0, 1]: name_attr[0, 1]: string name[1, 1]: string @@ -38,12 +39,13 @@ orders: lei[0, 1]: string coordinates[0, 1]: string extra[0, 1]: + a_very_long_field_type_that_makes_col_name_exceeds_max_identifier_length[0, 1]: string item[1, None]: product[1, 1]: name[1, 1]: string version[1, 1]: string features[0, 1]: - intfeature[0, None]: + intfeature_with_peculiarly_long_suffix_which_overflow_max_length[0, None]: id[1, 1]: string value[1, 1]: integer stringfeature[0, None]: diff --git 
a/tests/sample_models/orders/orders_source_tree_version1.txt b/tests/sample_models/orders/orders_source_tree_version1.txt index a5004db..870012b 100644 --- a/tests/sample_models/orders/orders_source_tree_version1.txt +++ b/tests/sample_models/orders/orders_source_tree_version1.txt @@ -21,6 +21,7 @@ orders: lei[0, 1]: string coordinates[0, 1]: string extra[0, 1]: + a_very_long_field_type_that_makes_col_name_exceeds_max_identifier_length[0, 1]: string shipto[0, 1]: name_attr[0, 1]: string name[1, 1]: string @@ -38,12 +39,13 @@ orders: lei[0, 1]: string coordinates[0, 1]: string extra[0, 1]: + a_very_long_field_type_that_makes_col_name_exceeds_max_identifier_length[0, 1]: string item[1, None]: product[1, 1]: name[1, 1]: string version[1, 1]: string features[0, 1]: - intfeature[0, None]: + intfeature_with_peculiarly_long_suffix_which_overflow_max_length[0, None]: id[1, 1]: string value[1, 1]: integer stringfeature[0, None]: diff --git a/tests/sample_models/orders/orders_source_tree_version2.txt b/tests/sample_models/orders/orders_source_tree_version2.txt index a5004db..870012b 100644 --- a/tests/sample_models/orders/orders_source_tree_version2.txt +++ b/tests/sample_models/orders/orders_source_tree_version2.txt @@ -21,6 +21,7 @@ orders: lei[0, 1]: string coordinates[0, 1]: string extra[0, 1]: + a_very_long_field_type_that_makes_col_name_exceeds_max_identifier_length[0, 1]: string shipto[0, 1]: name_attr[0, 1]: string name[1, 1]: string @@ -38,12 +39,13 @@ orders: lei[0, 1]: string coordinates[0, 1]: string extra[0, 1]: + a_very_long_field_type_that_makes_col_name_exceeds_max_identifier_length[0, 1]: string item[1, None]: product[1, 1]: name[1, 1]: string version[1, 1]: string features[0, 1]: - intfeature[0, None]: + intfeature_with_peculiarly_long_suffix_which_overflow_max_length[0, None]: id[1, 1]: string value[1, 1]: integer stringfeature[0, None]: diff --git a/tests/sample_models/orders/orders_target_tree_version0.txt 
b/tests/sample_models/orders/orders_target_tree_version0.txt index c121bc6..65a7f71 100644 --- a/tests/sample_models/orders/orders_target_tree_version0.txt +++ b/tests/sample_models/orders/orders_target_tree_version0.txt @@ -17,6 +17,7 @@ orders: companyId_type[0, 1]: string companyId_value[0, 1]: string coordinates[0, 1]: string + a_very_long_field_type_that_makes_col_name_exceeds_max_identifier_length[0, 1]: string shipto[0, 1]: name_attr[0, 1]: string name[1, 1]: string @@ -30,10 +31,11 @@ orders: companyId_type[0, 1]: string companyId_value[0, 1]: string coordinates[0, 1]: string + a_very_long_field_type_that_makes_col_name_exceeds_max_identifier_length[0, 1]: string item[1, None]: product_name[1, 1]: string product_version[1, 1]: string - product_features_intfeature[0, None]: + product_features_intfeature_with_peculiarly_long_suffix_which_overflow_max_length[0, None]: id[1, 1]: string value[1, 1]: integer product_features_stringfeature[0, None]: diff --git a/tests/sample_models/orders/orders_target_tree_version1.txt b/tests/sample_models/orders/orders_target_tree_version1.txt index ae03934..92a8909 100644 --- a/tests/sample_models/orders/orders_target_tree_version1.txt +++ b/tests/sample_models/orders/orders_target_tree_version1.txt @@ -18,6 +18,7 @@ orders: companyId_bic[0, 1]: string companyId_lei[0, 1]: string coordinates[0, 1]: string + a_very_long_field_type_that_makes_col_name_exceeds_max_identifier_length[0, 1]: string shipto[0, 1]: name_attr[0, 1]: string name[1, 1]: string @@ -32,10 +33,11 @@ orders: companyId_bic[0, 1]: string companyId_lei[0, 1]: string coordinates[0, 1]: string + a_very_long_field_type_that_makes_col_name_exceeds_max_identifier_length[0, 1]: string item[1, None]: product_name[1, 1]: string product_version[1, 1]: string - product_features_intfeature[0, None]: + product_features_intfeature_with_peculiarly_long_suffix_which_overflow_max_length[0, None]: id[1, 1]: string value[1, 1]: integer product_features_stringfeature[0, None]: 
diff --git a/tests/sample_models/orders/orders_target_tree_version2.txt b/tests/sample_models/orders/orders_target_tree_version2.txt index be4f12d..290d877 100644 --- a/tests/sample_models/orders/orders_target_tree_version2.txt +++ b/tests/sample_models/orders/orders_target_tree_version2.txt @@ -16,6 +16,7 @@ orders: orderperson_companyId_type[0, 1]: string orderperson_companyId_value[0, 1]: string orderperson_coordinates[0, 1]: string + orderperson_a_very_long_field_type_that_makes_col_name_exceeds_max_identifier_length[0, 1]: string shipto[0, 1]: name_attr[0, 1]: string name[1, 1]: string @@ -29,11 +30,12 @@ orders: companyId_type[0, 1]: string companyId_value[0, 1]: string coordinates[0, 1]: string + a_very_long_field_type_that_makes_col_name_exceeds_max_identifier_length[0, 1]: string item[1, None]: product[1, 1]: name[1, 1]: string version[1, 1]: string - features_intfeature[0, None]: + features_intfeature_with_peculiarly_long_suffix_which_overflow_max_length[0, None]: id[1, 1]: string value[1, 1]: integer features_stringfeature[0, None]: diff --git a/tests/sample_models/orders/xml/order1.xml b/tests/sample_models/orders/xml/order1.xml index 6c04192..4b82fd8 100644 --- a/tests/sample_models/orders/xml/order1.xml +++ b/tests/sample_models/orders/xml/order1.xml @@ -19,18 +19,18 @@ product 1 regular - + length 60 - - + + width 40 - - + + weight 10 - + color red diff --git a/tests/sample_models/orders/xml/order3.xml b/tests/sample_models/orders/xml/order3.xml index 7cde833..f373357 100644 --- a/tests/sample_models/orders/xml/order3.xml +++ b/tests/sample_models/orders/xml/order3.xml @@ -21,6 +21,7 @@ JIDAZIO786DAZH 48.87271337163929 2.323433844198471 + test