Merge pull request #376 from andrewgodwin/schema-alteration

Schema alteration
commit 9aa358cedd1ad93c0f4c20700db7016651dc0598 (2 parents: 57c82f9 + 5569b0b)
mjtamlyn authored
Showing with 6,334 additions and 403 deletions.
  1. +7 −0 django/conf/global_settings.py
  2. +3 −3 django/contrib/auth/management/__init__.py
  3. +2 −2 django/contrib/contenttypes/management.py
  4. +1 −1  django/contrib/gis/db/backends/spatialite/creation.py
  5. +1 −1  django/contrib/gis/tests/layermap/tests.py
  6. +2 −2 django/contrib/sites/management.py
  7. +1 −1  django/core/management/commands/createcachetable.py
  8. +1 −1  django/core/management/commands/dumpdata.py
  9. +12 −12 django/core/management/commands/flush.py
  10. +1 −1  django/core/management/commands/loaddata.py
  11. +84 −0 django/core/management/commands/makemigrations.py
  12. +245 −0 django/core/management/commands/migrate.py
  13. +5 −145 django/core/management/commands/syncdb.py
  14. +8 −8 django/core/management/sql.py
  15. +47 −3 django/db/backends/__init__.py
  16. +8 −5 django/db/backends/creation.py
  17. +8 −0 django/db/backends/mysql/base.py
  18. +68 −2 django/db/backends/mysql/introspection.py
  19. +26 −0 django/db/backends/mysql/schema.py
  20. +10 −0 django/db/backends/oracle/base.py
  21. +11 −4 django/db/backends/oracle/creation.py
  22. +140 −0 django/db/backends/oracle/introspection.py
  23. +103 −0 django/db/backends/oracle/schema.py
  24. +7 −0 django/db/backends/postgresql_psycopg2/base.py
  25. +7 −2 django/db/backends/postgresql_psycopg2/creation.py
  26. +96 −1 django/db/backends/postgresql_psycopg2/introspection.py
  27. +5 −0 django/db/backends/postgresql_psycopg2/schema.py
  28. +729 −0 django/db/backends/schema.py
  29. +7 −0 django/db/backends/sqlite3/base.py
  30. +43 −2 django/db/backends/sqlite3/introspection.py
  31. +155 −0 django/db/backends/sqlite3/schema.py
  32. +2 −0  django/db/migrations/__init__.py
  33. +440 −0 django/db/migrations/autodetector.py
  34. +90 −0 django/db/migrations/executor.py
  35. +152 −0 django/db/migrations/graph.py
  36. +167 −0 django/db/migrations/loader.py
  37. +101 −0 django/db/migrations/migration.py
  38. +2 −0  django/db/migrations/operations/__init__.py
  39. +62 −0 django/db/migrations/operations/base.py
  40. +132 −0 django/db/migrations/operations/fields.py
  41. +157 −0 django/db/migrations/operations/models.py
  42. +69 −0 django/db/migrations/recorder.py
  43. +142 −0 django/db/migrations/state.py
  44. +180 −0 django/db/migrations/writer.py
  45. +4 −4 django/db/models/base.py
  46. +23 −3 django/db/models/fields/__init__.py
  47. +18 −6 django/db/models/fields/related.py
  48. +53 −10 django/db/models/loading.py
  49. +13 −5 django/db/models/options.py
  50. +4 −2 django/db/models/signals.py
  51. +5 −2 django/db/utils.py
  52. +5 −6 django/test/testcases.py
  53. +31 −0 django/utils/datastructures.py
  54. +5 −0 django/utils/functional.py
  55. +12 −0 django/utils/termcolors.py
  56. +2 −2 docs/howto/legacy-databases.txt
  57. +3 −0  docs/index.txt
  58. +2 −2 docs/internals/contributing/writing-documentation.txt
  59. +11 −0 docs/internals/deprecation.txt
  60. +4 −3 docs/intro/overview.txt
  61. +1 −1  docs/intro/reusable-apps.txt
  62. +4 −5 docs/man/django-admin.1
  63. +1 −1  docs/ref/contrib/comments/index.txt
  64. +1 −1  docs/ref/contrib/contenttypes.txt
  65. +2 −2 docs/ref/contrib/flatpages.txt
  66. +1 −1  docs/ref/contrib/index.txt
  67. +2 −2 docs/ref/contrib/redirects.txt
  68. +2 −2 docs/ref/contrib/sites.txt
  69. +3 −3 docs/ref/databases.txt
  70. +58 −46 docs/ref/django-admin.txt
  71. +6 −6 docs/ref/models/options.txt
  72. +54 −28 docs/ref/signals.txt
  73. +74 −0 docs/releases/1.7.txt
  74. +6 −8 docs/topics/auth/customizing.txt
  75. +5 −5 docs/topics/auth/default.txt
  76. +1 −1  docs/topics/auth/index.txt
  77. +2 −2 docs/topics/cache.txt
  78. +4 −3 docs/topics/db/models.txt
  79. +15 −6 docs/topics/db/multi-db.txt
  80. +1 −1  docs/topics/http/sessions.txt
  81. +1 −0  docs/topics/index.txt
  82. +2 −4 docs/topics/install.txt
  83. +297 −0 docs/topics/migrations.txt
  84. +1 −1  docs/topics/serialization.txt
  85. +7 −7 docs/topics/testing/advanced.txt
  86. +2 −2 docs/topics/testing/overview.txt
  87. 0  tests/app_cache/__init__.py
  88. +17 −0 tests/app_cache/models.py
  89. +44 −0 tests/app_cache/tests.py
  90. +1 −1  tests/cache/tests.py
  91. 0  tests/migrations/__init__.py
  92. 0  tests/migrations/models.py
  93. +274 −0 tests/migrations/test_autodetector.py
  94. +41 −0 tests/migrations/test_base.py
  95. +37 −0 tests/migrations/test_commands.py
  96. +77 −0 tests/migrations/test_executor.py
  97. +135 −0 tests/migrations/test_graph.py
  98. +79 −0 tests/migrations/test_loader.py
  99. +27 −0 tests/migrations/test_migrations/0001_initial.py
  100. +24 −0 tests/migrations/test_migrations/0002_second.py
  101. 0  tests/migrations/test_migrations/__init__.py
  102. +21 −0 tests/migrations/test_migrations_2/0001_initial.py
  103. 0  tests/migrations/test_migrations_2/__init__.py
  104. +327 −0 tests/migrations/test_operations.py
  105. +77 −0 tests/migrations/test_state.py
  106. +84 −0 tests/migrations/test_writer.py
  107. +23 −23 tests/multiple_database/tests.py
  108. 0  tests/schema/__init__.py
  109. +97 −0 tests/schema/models.py
  110. +650 −0 tests/schema/tests.py
  111. +12 −0 tests/utils_tests/test_functional.py
7 django/conf/global_settings.py
@@ -609,3 +609,10 @@
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
+
+##############
+# MIGRATIONS #
+##############
+
+# Migration module overrides for apps, by app label.
+MIGRATION_MODULES = {}
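The new setting maps app labels to dotted module paths. A minimal sketch of a project-level override, assuming a hypothetical "blog" app and module path:

    # settings.py (sketch; app label and module path are made up)
    MIGRATION_MODULES = {
        "blog": "blog.db_migrations",  # used instead of blog/migrations/
    }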
6 django/contrib/auth/management/__init__.py
@@ -65,7 +65,7 @@ def create_permissions(app, created_models, verbosity, db=DEFAULT_DB_ALIAS, **kw
except UnavailableApp:
return
- if not router.allow_syncdb(db, auth_app.Permission):
+ if not router.allow_migrate(db, auth_app.Permission):
return
from django.contrib.contenttypes.models import ContentType
@@ -188,7 +188,7 @@ def get_default_username(check_db=True):
return ''
return default_username
-signals.post_syncdb.connect(create_permissions,
+signals.post_migrate.connect(create_permissions,
dispatch_uid="django.contrib.auth.management.create_permissions")
-signals.post_syncdb.connect(create_superuser,
+signals.post_migrate.connect(create_superuser,
sender=auth_app, dispatch_uid="django.contrib.auth.management.create_superuser")
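For reference, a receiver wired up the same way as the handlers above might look like this; the handler name is hypothetical, and the keyword arguments mirror those sent by emit_post_migrate_signal later in this diff:

    from django.db.models import signals

    def log_migrated(app, created_models, verbosity, db, **kwargs):
        # Called once per application after migrate (or flush) finishes.
        if verbosity >= 2:
            print("post_migrate handled for %s on %s" % (app.__name__, db))

    signals.post_migrate.connect(log_migrated,
        dispatch_uid="myproject.log_migrated")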
4 django/contrib/contenttypes/management.py
@@ -16,7 +16,7 @@ def update_contenttypes(app, created_models, verbosity=2, db=DEFAULT_DB_ALIAS, *
except UnavailableApp:
return
- if not router.allow_syncdb(db, ContentType):
+ if not router.allow_migrate(db, ContentType):
return
ContentType.objects.clear_cache()
@@ -88,7 +88,7 @@ def update_all_contenttypes(verbosity=2, **kwargs):
for app in get_apps():
update_contenttypes(app, None, verbosity, **kwargs)
-signals.post_syncdb.connect(update_contenttypes)
+signals.post_migrate.connect(update_contenttypes)
if __name__ == "__main__":
update_all_contenttypes()
2  django/contrib/gis/db/backends/spatialite/creation.py
@@ -47,7 +47,7 @@ def create_test_db(self, verbosity=1, autoclobber=False):
# We need to then do a flush to ensure that any data installed by
# custom SQL has been removed. The only test data should come from
- # test fixtures, or autogenerated from post_syncdb triggers.
+ # test fixtures, or autogenerated from post_migrate triggers.
# This has the side effect of loading initial data (which was
# intentionally skipped in the syncdb).
call_command('flush',
2  django/contrib/gis/tests/layermap/tests.py
@@ -311,7 +311,7 @@ def db_for_write(self, model, **hints):
def allow_relation(self, obj1, obj2, **hints):
return None
- def allow_syncdb(self, db, model):
+ def allow_migrate(self, db, model):
return True
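Under the renamed API, a complete router implements allow_migrate where it previously implemented allow_syncdb. A sketch, with hypothetical router and alias names:

    class OtherRouter(object):
        # Route everything to a second database, and only let the
        # migrate machinery create tables there.
        def db_for_read(self, model, **hints):
            return "other"

        def db_for_write(self, model, **hints):
            return "other"

        def allow_relation(self, obj1, obj2, **hints):
            return None

        def allow_migrate(self, db, model):
            return db == "other"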
4 django/contrib/sites/management.py
@@ -11,7 +11,7 @@
def create_default_site(app, created_models, verbosity, db, **kwargs):
# Only create the default sites in databases where Django created the table
- if Site in created_models and router.allow_syncdb(db, Site) :
+ if Site in created_models and router.allow_migrate(db, Site) :
# The default settings set SITE_ID = 1, and some tests in Django's test
# suite rely on this value. However, if database sequences are reused
# (e.g. in the test suite after flush/syncdb), it isn't guaranteed that
@@ -33,4 +33,4 @@ def create_default_site(app, created_models, verbosity, db, **kwargs):
Site.objects.clear_cache()
-signals.post_syncdb.connect(create_default_site, sender=site_app)
+signals.post_migrate.connect(create_default_site, sender=site_app)
2  django/core/management/commands/createcachetable.py
@@ -24,7 +24,7 @@ class Command(LabelCommand):
def handle_label(self, tablename, **options):
db = options.get('database')
cache = BaseDatabaseCache(tablename, {})
- if not router.allow_syncdb(db, cache.cache_model_class):
+ if not router.allow_migrate(db, cache.cache_model_class):
return
connection = connections[db]
fields = (
2  django/core/management/commands/dumpdata.py
@@ -118,7 +118,7 @@ def get_objects():
for model in sort_dependencies(app_list.items()):
if model in excluded_models:
continue
- if not model._meta.proxy and router.allow_syncdb(using, model):
+ if not model._meta.proxy and router.allow_migrate(using, model):
if use_base_manager:
objects = model._base_manager
else:
24 django/core/management/commands/flush.py
@@ -7,7 +7,7 @@
from django.core.management import call_command
from django.core.management.base import NoArgsCommand, CommandError
from django.core.management.color import no_style
-from django.core.management.sql import sql_flush, emit_post_sync_signal
+from django.core.management.sql import sql_flush, emit_post_migrate_signal
from django.utils.six.moves import input
from django.utils import six
@@ -23,8 +23,8 @@ class Command(NoArgsCommand):
help='Tells Django not to load any initial data after database synchronization.'),
)
help = ('Returns the database to the state it was in immediately after '
- 'syncdb was executed. This means that all data will be removed '
- 'from the database, any post-synchronization handlers will be '
+ 'migrate was first executed. This means that all data will be removed '
+ 'from the database, any post-migration handlers will be '
're-executed, and the initial_data fixture will be re-installed.')
def handle_noargs(self, **options):
@@ -35,7 +35,7 @@ def handle_noargs(self, **options):
# The following are stealth options used by Django's internals.
reset_sequences = options.get('reset_sequences', True)
allow_cascade = options.get('allow_cascade', False)
- inhibit_post_syncdb = options.get('inhibit_post_syncdb', False)
+ inhibit_post_migrate = options.get('inhibit_post_migrate', False)
self.style = no_style()
@@ -54,7 +54,7 @@ def handle_noargs(self, **options):
if interactive:
confirm = input("""You have requested a flush of the database.
This will IRREVERSIBLY DESTROY all data currently in the %r database,
-and return each table to the state it was in after syncdb.
+and return each table to a fresh state.
Are you sure you want to do this?
Type 'yes' to continue, or 'no' to cancel: """ % connection.settings_dict['NAME'])
@@ -77,8 +77,8 @@ def handle_noargs(self, **options):
"The full error: %s") % (connection.settings_dict['NAME'], e)
six.reraise(CommandError, CommandError(new_msg), sys.exc_info()[2])
- if not inhibit_post_syncdb:
- self.emit_post_syncdb(verbosity, interactive, db)
+ if not inhibit_post_migrate:
+ self.emit_post_migrate(verbosity, interactive, db)
# Reinstall the initial_data fixture.
if options.get('load_initial_data'):
@@ -89,13 +89,13 @@ def handle_noargs(self, **options):
self.stdout.write("Flush cancelled.\n")
@staticmethod
- def emit_post_syncdb(verbosity, interactive, database):
- # Emit the post sync signal. This allows individual applications to
- # respond as if the database had been sync'd from scratch.
+ def emit_post_migrate(verbosity, interactive, database):
+ # Emit the post migrate signal. This allows individual applications to
+ # respond as if the database had been migrated from scratch.
all_models = []
for app in models.get_apps():
all_models.extend([
m for m in models.get_models(app, include_auto_created=True)
- if router.allow_syncdb(database, m)
+ if router.allow_migrate(database, m)
])
- emit_post_sync_signal(set(all_models), verbosity, interactive, database)
+ emit_post_migrate_signal(set(all_models), verbosity, interactive, database)
2  django/core/management/commands/loaddata.py
@@ -134,7 +134,7 @@ def load_label(self, fixture_label):
for obj in objects:
objects_in_fixture += 1
- if router.allow_syncdb(self.using, obj.object.__class__):
+ if router.allow_migrate(self.using, obj.object.__class__):
loaded_objects_in_fixture += 1
self.models.add(obj.object.__class__)
try:
84 django/core/management/commands/makemigrations.py
@@ -0,0 +1,84 @@
+import sys
+import os
+from optparse import make_option
+
+from django.core.management.base import BaseCommand
+from django.core.exceptions import ImproperlyConfigured
+from django.db import connections, DEFAULT_DB_ALIAS
+from django.db.migrations.loader import MigrationLoader
+from django.db.migrations.autodetector import MigrationAutodetector, InteractiveMigrationQuestioner
+from django.db.migrations.state import ProjectState
+from django.db.migrations.writer import MigrationWriter
+from django.db.models.loading import cache
+
+
+class Command(BaseCommand):
+ option_list = BaseCommand.option_list + (
+ make_option('--empty', action='store_true', dest='empty', default=False,
+ help='Make a blank migration.'),
+ )
+
+ help = "Creates new migration(s) for apps."
+ usage_str = "Usage: ./manage.py makemigrations [--empty] [app [app ...]]"
+
+ def handle(self, *app_labels, **options):
+
+ self.verbosity = int(options.get('verbosity'))
+ self.interactive = options.get('interactive')
+
+ # Make sure the app they asked for exists
+ app_labels = set(app_labels)
+ bad_app_labels = set()
+ for app_label in app_labels:
+ try:
+ cache.get_app(app_label)
+ except ImproperlyConfigured:
+ bad_app_labels.add(app_label)
+ if bad_app_labels:
+ for app_label in bad_app_labels:
+ self.stderr.write("App '%s' could not be found. Is it in INSTALLED_APPS?" % app_label)
+ sys.exit(2)
+
+ # Load the current graph state. Takes a connection, but it's not used
+ # (makemigrations doesn't look at the database state).
+ loader = MigrationLoader(connections[DEFAULT_DB_ALIAS])
+
+ # Detect changes
+ autodetector = MigrationAutodetector(
+ loader.graph.project_state(),
+ ProjectState.from_app_cache(cache),
+ InteractiveMigrationQuestioner(specified_apps=app_labels),
+ )
+ changes = autodetector.changes(graph=loader.graph, trim_to_apps=app_labels or None)
+
+ # No changes? Tell them.
+ if not changes:
+ if len(app_labels) == 1:
+ self.stdout.write("No changes detected in app '%s'" % app_labels.pop())
+ elif len(app_labels) > 1:
+ self.stdout.write("No changes detected in apps '%s'" % ("', '".join(app_labels)))
+ else:
+ self.stdout.write("No changes detected")
+ return
+
+ directory_created = {}
+ for app_label, migrations in changes.items():
+ self.stdout.write(self.style.MIGRATE_HEADING("Migrations for '%s':" % app_label) + "\n")
+ for migration in migrations:
+ # Describe the migration
+ writer = MigrationWriter(migration)
+ self.stdout.write(" %s:\n" % (self.style.MIGRATE_LABEL(writer.filename),))
+ for operation in migration.operations:
+ self.stdout.write(" - %s\n" % operation.describe())
+ # Write it
+ migrations_directory = os.path.dirname(writer.path)
+ if not directory_created.get(app_label, False):
+ if not os.path.isdir(migrations_directory):
+ os.mkdir(migrations_directory)
+ init_path = os.path.join(migrations_directory, "__init__.py")
+ if not os.path.isfile(init_path):
+ open(init_path, "w").close()
+ # We just do this once per app
+ directory_created[app_label] = True
+ with open(writer.path, "w") as fh:
+ fh.write(writer.as_string())
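Per the usage string above, the command can also be driven from code; a hedged example using call_command with a hypothetical app label:

    from django.core.management import call_command

    # Equivalent to: ./manage.py makemigrations --empty blog
    call_command("makemigrations", "blog", empty=True, verbosity=1)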
245 django/core/management/commands/migrate.py
@@ -0,0 +1,245 @@
+from optparse import make_option
+from collections import OrderedDict
+from importlib import import_module
+import itertools
+import traceback
+
+from django.conf import settings
+from django.core.management import call_command
+from django.core.management.base import BaseCommand, CommandError
+from django.core.management.color import no_style
+from django.core.management.sql import custom_sql_for_model, emit_post_migrate_signal, emit_pre_migrate_signal
+from django.db import connections, router, transaction, models, DEFAULT_DB_ALIAS
+from django.db.migrations.executor import MigrationExecutor
+from django.db.migrations.loader import AmbiguityError
+from django.utils.module_loading import module_has_submodule
+
+
+class Command(BaseCommand):
+ option_list = BaseCommand.option_list + (
+ make_option('--noinput', action='store_false', dest='interactive', default=True,
+ help='Tells Django to NOT prompt the user for input of any kind.'),
+ make_option('--no-initial-data', action='store_false', dest='load_initial_data', default=True,
+ help='Tells Django not to load any initial data after database synchronization.'),
+ make_option('--database', action='store', dest='database',
+ default=DEFAULT_DB_ALIAS, help='Nominates a database to synchronize. '
+ 'Defaults to the "default" database.'),
+ make_option('--fake', action='store_true', dest='fake', default=False,
+ help='Mark migrations as run without actually running them'),
+ )
+
+ help = "Updates database schema. Manages both apps with migrations and those without."
+
+ def handle(self, *args, **options):
+
+ self.verbosity = int(options.get('verbosity'))
+ self.interactive = options.get('interactive')
+ self.show_traceback = options.get('traceback')
+ self.load_initial_data = options.get('load_initial_data')
+ self.test_database = options.get('test_database', False)
+
+ # Import the 'management' module within each installed app, to register
+ # dispatcher events.
+ for app_name in settings.INSTALLED_APPS:
+ if module_has_submodule(import_module(app_name), "management"):
+ import_module('.management', app_name)
+
+ # Get the database we're operating from
+ db = options.get('database')
+ connection = connections[db]
+
+ # Work out which apps have migrations and which do not
+ executor = MigrationExecutor(connection, self.migration_progress_callback)
+
+ # If they supplied command line arguments, work out what they mean.
+ run_syncdb = False
+ target_app_labels_only = True
+ if len(args) > 2:
+ raise CommandError("Too many command-line arguments (expecting 'appname' or 'appname migrationname')")
+ elif len(args) == 2:
+ app_label, migration_name = args
+ if app_label not in executor.loader.migrated_apps:
+ raise CommandError("App '%s' does not have migrations (you cannot selectively sync unmigrated apps)" % app_label)
+ if migration_name == "zero":
+ targets = [(app_label, None)]
+ else:
+ try:
+ migration = executor.loader.get_migration_by_prefix(app_label, migration_name)
+ except AmbiguityError:
+ raise CommandError("More than one migration matches '%s' in app '%s'. Please be more specific." % (app_label, migration_name))
+ except KeyError:
+ raise CommandError("Cannot find a migration matching '%s' from app '%s'. Is it in INSTALLED_APPS?" % (app_label, migration_name))
+ targets = [(app_label, migration.name)]
+ target_app_labels_only = False
+ elif len(args) == 1:
+ app_label = args[0]
+ if app_label not in executor.loader.migrated_apps:
+ raise CommandError("App '%s' does not have migrations (you cannot selectively sync unmigrated apps)" % app_label)
+ targets = [key for key in executor.loader.graph.leaf_nodes() if key[0] == app_label]
+ else:
+ targets = executor.loader.graph.leaf_nodes()
+ run_syncdb = True
+
+ plan = executor.migration_plan(targets)
+
+ # Print some useful info
+ if self.verbosity >= 1:
+ self.stdout.write(self.style.MIGRATE_HEADING("Operations to perform:"))
+ if run_syncdb:
+ self.stdout.write(self.style.MIGRATE_LABEL(" Synchronize unmigrated apps: ") + (", ".join(executor.loader.unmigrated_apps) or "(none)"))
+ if target_app_labels_only:
+ self.stdout.write(self.style.MIGRATE_LABEL(" Apply all migrations: ") + (", ".join(set(a for a, n in targets)) or "(none)"))
+ else:
+ if targets[0][1] is None:
+ self.stdout.write(self.style.MIGRATE_LABEL(" Unapply all migrations: ") + "%s" % (targets[0][0], ))
+ else:
+ self.stdout.write(self.style.MIGRATE_LABEL(" Target specific migration: ") + "%s, from %s" % (targets[0][1], targets[0][0]))
+
+ # Run the syncdb phase.
+ # If you ever manage to get rid of this, I owe you many, many drinks.
+ # Note that pre_migrate is called from inside here, as it needs
+ # the list of models about to be installed.
+ if run_syncdb:
+ if self.verbosity >= 1:
+ self.stdout.write(self.style.MIGRATE_HEADING("Synchronizing apps without migrations:"))
+ created_models = self.sync_apps(connection, executor.loader.unmigrated_apps)
+ else:
+ created_models = []
+
+ # Migrate!
+ if self.verbosity >= 1:
+ self.stdout.write(self.style.MIGRATE_HEADING("Running migrations:"))
+ if not plan:
+ if self.verbosity >= 1:
+ self.stdout.write(" No migrations needed.")
+ else:
+ executor.migrate(targets, plan, fake=options.get("fake", False))
+
+ # Send the post_migrate signal, so individual apps can do whatever they need
+ # to do at this point.
+ emit_post_migrate_signal(created_models, self.verbosity, self.interactive, connection.alias)
+
+ def migration_progress_callback(self, action, migration):
+ if self.verbosity >= 1:
+ if action == "apply_start":
+ self.stdout.write(" Applying %s..." % migration, ending="")
+ self.stdout.flush()
+ elif action == "apply_success":
+ self.stdout.write(self.style.MIGRATE_SUCCESS(" OK"))
+ elif action == "unapply_start":
+ self.stdout.write(" Unapplying %s..." % migration, ending="")
+ self.stdout.flush()
+ elif action == "unapply_success":
+ self.stdout.write(self.style.MIGRATE_SUCCESS(" OK"))
+
+ def sync_apps(self, connection, apps):
+ "Runs the old syncdb-style operation on a list of apps."
+ cursor = connection.cursor()
+
+ # Get a list of already installed *models* so that references work right.
+ tables = connection.introspection.table_names()
+ seen_models = connection.introspection.installed_models(tables)
+ created_models = set()
+ pending_references = {}
+
+ # Build the manifest of apps and models that are to be synchronized
+ all_models = [
+ (app.__name__.split('.')[-2],
+ [
+ m for m in models.get_models(app, include_auto_created=True)
+ if router.allow_migrate(connection.alias, m)
+ ])
+ for app in models.get_apps() if app.__name__.split('.')[-2] in apps
+ ]
+
+ def model_installed(model):
+ opts = model._meta
+ converter = connection.introspection.table_name_converter
+ # Note that if a model is unmanaged we short-circuit and never try to install it
+ return not ((converter(opts.db_table) in tables) or
+ (opts.auto_created and converter(opts.auto_created._meta.db_table) in tables))
+
+ manifest = OrderedDict(
+ (app_name, list(filter(model_installed, model_list)))
+ for app_name, model_list in all_models
+ )
+
+ create_models = set([x for x in itertools.chain(*manifest.values())])
+ emit_pre_migrate_signal(create_models, self.verbosity, self.interactive, connection.alias)
+
+ # Create the tables for each model
+ if self.verbosity >= 1:
+ self.stdout.write(" Creating tables...\n")
+ with transaction.atomic(using=connection.alias, savepoint=False):
+ for app_name, model_list in manifest.items():
+ for model in model_list:
+ # Create the model's database table, if it doesn't already exist.
+ if self.verbosity >= 3:
+ self.stdout.write(" Processing %s.%s model\n" % (app_name, model._meta.object_name))
+ sql, references = connection.creation.sql_create_model(model, no_style(), seen_models)
+ seen_models.add(model)
+ created_models.add(model)
+ for refto, refs in references.items():
+ pending_references.setdefault(refto, []).extend(refs)
+ if refto in seen_models:
+ sql.extend(connection.creation.sql_for_pending_references(refto, no_style(), pending_references))
+ sql.extend(connection.creation.sql_for_pending_references(model, no_style(), pending_references))
+ if self.verbosity >= 1 and sql:
+ self.stdout.write(" Creating table %s\n" % model._meta.db_table)
+ for statement in sql:
+ cursor.execute(statement)
+ tables.append(connection.introspection.table_name_converter(model._meta.db_table))
+
+ # We force a commit here, as that was the previous behaviour.
+ # If you can prove we don't need this, remove it.
+ transaction.set_dirty(using=connection.alias)
+
+ # The connection may have been closed by a syncdb handler.
+ cursor = connection.cursor()
+
+ # Install custom SQL for the app (but only if this
+ # is a model we've just created)
+ if self.verbosity >= 1:
+ self.stdout.write(" Installing custom SQL...\n")
+ for app_name, model_list in manifest.items():
+ for model in model_list:
+ if model in created_models:
+ custom_sql = custom_sql_for_model(model, no_style(), connection)
+ if custom_sql:
+ if self.verbosity >= 2:
+ self.stdout.write(" Installing custom SQL for %s.%s model\n" % (app_name, model._meta.object_name))
+ try:
+ with transaction.commit_on_success_unless_managed(using=connection.alias):
+ for sql in custom_sql:
+ cursor.execute(sql)
+ except Exception as e:
+ self.stderr.write(" Failed to install custom SQL for %s.%s model: %s\n" % (app_name, model._meta.object_name, e))
+ if self.show_traceback:
+ traceback.print_exc()
+ else:
+ if self.verbosity >= 3:
+ self.stdout.write(" No custom SQL for %s.%s model\n" % (app_name, model._meta.object_name))
+
+ if self.verbosity >= 1:
+ self.stdout.write(" Installing indexes...\n")
+
+ # Install SQL indices for all newly created models
+ for app_name, model_list in manifest.items():
+ for model in model_list:
+ if model in created_models:
+ index_sql = connection.creation.sql_indexes_for_model(model, no_style())
+ if index_sql:
+ if self.verbosity >= 2:
+ self.stdout.write(" Installing index for %s.%s model\n" % (app_name, model._meta.object_name))
+ try:
+ with transaction.commit_on_success_unless_managed(using=connection.alias):
+ for sql in index_sql:
+ cursor.execute(sql)
+ except Exception as e:
+ self.stderr.write(" Failed to install index for %s.%s model: %s\n" % (app_name, model._meta.object_name, e))
+
+ # Load initial_data fixtures (unless that has been disabled)
+ if self.load_initial_data:
+ call_command('loaddata', 'initial_data', verbosity=self.verbosity, database=connection.alias, skip_validation=True)
+
+ return created_models
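The argument handling above resolves migration targets in four ways; a sketch of the equivalent invocations, with hypothetical app and migration names:

    from django.core.management import call_command

    call_command("migrate")                  # everything, plus the syncdb phase
    call_command("migrate", "blog")          # all leaf migrations for "blog"
    call_command("migrate", "blog", "0002")  # forwards or backwards to 0002_*
    call_command("migrate", "blog", "zero")  # unapply all of "blog"'s migrations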
150 django/core/management/commands/syncdb.py
@@ -1,15 +1,8 @@
-from collections import OrderedDict
-from importlib import import_module
+import warnings
from optparse import make_option
-import itertools
-import traceback
-
-from django.conf import settings
+from django.db import DEFAULT_DB_ALIAS
from django.core.management import call_command
from django.core.management.base import NoArgsCommand
-from django.core.management.color import no_style
-from django.core.management.sql import custom_sql_for_model, emit_post_sync_signal, emit_pre_sync_signal
-from django.db import connections, router, transaction, models, DEFAULT_DB_ALIAS
class Command(NoArgsCommand):
@@ -22,141 +15,8 @@ class Command(NoArgsCommand):
default=DEFAULT_DB_ALIAS, help='Nominates a database to synchronize. '
'Defaults to the "default" database.'),
)
- help = "Create the database tables for all apps in INSTALLED_APPS whose tables haven't already been created."
+ help = "Deprecated - use 'migrate' instead."
def handle_noargs(self, **options):
-
- verbosity = int(options.get('verbosity'))
- interactive = options.get('interactive')
- show_traceback = options.get('traceback')
- load_initial_data = options.get('load_initial_data')
-
- self.style = no_style()
-
- # Import the 'management' module within each installed app, to register
- # dispatcher events.
- for app_name in settings.INSTALLED_APPS:
- try:
- import_module('.management', app_name)
- except ImportError as exc:
- # This is slightly hackish. We want to ignore ImportErrors
- # if the "management" module itself is missing -- but we don't
- # want to ignore the exception if the management module exists
- # but raises an ImportError for some reason. The only way we
- # can do this is to check the text of the exception. Note that
- # we're a bit broad in how we check the text, because different
- # Python implementations may not use the same text.
- # CPython uses the text "No module named management"
- # PyPy uses "No module named myproject.myapp.management"
- msg = exc.args[0]
- if not msg.startswith('No module named') or 'management' not in msg:
- raise
-
- db = options.get('database')
- connection = connections[db]
- cursor = connection.cursor()
-
- # Get a list of already installed *models* so that references work right.
- tables = connection.introspection.table_names()
- seen_models = connection.introspection.installed_models(tables)
- created_models = set()
- pending_references = {}
-
- # Build the manifest of apps and models that are to be synchronized
- all_models = [
- (app.__name__.split('.')[-2],
- [m for m in models.get_models(app, include_auto_created=True)
- if router.allow_syncdb(db, m)])
- for app in models.get_apps()
- ]
-
- def model_installed(model):
- opts = model._meta
- converter = connection.introspection.table_name_converter
- return not ((converter(opts.db_table) in tables) or
- (opts.auto_created and converter(opts.auto_created._meta.db_table) in tables))
-
- manifest = OrderedDict(
- (app_name, list(filter(model_installed, model_list)))
- for app_name, model_list in all_models
- )
-
- create_models = set([x for x in itertools.chain(*manifest.values())])
- emit_pre_sync_signal(create_models, verbosity, interactive, db)
-
- # Create the tables for each model
- if verbosity >= 1:
- self.stdout.write("Creating tables ...\n")
- with transaction.commit_on_success_unless_managed(using=db):
- for app_name, model_list in manifest.items():
- for model in model_list:
- # Create the model's database table, if it doesn't already exist.
- if verbosity >= 3:
- self.stdout.write("Processing %s.%s model\n" % (app_name, model._meta.object_name))
- sql, references = connection.creation.sql_create_model(model, self.style, seen_models)
- seen_models.add(model)
- created_models.add(model)
- for refto, refs in references.items():
- pending_references.setdefault(refto, []).extend(refs)
- if refto in seen_models:
- sql.extend(connection.creation.sql_for_pending_references(refto, self.style, pending_references))
- sql.extend(connection.creation.sql_for_pending_references(model, self.style, pending_references))
- if verbosity >= 1 and sql:
- self.stdout.write("Creating table %s\n" % model._meta.db_table)
- for statement in sql:
- cursor.execute(statement)
- tables.append(connection.introspection.table_name_converter(model._meta.db_table))
-
- # Send the post_syncdb signal, so individual apps can do whatever they need
- # to do at this point.
- emit_post_sync_signal(created_models, verbosity, interactive, db)
-
- # The connection may have been closed by a syncdb handler.
- cursor = connection.cursor()
-
- # Install custom SQL for the app (but only if this
- # is a model we've just created)
- if verbosity >= 1:
- self.stdout.write("Installing custom SQL ...\n")
- for app_name, model_list in manifest.items():
- for model in model_list:
- if model in created_models:
- custom_sql = custom_sql_for_model(model, self.style, connection)
- if custom_sql:
- if verbosity >= 2:
- self.stdout.write("Installing custom SQL for %s.%s model\n" % (app_name, model._meta.object_name))
- try:
- with transaction.commit_on_success_unless_managed(using=db):
- for sql in custom_sql:
- cursor.execute(sql)
- except Exception as e:
- self.stderr.write("Failed to install custom SQL for %s.%s model: %s\n" % \
- (app_name, model._meta.object_name, e))
- if show_traceback:
- traceback.print_exc()
- else:
- if verbosity >= 3:
- self.stdout.write("No custom SQL for %s.%s model\n" % (app_name, model._meta.object_name))
-
- if verbosity >= 1:
- self.stdout.write("Installing indexes ...\n")
- # Install SQL indices for all newly created models
- for app_name, model_list in manifest.items():
- for model in model_list:
- if model in created_models:
- index_sql = connection.creation.sql_indexes_for_model(model, self.style)
- if index_sql:
- if verbosity >= 2:
- self.stdout.write("Installing index for %s.%s model\n" % (app_name, model._meta.object_name))
- try:
- with transaction.commit_on_success_unless_managed(using=db):
- for sql in index_sql:
- cursor.execute(sql)
- except Exception as e:
- self.stderr.write("Failed to install index for %s.%s model: %s\n" % \
- (app_name, model._meta.object_name, e))
-
- # Load initial_data fixtures (unless that has been disabled)
- if load_initial_data:
- call_command('loaddata', 'initial_data', verbosity=verbosity,
- database=db, skip_validation=True)
+ warnings.warn("The syncdb command will be removed in Django 1.9", PendingDeprecationWarning)
+ call_command("migrate", **options)
16 django/core/management/sql.py
@@ -206,25 +206,25 @@ def custom_sql_for_model(model, style, connection):
return output
-def emit_pre_sync_signal(create_models, verbosity, interactive, db):
- # Emit the pre_sync signal for every application.
+def emit_pre_migrate_signal(create_models, verbosity, interactive, db):
+ # Emit the pre_migrate signal for every application.
for app in models.get_apps():
app_name = app.__name__.split('.')[-2]
if verbosity >= 2:
- print("Running pre-sync handlers for application %s" % app_name)
- models.signals.pre_syncdb.send(sender=app, app=app,
+ print("Running pre-migrate handlers for application %s" % app_name)
+ models.signals.pre_migrate.send(sender=app, app=app,
create_models=create_models,
verbosity=verbosity,
interactive=interactive,
db=db)
-def emit_post_sync_signal(created_models, verbosity, interactive, db):
- # Emit the post_sync signal for every application.
+def emit_post_migrate_signal(created_models, verbosity, interactive, db):
+ # Emit the post_migrate signal for every application.
for app in models.get_apps():
app_name = app.__name__.split('.')[-2]
if verbosity >= 2:
- print("Running post-sync handlers for application %s" % app_name)
- models.signals.post_syncdb.send(sender=app, app=app,
+ print("Running post-migrate handlers for application %s" % app_name)
+ models.signals.post_migrate.send(sender=app, app=app,
created_models=created_models, verbosity=verbosity,
interactive=interactive, db=db)
50 django/db/backends/__init__.py
@@ -521,6 +521,10 @@ def _start_transaction_under_autocommit(self):
"""
raise NotImplementedError
+ def schema_editor(self):
+ "Returns a new instance of this backend's SchemaEditor"
+ raise NotImplementedError()
+
class BaseDatabaseFeatures(object):
allows_group_by_pk = False
@@ -630,11 +634,32 @@ class BaseDatabaseFeatures(object):
# when autocommit is disabled? http://bugs.python.org/issue8145#msg109965
autocommits_when_autocommit_is_off = False
+ # Can we roll back DDL in a transaction?
+ can_rollback_ddl = False
+
+ # Can we issue more than one ALTER COLUMN clause in an ALTER TABLE?
+ supports_combined_alters = False
+
+ # What's the maximum length for index names?
+ max_index_name_length = 63
+
+ # Does it support foreign keys?
+ supports_foreign_keys = True
+
+ # Does it support CHECK constraints?
+ supports_check_constraints = True
+
# Does the backend support 'pyformat' style ("... %(name)s ...", {'name': value})
# parameter passing? Note this can be provided by the backend even if not
# supported by the Python driver
supports_paramstyle_pyformat = True
+ # Does the backend require literal defaults, rather than parameterised ones?
+ requires_literal_defaults = False
+
+ # Does the backend require a connection reset after each material schema change?
+ connection_persists_old_columns = False
+
def __init__(self, connection):
self.connection = connection
@@ -1227,7 +1252,7 @@ def django_table_names(self, only_existing=False):
for model in models.get_models(app):
if not model._meta.managed:
continue
- if not router.allow_syncdb(self.connection.alias, model):
+ if not router.allow_migrate(self.connection.alias, model):
continue
tables.add(model._meta.db_table)
tables.update([f.m2m_db_table() for f in model._meta.local_many_to_many])
@@ -1247,7 +1272,7 @@ def installed_models(self, tables):
all_models = []
for app in models.get_apps():
for model in models.get_models(app):
- if router.allow_syncdb(self.connection.alias, model):
+ if router.allow_migrate(self.connection.alias, model):
all_models.append(model)
tables = list(map(self.table_name_converter, tables))
return set([
@@ -1268,7 +1293,7 @@ def sequence_list(self):
continue
if model._meta.swapped:
continue
- if not router.allow_syncdb(self.connection.alias, model):
+ if not router.allow_migrate(self.connection.alias, model):
continue
for f in model._meta.local_fields:
if isinstance(f, models.AutoField):
@@ -1310,6 +1335,25 @@ def get_indexes(self, cursor, table_name):
"""
raise NotImplementedError
+ def get_constraints(self, cursor, table_name):
+ """
+ Retrieves any constraints or keys (unique, pk, fk, check, index)
+ across one or more columns.
+
+ Returns a dict mapping constraint names to their attributes,
+ where attributes is a dict with keys:
+ * columns: List of columns this covers
+ * primary_key: True if primary key, False otherwise
+ * unique: True if this is a unique constraint, False otherwise
+ * foreign_key: (table, column) of target, or None
+ * check: True if check constraint, False otherwise
+ * index: True if index, False otherwise.
+
+ Some backends may return special constraint names that don't exist
+ if they don't name constraints of a certain type (e.g. SQLite)
+ """
+ raise NotImplementedError
+
class BaseDatabaseClient(object):
"""
13 django/db/backends/creation.py
@@ -23,11 +23,13 @@ class BaseDatabaseCreation(object):
destruction of test databases.
"""
data_types = {}
+ data_type_check_constraints = {}
def __init__(self, connection):
self.connection = connection
- def _digest(self, *args):
+ @classmethod
+ def _digest(cls, *args):
"""
Generates a 32-bit digest of a set of arguments that can be used to
shorten identifying names.
@@ -330,18 +332,19 @@ def create_test_db(self, verbosity=1, autoclobber=False):
settings.DATABASES[self.connection.alias]["NAME"] = test_database_name
self.connection.settings_dict["NAME"] = test_database_name
- # Report syncdb messages at one level lower than that requested.
+ # Report migrate messages at one level lower than that requested.
# This ensures we don't get flooded with messages during testing
# (unless you really ask to be flooded)
- call_command('syncdb',
+ call_command('migrate',
verbosity=max(verbosity - 1, 0),
interactive=False,
database=self.connection.alias,
- load_initial_data=False)
+ load_initial_data=False,
+ test_database=True)
# We need to then do a flush to ensure that any data installed by
# custom SQL has been removed. The only test data should come from
- # test fixtures, or autogenerated from post_syncdb triggers.
+ # test fixtures, or autogenerated from post_migrate triggers.
# This has the side effect of loading initial data (which was
# intentionally skipped in the syncdb).
call_command('flush',
8 django/db/backends/mysql/base.py
@@ -44,6 +44,9 @@
from django.db.backends.mysql.introspection import DatabaseIntrospection
from django.db.backends.mysql.validation import DatabaseValidation
from django.utils.encoding import force_str, force_text
+from django.db.backends.mysql.schema import DatabaseSchemaEditor
+from django.utils.encoding import force_str
+from django.utils.functional import cached_property
from django.utils.safestring import SafeBytes, SafeText
from django.utils import six
from django.utils import timezone
@@ -171,6 +174,7 @@ class DatabaseFeatures(BaseDatabaseFeatures):
requires_explicit_null_ordering_when_grouping = True
allows_primary_key_0 = False
uses_savepoints = True
+ supports_check_constraints = False
def __init__(self, connection):
super(DatabaseFeatures, self).__init__(connection)
@@ -514,6 +518,10 @@ def check_constraints(self, table_names=None):
table_name, column_name, bad_row[1],
referenced_table_name, referenced_column_name))
+ def schema_editor(self):
+ "Returns a new instance of this backend's SchemaEditor"
+ return DatabaseSchemaEditor(self)
+
def is_usable(self):
try:
self.connection.ping()
70 django/db/backends/mysql/introspection.py
@@ -1,6 +1,6 @@
import re
from .base import FIELD_TYPE
-
+from django.utils.datastructures import OrderedSet
from django.db.backends import BaseDatabaseIntrospection, FieldInfo
from django.utils.encoding import force_text
@@ -115,5 +115,71 @@ def get_indexes(self, cursor, table_name):
for row in rows:
if row[2] in multicol_indexes:
continue
- indexes[row[4]] = {'primary_key': (row[2] == 'PRIMARY'), 'unique': not bool(row[1])}
+ if row[4] not in indexes:
+ indexes[row[4]] = {'primary_key': False, 'unique': False}
+ # It's possible to have the unique and PK constraints in separate indexes.
+ if row[2] == 'PRIMARY':
+ indexes[row[4]]['primary_key'] = True
+ if not bool(row[1]):
+ indexes[row[4]]['unique'] = True
return indexes
+
+ def get_constraints(self, cursor, table_name):
+ """
+ Retrieves any constraints or keys (unique, pk, fk, check, index) across one or more columns.
+ """
+ constraints = {}
+ # Get the actual constraint names and columns
+ name_query = """
+ SELECT kc.`constraint_name`, kc.`column_name`,
+ kc.`referenced_table_name`, kc.`referenced_column_name`
+ FROM information_schema.key_column_usage AS kc
+ WHERE
+ kc.table_schema = %s AND
+ kc.table_name = %s
+ """
+ cursor.execute(name_query, [self.connection.settings_dict['NAME'], table_name])
+ for constraint, column, ref_table, ref_column in cursor.fetchall():
+ if constraint not in constraints:
+ constraints[constraint] = {
+ 'columns': OrderedSet(),
+ 'primary_key': False,
+ 'unique': False,
+ 'index': False,
+ 'check': False,
+ 'foreign_key': (ref_table, ref_column) if ref_column else None,
+ }
+ constraints[constraint]['columns'].add(column)
+ # Now get the constraint types
+ type_query = """
+ SELECT c.constraint_name, c.constraint_type
+ FROM information_schema.table_constraints AS c
+ WHERE
+ c.table_schema = %s AND
+ c.table_name = %s
+ """
+ cursor.execute(type_query, [self.connection.settings_dict['NAME'], table_name])
+ for constraint, kind in cursor.fetchall():
+ if kind.lower() == "primary key":
+ constraints[constraint]['primary_key'] = True
+ constraints[constraint]['unique'] = True
+ elif kind.lower() == "unique":
+ constraints[constraint]['unique'] = True
+ # Now add in the indexes
+ cursor.execute("SHOW INDEX FROM %s" % self.connection.ops.quote_name(table_name))
+ for table, non_unique, index, colseq, column in [x[:5] for x in cursor.fetchall()]:
+ if index not in constraints:
+ constraints[index] = {
+ 'columns': OrderedSet(),
+ 'primary_key': False,
+ 'unique': False,
+ 'index': True,
+ 'check': False,
+ 'foreign_key': None,
+ }
+ constraints[index]['index'] = True
+ constraints[index]['columns'].add(column)
+ # Convert the sorted sets to lists
+ for constraint in constraints.values():
+ constraint['columns'] = list(constraint['columns'])
+ return constraints
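A usage sketch for the new hook, with a hypothetical table name:

    from django.db import connection

    cursor = connection.cursor()
    constraints = connection.introspection.get_constraints(cursor, "blog_post")
    for name, details in constraints.items():
        print(name, details["columns"], details["foreign_key"])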
26 django/db/backends/mysql/schema.py
@@ -0,0 +1,26 @@
+from django.db.backends.schema import BaseDatabaseSchemaEditor
+
+
+class DatabaseSchemaEditor(BaseDatabaseSchemaEditor):
+
+ sql_rename_table = "RENAME TABLE %(old_table)s TO %(new_table)s"
+
+ sql_alter_column_null = "MODIFY %(column)s %(type)s NULL"
+ sql_alter_column_not_null = "MODIFY %(column)s %(type)s NOT NULL"
+ sql_alter_column_type = "MODIFY %(column)s %(type)s"
+ sql_rename_column = "ALTER TABLE %(table)s CHANGE %(old_column)s %(new_column)s %(type)s"
+
+ sql_delete_unique = "ALTER TABLE %(table)s DROP INDEX %(name)s"
+
+ sql_create_fk = "ALTER TABLE %(table)s ADD CONSTRAINT %(name)s FOREIGN KEY (%(column)s) REFERENCES %(to_table)s (%(to_column)s)"
+ sql_delete_fk = "ALTER TABLE %(table)s DROP FOREIGN KEY %(name)s"
+
+ sql_delete_index = "DROP INDEX %(name)s ON %(table)s"
+
+ sql_delete_pk = "ALTER TABLE %(table)s DROP PRIMARY KEY"
+
+ alter_string_set_null = 'MODIFY %(column)s %(type)s NULL;'
+ alter_string_drop_null = 'MODIFY %(column)s %(type)s NOT NULL;'
+
+ sql_create_pk = "ALTER TABLE %(table)s ADD CONSTRAINT %(name)s PRIMARY KEY (%(columns)s)"
+ sql_delete_pk = "ALTER TABLE %(table)s DROP PRIMARY KEY"
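These attributes are plain %-style templates that the base schema editor fills in; a sketch of the substitution, with hypothetical table and constraint names:

    sql = DatabaseSchemaEditor.sql_delete_fk % {
        "table": "`blog_post`",
        "name": "`blog_post_author_id_fk`",
    }
    # -> ALTER TABLE `blog_post` DROP FOREIGN KEY `blog_post_author_id_fk`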
10 django/db/backends/oracle/base.py
@@ -55,6 +55,7 @@ def _setup_environment(environ):
from django.db.backends.oracle.client import DatabaseClient
from django.db.backends.oracle.creation import DatabaseCreation
from django.db.backends.oracle.introspection import DatabaseIntrospection
+from django.db.backends.oracle.schema import DatabaseSchemaEditor
from django.utils.encoding import force_bytes, force_text
@@ -90,6 +91,11 @@ class DatabaseFeatures(BaseDatabaseFeatures):
has_bulk_insert = True
supports_tablespaces = True
supports_sequence_reset = False
+ supports_combined_alters = False
+ max_index_name_length = 30
+ nulls_order_largest = True
+ requires_literal_defaults = True
+ connection_persists_old_columns = True
nulls_order_largest = True
@@ -621,6 +627,10 @@ def _commit(self):
and x.code == 2091 and 'ORA-02291' in x.message:
six.reraise(utils.IntegrityError, utils.IntegrityError(*tuple(e.args)), sys.exc_info()[2])
raise
+
+ def schema_editor(self):
+ "Returns a new instance of this backend's SchemaEditor"
+ return DatabaseSchemaEditor(self)
# Oracle doesn't support savepoint commits. Ignore them.
def _savepoint_commit(self, sid):
15 django/db/backends/oracle/creation.py
@@ -22,7 +22,7 @@ class DatabaseCreation(BaseDatabaseCreation):
data_types = {
'AutoField': 'NUMBER(11)',
'BinaryField': 'BLOB',
- 'BooleanField': 'NUMBER(1) CHECK (%(qn_column)s IN (0,1))',
+ 'BooleanField': 'NUMBER(1)',
'CharField': 'NVARCHAR2(%(max_length)s)',
'CommaSeparatedIntegerField': 'VARCHAR2(%(max_length)s)',
'DateField': 'DATE',
@@ -35,10 +35,10 @@ class DatabaseCreation(BaseDatabaseCreation):
'BigIntegerField': 'NUMBER(19)',
'IPAddressField': 'VARCHAR2(15)',
'GenericIPAddressField': 'VARCHAR2(39)',
- 'NullBooleanField': 'NUMBER(1) CHECK ((%(qn_column)s IN (0,1)) OR (%(qn_column)s IS NULL))',
+ 'NullBooleanField': 'NUMBER(1)',
'OneToOneField': 'NUMBER(11)',
- 'PositiveIntegerField': 'NUMBER(11) CHECK (%(qn_column)s >= 0)',
- 'PositiveSmallIntegerField': 'NUMBER(11) CHECK (%(qn_column)s >= 0)',
+ 'PositiveIntegerField': 'NUMBER(11)',
+ 'PositiveSmallIntegerField': 'NUMBER(11)',
'SlugField': 'NVARCHAR2(%(max_length)s)',
'SmallIntegerField': 'NUMBER(11)',
'TextField': 'NCLOB',
@@ -46,6 +46,13 @@ class DatabaseCreation(BaseDatabaseCreation):
'URLField': 'VARCHAR2(%(max_length)s)',
}
+ data_type_check_constraints = {
+ 'BooleanField': '%(qn_column)s IN (0,1)',
+ 'NullBooleanField': '(%(qn_column)s IN (0,1)) OR (%(qn_column)s IS NULL)',
+ 'PositiveIntegerField': '%(qn_column)s >= 0',
+ 'PositiveSmallIntegerField': '%(qn_column)s >= 0',
+ }
+
def __init__(self, connection):
super(DatabaseCreation, self).__init__(connection)
140 django/db/backends/oracle/introspection.py
@@ -134,3 +134,143 @@ def get_indexes(self, cursor, table_name):
indexes[row[0]] = {'primary_key': bool(row[1]),
'unique': bool(row[2])}
return indexes
+
+ def get_constraints(self, cursor, table_name):
+ """
+ Retrieves any constraints or keys (unique, pk, fk, check, index) across one or more columns.
+ """
+ constraints = {}
+ # Loop over the constraints, getting PKs and uniques
+ cursor.execute("""
+ SELECT
+ user_constraints.constraint_name,
+ LOWER(cols.column_name) AS column_name,
+ CASE user_constraints.constraint_type
+ WHEN 'P' THEN 1
+ ELSE 0
+ END AS is_primary_key,
+ CASE user_indexes.uniqueness
+ WHEN 'UNIQUE' THEN 1
+ ELSE 0
+ END AS is_unique,
+ CASE user_constraints.constraint_type
+ WHEN 'C' THEN 1
+ ELSE 0
+ END AS is_check_constraint
+ FROM
+ user_constraints
+ INNER JOIN
+ user_indexes ON user_indexes.index_name = user_constraints.index_name
+ LEFT OUTER JOIN
+ user_cons_columns cols ON user_constraints.constraint_name = cols.constraint_name
+ WHERE
+ (
+ user_constraints.constraint_type = 'P' OR
+ user_constraints.constraint_type = 'U'
+ )
+ AND user_constraints.table_name = UPPER(%s)
+ ORDER BY cols.position
+ """, [table_name])
+ for constraint, column, pk, unique, check in cursor.fetchall():
+ # If we're the first column, make the record
+ if constraint not in constraints:
+ constraints[constraint] = {
+ "columns": [],
+ "primary_key": pk,
+ "unique": unique,
+ "foreign_key": None,
+ "check": check,
+ "index": True, # All P and U come with index, see inner join above
+ }
+ # Record the details
+ constraints[constraint]['columns'].append(column)
+ # Check constraints
+ cursor.execute("""
+ SELECT
+ cons.constraint_name,
+ LOWER(cols.column_name) AS column_name
+ FROM
+ user_constraints cons
+ LEFT OUTER JOIN
+ user_cons_columns cols ON cons.constraint_name = cols.constraint_name
+ WHERE
+ cons.constraint_type = 'C' AND
+ cons.table_name = UPPER(%s)
+ ORDER BY cols.position
+ """, [table_name])
+ for constraint, column in cursor.fetchall():
+ # If we're the first column, make the record
+ if constraint not in constraints:
+ constraints[constraint] = {
+ "columns": [],
+ "primary_key": False,
+ "unique": False,
+ "foreign_key": None,
+ "check": True,
+ "index": False,
+ }
+ # Record the details
+ constraints[constraint]['columns'].append(column)
+ # Foreign key constraints
+ cursor.execute("""
+ SELECT
+ cons.constraint_name,
+ LOWER(cols.column_name) AS column_name,
+ LOWER(rcons.table_name),
+ LOWER(rcols.column_name)
+ FROM
+ user_constraints cons
+ INNER JOIN
+ user_constraints rcons ON cons.r_constraint_name = rcons.constraint_name
+ INNER JOIN
+ user_cons_columns rcols ON rcols.constraint_name = rcons.constraint_name
+ LEFT OUTER JOIN
+ user_cons_columns cols ON cons.constraint_name = cols.constraint_name
+ WHERE
+ cons.constraint_type = 'R' AND
+ cons.table_name = UPPER(%s)
+ ORDER BY cols.position
+ """, [table_name])
+ for constraint, column, other_table, other_column in cursor.fetchall():
+ # If we're the first column, make the record
+ if constraint not in constraints:
+ constraints[constraint] = {
+ "columns": [],
+ "primary_key": False,
+ "unique": False,
+ "foreign_key": (other_table, other_column),
+ "check": False,
+ "index": False,
+ }
+ # Record the details
+ constraints[constraint]['columns'].append(column)
+ # Now get indexes
+ cursor.execute("""
+ SELECT
+ index_name,
+ LOWER(column_name)
+ FROM
+ user_ind_columns cols
+ WHERE
+ table_name = UPPER(%s) AND
+ NOT EXISTS (
+ SELECT 1
+ FROM user_constraints cons
+ WHERE cols.index_name = cons.index_name
+ )
+ ORDER BY cols.column_position
+ """, [table_name])
+ for constraint, column in cursor.fetchall():
+ # If we're the first column, make the record
+ if constraint not in constraints:
+ constraints[constraint] = {
+ "columns": [],
+ "primary_key": False,
+ "unique": False,
+ "foreign_key": None,
+ "check": False,
+ "index": True,
+ }
+ # Record the details
+ constraints[constraint]['columns'].append(column)
+ return constraints
103 django/db/backends/oracle/schema.py
@@ -0,0 +1,103 @@
+import copy
+import datetime
+from django.utils import six
+from django.db.backends.schema import BaseDatabaseSchemaEditor
+from django.db.utils import DatabaseError
+
+
+class DatabaseSchemaEditor(BaseDatabaseSchemaEditor):
+
+ sql_create_column = "ALTER TABLE %(table)s ADD %(column)s %(definition)s"
+ sql_alter_column_type = "MODIFY %(column)s %(type)s"
+ sql_alter_column_null = "MODIFY %(column)s NULL"
+ sql_alter_column_not_null = "MODIFY %(column)s NOT NULL"
+ sql_alter_column_default = "MODIFY %(column)s DEFAULT %(default)s"
+ sql_alter_column_no_default = "MODIFY %(column)s DEFAULT NULL"
+ sql_delete_column = "ALTER TABLE %(table)s DROP COLUMN %(column)s"
+ sql_delete_table = "DROP TABLE %(table)s CASCADE CONSTRAINTS"
+
+ def delete_model(self, model):
+ # Run superclass action
+ super(DatabaseSchemaEditor, self).delete_model(model)
+ # Clean up any autoincrement trigger
+ self.execute("""
+ DECLARE
+ i INTEGER;
+ BEGIN
+ SELECT COUNT(*) INTO i FROM USER_CATALOG
+ WHERE TABLE_NAME = '%(sq_name)s' AND TABLE_TYPE = 'SEQUENCE';
+ IF i = 1 THEN
+ EXECUTE IMMEDIATE 'DROP SEQUENCE "%(sq_name)s"';
+ END IF;
+ END;
+ /""" % {'sq_name': self.connection.ops._get_sequence_name(model._meta.db_table)})
+
+ def alter_field(self, model, old_field, new_field, strict=False):
+ try:
+ # Run superclass action
+ super(DatabaseSchemaEditor, self).alter_field(model, old_field, new_field, strict)
+ except DatabaseError as e:
+ description = str(e)
+ # If we're changing to/from LOB fields, we need to do a
+ # SQLite-ish workaround
+ if 'ORA-22858' in description or 'ORA-22859' in description:
+ self._alter_field_lob_workaround(model, old_field, new_field)
+ else:
+ raise
+
+ def _alter_field_lob_workaround(self, model, old_field, new_field):
+ """
+ Oracle refuses to change a column type from/to LOB to/from a regular
+ column. In Django, this shows up when the field is changed from/to
+ a TextField.
+ What we need to do instead is:
+ - Add the desired field with a temporary name
+ - Update the table to transfer values from old to new
+ - Drop old column
+ - Rename the new column
+ """
+ # Make a new field that's like the new one but with a temporary
+ # column name.
+ new_temp_field = copy.deepcopy(new_field)
+ new_temp_field.column = self._generate_temp_name(new_field.column)
+ # Add it
+ self.add_field(model, new_temp_field)
+ # Transfer values across
+ self.execute("UPDATE %s set %s=%s" % (
+ self.quote_name(model._meta.db_table),
+ self.quote_name(new_temp_field.column),
+ self.quote_name(old_field.column),
+ ))
+ # Drop the old field
+ self.remove_field(model, old_field)
+ # Rename the new field
+ self.alter_field(model, new_temp_field, new_field)
+ # Close the connection to force cx_Oracle to get column types right
+ # on a new cursor
+ self.connection.close()
+
+ def normalize_name(self, name):
+ """
+ Get the properly shortened and uppercased identifier as returned by quote_name(), but without the actual quotes.
+ """
+ nn = self.quote_name(name)
+ if nn[0] == '"' and nn[-1] == '"':
+ nn = nn[1:-1]
+ return nn
+
+ def _generate_temp_name(self, for_name):
+ """
+ Generates temporary names for workarounds that need temp columns
+ """
+ suffix = hex(hash(for_name)).upper()[1:]
+ return self.normalize_name(for_name + "_" + suffix)
+
+ def prepare_default(self, value):
+ if isinstance(value, (datetime.date, datetime.time, datetime.datetime)):
+ return "'%s'" % value
+ elif isinstance(value, six.string_types):
+ return repr(value)
+ elif isinstance(value, bool):
+ return "1" if value else "0"
+ else:
+ return str(value)
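Since requires_literal_defaults is set for this backend (see the feature flags above), defaults are inlined into the DDL rather than parameterised; a sketch of what prepare_default() returns, assuming a live Oracle connection:

    import datetime
    from django.db import connection

    editor = connection.schema_editor()
    editor.prepare_default(True)                       # -> "1"
    editor.prepare_default(5)                          # -> "5"
    editor.prepare_default(datetime.date(2013, 9, 1))  # -> "'2013-09-01'"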
7 django/db/backends/postgresql_psycopg2/base.py
@@ -14,6 +14,7 @@
from django.db.backends.postgresql_psycopg2.creation import DatabaseCreation
from django.db.backends.postgresql_psycopg2.version import get_version
from django.db.backends.postgresql_psycopg2.introspection import DatabaseIntrospection
+from django.db.backends.postgresql_psycopg2.schema import DatabaseSchemaEditor
from django.utils.encoding import force_str
from django.utils.functional import cached_property
from django.utils.safestring import SafeText, SafeBytes
@@ -55,6 +56,8 @@ class DatabaseFeatures(BaseDatabaseFeatures):
supports_tablespaces = True
supports_transactions = True
can_distinct_on_fields = True
+ can_rollback_ddl = True
+ supports_combined_alters = True
nulls_order_largest = True
@@ -202,6 +205,10 @@ def is_usable(self):
else:
return True
+ def schema_editor(self):
+ "Returns a new instance of this backend's SchemaEditor"
+ return DatabaseSchemaEditor(self)
+
@cached_property
def psycopg2_version(self):
version = psycopg2.__version__.split(' ', 1)[0]
9 django/db/backends/postgresql_psycopg2/creation.py
@@ -25,14 +25,19 @@ class DatabaseCreation(BaseDatabaseCreation):
'GenericIPAddressField': 'inet',
'NullBooleanField': 'boolean',
'OneToOneField': 'integer',
- 'PositiveIntegerField': 'integer CHECK ("%(column)s" >= 0)',
- 'PositiveSmallIntegerField': 'smallint CHECK ("%(column)s" >= 0)',
+ 'PositiveIntegerField': 'integer',
+ 'PositiveSmallIntegerField': 'smallint',
'SlugField': 'varchar(%(max_length)s)',
'SmallIntegerField': 'smallint',
'TextField': 'text',
'TimeField': 'time',
}
+ data_type_check_constraints = {
+ 'PositiveIntegerField': '"%(column)s" >= 0',
+ 'PositiveSmallIntegerField': '"%(column)s" >= 0',
+ }
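+ # The schema editor folds these into the column definition, so e.g. a
+ # hypothetical "votes" PositiveIntegerField still renders as:
+ #   "votes" integer CHECK ("votes" >= 0)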
+
def sql_table_creation_suffix(self):
assert self.connection.settings_dict['TEST_COLLATION'] is None, "PostgreSQL does not support collation setting at database creation time."
if self.connection.settings_dict['TEST_CHARSET']:
97 django/db/backends/postgresql_psycopg2/introspection.py
@@ -107,5 +107,100 @@ def get_indexes(self, cursor, table_name):
# Here, we skip any indexes across multiple fields.
if ' ' in row[1]:
continue
- indexes[row[0]] = {'primary_key': row[3], 'unique': row[2]}
+ if row[0] not in indexes:
+ indexes[row[0]] = {'primary_key': False, 'unique': False}
+ # It's possible to have the unique and PK constraints in separate indexes.
+ if row[3]:
+ indexes[row[0]]['primary_key'] = True
+ if row[2]:
+ indexes[row[0]]['unique'] = True
return indexes
+
+ def get_constraints(self, cursor, table_name):
+ """
+ Retrieves any constraints or keys (unique, pk, fk, check, index) across one or more columns.
+ """
+ constraints = {}
+ # Loop over the key table, collecting things as constraints
+ # This will get PKs, FKs, and uniques, but not CHECK
+ cursor.execute("""
+ SELECT
+ kc.constraint_name,
+ kc.column_name,
+ c.constraint_type,
+ array(SELECT table_name::text || '.' || column_name::text FROM information_schema.constraint_column_usage WHERE constraint_name = kc.constraint_name)
+ FROM information_schema.key_column_usage AS kc
+ JOIN information_schema.table_constraints AS c ON
+ kc.table_schema = c.table_schema AND
+ kc.table_name = c.table_name AND
+ kc.constraint_name = c.constraint_name
+ WHERE
+ kc.table_schema = %s AND
+ kc.table_name = %s
+ """, ["public", table_name])
+ for constraint, column, kind, used_cols in cursor.fetchall():
+ # If we're the first column, make the record
+ if constraint not in constraints:
+ constraints[constraint] = {
+ "columns": [],
+ "primary_key": kind.lower() == "primary key",
+ "unique": kind.lower() in ["primary key", "unique"],
+ "foreign_key": tuple(used_cols[0].split(".", 1)) if kind.lower() == "foreign key" else None,
+ "check": False,
+ "index": False,
+ }
+ # Record the details
+ constraints[constraint]['columns'].append(column)
+ # Now get CHECK constraint columns
+ cursor.execute("""
+ SELECT kc.constraint_name, kc.column_name
+ FROM information_schema.constraint_column_usage AS kc
+ JOIN information_schema.table_constraints AS c ON
+ kc.table_schema = c.table_schema AND
+ kc.table_name = c.table_name AND
+ kc.constraint_name = c.constraint_name
+ WHERE
+ c.constraint_type = 'CHECK' AND
+ kc.table_schema = %s AND
+ kc.table_name = %s
+ """, ["public", table_name])
+ for constraint, column in cursor.fetchall():
+ # If we're the first column, make the record
+ if constraint not in constraints:
+ constraints[constraint] = {
+ "columns": [],
+ "primary_key": False,
+ "unique": False,
+ "foreign_key": None,
+ "check": True,
+ "index": False,
+ }
+ # Record the details
+ constraints[constraint]['columns'].append(column)
+ # Now get indexes
+ cursor.execute("""
+ SELECT
+ c2.relname,
+ ARRAY(
+ SELECT (SELECT attname FROM pg_catalog.pg_attribute WHERE attnum = i AND attrelid = c.oid)
+ FROM unnest(idx.indkey) i
+ ),
+ idx.indisunique,
+ idx.indisprimary
+ FROM pg_catalog.pg_class c, pg_catalog.pg_class c2,
+ pg_catalog.pg_index idx
+ WHERE c.oid = idx.indrelid
+ AND idx.indexrelid = c2.oid
+ AND c.relname = %s
+ """, [table_name])
+ for index, columns, unique, primary in cursor.fetchall():
+ if index not in constraints:
+ constraints[index] = {
+ "columns": list(columns),
+ "primary_key": primary,
+ "unique": unique,
+ "foreign_key": None,
+ "check": False,
+ "index": True,
+ }
+ return constraints
5 django/db/backends/postgresql_psycopg2/schema.py
@@ -0,0 +1,5 @@
+from django.db.backends.schema import BaseDatabaseSchemaEditor
+
+
+class DatabaseSchemaEditor(BaseDatabaseSchemaEditor):
+ pass
729 django/db/backends/schema.py
@@ -0,0 +1,729 @@
+import hashlib
+import operator
+import sys
+
+from django.db.backends.creation import BaseDatabaseCreation
+from django.db.backends.util import truncate_name
+from django.db.models.fields.related import ManyToManyField
+from django.db.transaction import atomic
+from django.utils.log import getLogger
+from django.utils.six.moves import reduce
+
+logger = getLogger('django.db.backends.schema')
+
+
+class BaseDatabaseSchemaEditor(object):
+ """
+ This class (and its subclasses) are responsible for emitting schema-changing
+ statements to the databases - model creation/removal/alteration, field
+ renaming, index fiddling, and so on.
+
+ It is intended to eventually completely replace DatabaseCreation.
+
+ This class should be used by creating an instance for each set of schema
+ changes (e.g. a syncdb run, a migration file), and by first calling start(),
+ then the relevant actions, and then commit(). This is necessary to allow
+ things like circular foreign key references - FKs will only be created once
+ commit() is called.
+ """
+
+ # Overrideable SQL templates
+ sql_create_table = "CREATE TABLE %(table)s (%(definition)s)"
+ sql_create_table_unique = "UNIQUE (%(columns)s)"
+ sql_rename_table = "ALTER TABLE %(old_table)s RENAME TO %(new_table)s"
+ sql_retablespace_table = "ALTER TABLE %(table)s SET TABLESPACE %(new_tablespace)s"
+ sql_delete_table = "DROP TABLE %(table)s CASCADE"
+
+ sql_create_column = "ALTER TABLE %(table)s ADD COLUMN %(column)s %(definition)s"
+ sql_alter_column = "ALTER TABLE %(table)s %(changes)s"
+ sql_alter_column_type = "ALTER COLUMN %(column)s TYPE %(type)s"
+ sql_alter_column_null = "ALTER COLUMN %(column)s DROP NOT NULL"
+ sql_alter_column_not_null = "ALTER COLUMN %(column)s SET NOT NULL"
+ sql_alter_column_default = "ALTER COLUMN %(column)s SET DEFAULT %(default)s"
+ sql_alter_column_no_default = "ALTER COLUMN %(column)s DROP DEFAULT"
+ sql_delete_column = "ALTER TABLE %(table)s DROP COLUMN %(column)s CASCADE"
+ sql_rename_column = "ALTER TABLE %(table)s RENAME COLUMN %(old_column)s TO %(new_column)s"
+
+ sql_create_check = "ALTER TABLE %(table)s ADD CONSTRAINT %(name)s CHECK (%(check)s)"
+ sql_delete_check = "ALTER TABLE %(table)s DROP CONSTRAINT %(name)s"
+
+ sql_create_unique = "ALTER TABLE %(table)s ADD CONSTRAINT %(name)s UNIQUE (%(columns)s)"
+ sql_delete_unique = "ALTER TABLE %(table)s DROP CONSTRAINT %(name)s"
+
+ sql_create_fk = "ALTER TABLE %(table)s ADD CONSTRAINT %(name)s FOREIGN KEY (%(column)s) REFERENCES %(to_table)s (%(to_column)s) DEFERRABLE INITIALLY DEFERRED"
+ sql_delete_fk = "ALTER TABLE %(table)s DROP CONSTRAINT %(name)s"
+
+ sql_create_index = "CREATE INDEX %(name)s ON %(table)s (%(columns)s)%(extra)s"
+ sql_delete_index = "DROP INDEX %(name)s"
+
+ sql_create_pk = "ALTER TABLE %(table)s ADD CONSTRAINT %(name)s PRIMARY KEY (%(columns)s)"
+ sql_delete_pk = "ALTER TABLE %(table)s DROP CONSTRAINT %(name)s"
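+
+ # Rendering is plain %-interpolation, e.g. (hypothetical table name):
+ #   self.sql_delete_table % {"table": self.quote_name("myapp_author")}
+ # produces: DROP TABLE "myapp_author" CASCADE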
+
+ def __init__(self, connection):
+ self.connection = connection
+
+ # State-managing methods
+
+ def __enter__(self):
+ self.deferred_sql = []
+ self.atomic = atomic(self.connection.alias, self.connection.features.can_rollback_ddl)
+ self.atomic.__enter__()
+ return self
+
+ def __exit__(self, exc_type, exc_value, traceback):
+ if exc_type is None:
+ # Success - run any SQL deferred until the end of the block
+ for sql in self.deferred_sql:
+ self.execute(sql)
+ # Always exit the atomic block, passing any exception through so the
+ # transaction is rolled back rather than left dangling
+ self.atomic.__exit__(exc_type, exc_value, traceback)
+
+ # Core utility functions
+
+ def execute(self, sql, params=()):
+ """
+ Executes the given SQL statement, with optional parameters.
+ """
+ # Get the cursor
+ cursor = self.connection.cursor()
+ # Log the command we're running, then run it
+ logger.debug("%s; (params %r)", sql, params)
+ cursor.execute(sql, params)
+
+ def quote_name(self, name):
+ return self.connection.ops.quote_name(name)
+
+ # Field <-> database mapping functions
+
+ def column_sql(self, model, field, include_default=False):
+ """
+ Takes a field and returns its column definition.
+ The field must already have had set_attributes_from_name called.
+ """
+ # Get the column's type and use that as the basis of the SQL
+ db_params = field.db_parameters(connection=self.connection)
+ sql = db_params['type']
+ params = []
+ # Check for fields that aren't actually columns (e.g. M2M)
+ if sql is None:
+ return None
+ # Optionally add the tablespace if it's an implicitly indexed column
+ tablespace = field.db_tablespace or model._meta.db_tablespace
+ if tablespace and self.connection.features.supports_tablespaces and field.unique:
+ sql += " %s" % self.connection.ops.tablespace_sql(tablespace, inline=True)
+ # Work out nullability
+ null = field.null
+ # If we were told to include a default value, do so
+ default_value = self.effective_default(field)
+ if include_default and default_value is not None:
+ if self.connection.features.requires_literal_defaults:
+ # Some databases can't take defaults as a parameter (Oracle);
+ # if this is the case, the individual schema backend should
+ # implement prepare_default()
+ sql += " DEFAULT %s" % self.prepare_default(default_value)
+ else:
+ sql += " DEFAULT %s"
+ params += [default_value]
+ # Oracle treats the empty string ('') as null, so coerce the null
+ # option whenever '' is a possible value.
+ if (field.empty_strings_allowed and not field.primary_key and
+ self.connection.features.interprets_empty_strings_as_nulls):
+ null = True
+ if null:
+ sql += " NULL"
+ else:
+ sql += " NOT NULL"
+ # Primary key/unique outputs
+ if field.primary_key:
+ sql += " PRIMARY KEY"
+ elif field.unique:
+ sql += " UNIQUE"
+ # Return the sql
+ return sql, params
+
+ def prepare_default(self, value):
+ """
+ Only used for backends which have the requires_literal_defaults feature.
+ """
+ raise NotImplementedError()
+
+ def effective_default(self, field):
+ """
+ Returns a field's effective database default value
+ """
+ if field.has_default():
+ default = field.get_default()
+ elif not field.null and field.blank and field.empty_strings_allowed:
+ default = ""
+ else:
+ default = None
+ # If it's a callable, call it
+ if callable(default):
+ default = default()
+ return default
+
+ # Actions
+
+ def create_model(self, model):
+ """
+ Takes a model and creates a table for it in the database.
+ Will also create any accompanying indexes or unique constraints.
+ """
+ # Create column SQL, add FK deferreds if needed
+ column_sqls = []
+ params = []
+ for field in model._meta.local_fields:
+ # SQL
+ definition, extra_params = self.column_sql(model, field)
+ if definition is None:
+ continue
+ # Check constraints can go on the column SQL here
+ db_params = field.db_parameters(connection=self.connection)
+ if db_params['check']:
+ definition += " CHECK (%s)" % db_params['check']
+ # Add the SQL to our big list
+ column_sqls.append("%s %s" % (
+ self.quote_name(field.column),
+ definition,
+ ))
+ params.extend(extra_params)
+ # Indexes
+ if field.db_index and not field.unique:
+ self.deferred_sql.append(
+ self.sql_create_index % {
+ "name": self._create_index_name(model, [field.column], suffix=""),
+ "table": self.quote_name(model._meta.db_table),
+ "columns": self.quote_name(field.column),
+ "extra": "",
+ }
+ )
+ # FK
+ if field.rel and self.connection.features.supports_foreign_keys:
+ to_table = field.rel.to._meta.db_table
+ to_column = field.rel.to._meta.get_field(field.rel.field_name).column
+ self.deferred_sql.append(
+ self.sql_create_fk % {
+ "name": self._create_index_name(model, [field.column], suffix="_fk_%s_%s" % (to_table, to_column)),
+ "table": self.quote_name(model._meta.db_table),
+ "column": self.quote_name(field.column),
+ "to_table": self.quote_name(to_table),
+ "to_column": self.quote_name(to_column),
+ }
+ )
+ # Autoincrement SQL
+ if field.get_internal_type() == "AutoField":
+ autoinc_sql = self.connection.ops.autoinc_sql(model._meta.db_table, field.column)
+ if autoinc_sql:
+ self.deferred_sql.extend(autoinc_sql)
+ # Add any unique_togethers
+ for fields in model._meta.unique_together:
+ columns = [model._meta.get_field_by_name(field)[0].column for field in fields]
+ column_sqls.append(self.sql_create_table_unique % {
+ "columns": ", ".join(self.quote_name(column) for column in columns),
+ })
+ # Make the table
+ sql = self.sql_create_table % {
+ "table": model._meta.db_table,
+ "definition": ", ".join(column_sqls)
+ }
+ self.execute(sql, params)
+ # Add any index_togethers
+ for fields in model._meta.index_together:
+ columns = [model._meta.get_field_by_name(field)[0].column for field in fields]
+ self.execute(self.sql_create_index % {
+ "table": self.quote_name(model._meta.db_table),
+ "name": self._create_index_name(model, columns, suffix="_idx"),
+ "columns": ", ".join(self.quote_name(column) for column in columns),
+ "extra": "",
+ })
+ # Make M2M tables
+ for field in model._meta.local_many_to_many:
+ self.create_model(field.rel.through)
+
+ def delete_model(self, model):
+ """
+ Deletes a model from the database.
+ """
+ # Delete the table
+ self.execute(self.sql_delete_table % {
+ "table": self.quote_name(model._meta.db_table),
+ })
+
+ def alter_unique_together(self, model, old_unique_together, new_unique_together):
+ """
+ Deals with a model changing its unique_together.
+ Note: The input unique_togethers must be doubly-nested, not the single-
+ nested ["foo", "bar"] format.
+ """
+ olds = set(tuple(fields) for fields in old_unique_together)
+ news = set(tuple(fields) for fields in new_unique_together)
+ # Deleted uniques
+ for fields in olds.difference(news):
+ columns = [model._meta.get_field_by_name(field)[0].column for field in fields]
+ constraint_names = self._constraint_names(model, columns, unique=True)
+ if len(constraint_names) != 1:
+ raise ValueError("Found wrong number (%s) of constraints for %s(%s)" % (
+ len(constraint_names),
+ model._meta.db_table,
+ ", ".join(columns),
+ ))
+ self.execute(
+ self.sql_delete_unique % {
+ "table": self.quote_name(model._meta.db_table),
+ "name": constraint_names[0],
+ },
+ )
+ # Created uniques
+ for fields in news.difference(olds):
+ columns = [model._meta.get_field_by_name(field)[0].column for field in fields]
+ self.execute(self.sql_create_unique % {
+ "table": self.quote_name(model._meta.db_table),
+ "name": self._create_index_name(model, columns, suffix="_uniq"),
+ "columns": ", ".join(self.quote_name(column) for column in columns),
+ })
+
+ def alter_index_together(self, model, old_index_together, new_index_together):
+ """
+ Deals with a model changing its index_together.
+ Note: The input index_togethers must be doubly-nested, not the single-
+ nested ["foo", "bar"] format.
+ """
+ olds = set(tuple(fields) for fields in old_index_together)
+ news = set(tuple(fields) for fields in new_index_together)
+ # Deleted indexes
+ for fields in olds.difference(news):
+ columns = [model._meta.get_field_by_name(field)[0].column for field in fields]
+ constraint_names = self._constraint_names(model, list(columns), index=True)
+ if len(constraint_names) != 1:
+ raise ValueError("Found wrong number (%s) of constraints for %s(%s)" % (
+ len(constraint_names),
+ model._meta.db_table,
+ ", ".join(columns),
+ ))
+ self.execute(
+ self.sql_delete_index % {
+ "table": self.quote_name(model._meta.db_table),
+ "name": constraint_names[0],
+ },
+ )
+ # Created indexes
+ for fields in news.difference(olds):
+ columns = [model._meta.get_field_by_name(field)[0].column for field in fields]
+ self.execute(self.sql_create_index % {
+ "table": self.quote_name(model._meta.db_table),
+ "name": self._create_index_name(model, columns, suffix="_idx"),
+ "columns": ", ".join(self.quote_name(column) for column in columns),
+ "extra": "",
+ })
+
+ def alter_db_table(self, model, old_db_table, new_db_table):
+ """
+ Renames the table a model points to.
+ """
+ self.execute(self.sql_rename_table % {
+ "old_table": self.quote_name(old_db_table),
+ "new_table": self.quote_name(new_db_table),
+ })
+
+ def alter_db_tablespace(self, model, old_db_tablespace, new_db_tablespace):
+ """
+ Moves a model's table between tablespaces
+ """
+ self.execute(self.sql_retablespace_table % {
+ "table": self.quote_name(model._meta.db_table),
+ "old_tablespace": self.quote_name(old_db_tablespace),
+ "new_tablespace": self.quote_name(new_db_tablespace),
+ })
+
+ def add_field(self, model, field):
+ """
+ Creates a field on a model.
+ Usually involves adding a column, but may involve adding a
+ table instead (for M2M fields)
+ """
+ # Special-case implicit M2M tables
+ if isinstance(field, ManyToManyField) and field.rel.through._meta.auto_created:
+ return self.create_model(field.rel.through)
+ # Get the column's definition
+ definition, params = self.column_sql(model, field, include_default=True)
+ # It might not actually have a column behind it
+ if definition is None:
+ return
+ # Check constraints can go on the column SQL here
+ db_params = field.db_parameters(connection=self.connection)
+ if db_params['check']:
+ definition += " CHECK (%s)" % db_params['check']
+ # Build the SQL and run it
+ sql = self.sql_create_column % {
+ "table": self.quote_name(model._meta.db_table),
+ "column": self.quote_name(field.column),
+ "definition": definition,
+ }
+ self.execute(sql, params)
+ # Drop the default if we need to
+ # (Django usually does not use in-database defaults)
+ if field.default is not None:
+ sql = self.sql_alter_column % {
+ "table": self.quote_name(model._meta.db_table),
+ "changes": self.sql_alter_column_no_default % {
+ "column": self.quote_name(field.column),
+ }
+ }
+ self.execute(sql)
+ # Add an index, if required
+ if field.db_index and not field.unique:
+ self.deferred_sql.append(
+ self.sql_create_index % {
+ "name": self._create_index_name(model, [field.column], suffix=""),
+ "table": self.quote_name(model._meta.db_table),
+ "columns": self.quote_name(field.column),
+ "extra": "",
+ }
+ )
+ # Add any FK constraints later
+ if field.rel and self.connection.features.supports_foreign_keys:
+ to_table = field.rel.to._meta.db_table
+ to_column = field.rel.to._meta.get_field(field.rel.field_name).column
+ self.deferred_sql.append(
+ self.sql_create_fk % {
+ "name": '%s_refs_%s_%x' % (
+ field.column,
+ to_column,
+ abs(hash((model._meta.db_table, to_table)))
+ ),
+ "table": self.quote_name(model._meta.db_table),
+ "column": self.quote_name(field.column),
+ "to_table": self.quote_name(to_table),
+ "to_column": self.quote_name(to_column),
+ }
+ )
+ # Reset connection if required
+ if self.connection.features.connection_persists_old_columns:
+ self.connection.close()
+
+ def remove_field(self, model, field):
+ """
+ Removes a field from a model. Usually involves deleting a column,
+ but for M2Ms may involve deleting a table.
+ """
+ # Special-case implicit M2M tables
+ if isinstance(field, ManyToManyField) and field.rel.through._meta.auto_created:
+ return self.delete_model(field.rel.through)
+ # It might not actually have a column behind it
+ if field.db_parameters(connection=self.connection)['type'] is None:
+ return
+ # Get the column's definition
+ definition, params = self.column_sql(model, field)
+ # Delete the column
+ sql = self.sql_delete_column % {
+ "table": self.quote_name(model._meta.db_table),
+ "column": self.quote_name(field.column),
+ }
+ self.execute(sql)
+ # Reset connection if required
+ if self.connection.features.connection_persists_old_columns:
+ self.connection.close()
+
+ def alter_field(self, model, old_field, new_field, strict=False):
+ """
+ Allows a field's type, uniqueness, nullability, default, column,
+ constraints etc. to be modified.
+ Requires a copy of the old field as well so we can only perform the
+ changes that are actually required.
+ If strict is True, raises errors if the old column does not match
+ old_field precisely.
+ """
+ # Ensure this field is even column-based
+ old_db_params = old_field.db_parameters(connection=self.connection)
+ old_type = old_db_params['type']
+ new_db_params = new_field.db_parameters(connection=self.connection)
+ new_type = new_db_params['type']
+ if old_type is None and new_type is None and (old_field.rel.through and new_field.rel.through and old_field.rel.through._meta.auto_created and new_field.rel.through._meta.auto_created):
+ return self._alter_many_to_many(model, old_field, new_field, strict)
+ elif old_type is None or new_type is None:
+ raise ValueError("Cannot alter field %s into %s - they are not compatible types (probably means only one is an M2M with implicit through model)" % (
+ old_field,
+ new_field,
+ ))
+ # Has unique been removed?
+ if old_field.unique and (not new_field.unique or (not old_field.primary_key and new_field.primary_key)):
+ # Find the unique constraint for this field
+ constraint_names = self._constraint_names(model, [old_field.column], unique=True)
+ if strict and len(constraint_names) != 1:
+ raise ValueError("Found wrong number (%s) of unique constraints for %s.%s" % (
+ len(constraint_names),
+ model._meta.db_table,
+ old_field.column,
+ ))
+ for constraint_name in constraint_names:
+ self.execute(
+ self.sql_delete_unique % {
+ "table": self.quote_name(model._meta.db_table),
+ "name