Merged branch 'database-level-autocommit'.

Fixed #2227: `atomic` supports nesting (see the usage sketch after this list).
Fixed #6623: `commit_manually` is deprecated and `atomic` doesn't suffer from this defect.
Fixed #8320: the problem wasn't identified, but the legacy transaction management is deprecated.
Fixed #10744: the problem wasn't identified, but the legacy transaction management is deprecated.
Fixed #10813: since autocommit is enabled, it isn't necessary to rollback after errors any more.
Fixed #13742: savepoints are now implemented for SQLite.
Fixed #13870: transaction management in long running processes isn't a problem any more, and it's documented.
Fixed #14970: while it digresses on transaction management, this ticket essentially asks for autocommit on PostgreSQL.
Fixed #15694: `atomic` supports nesting.
Fixed #17887: autocommit makes it impossible for a connection to stay "idle in transaction".
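
The `atomic` decorator/context manager introduced by this merge nests by falling back to savepoints inside an already open transaction. A minimal usage sketch of that behaviour (the `import_rows` helper and its `model` argument are illustrative, not part of this commit):

    from django.db import IntegrityError, transaction

    def import_rows(model, rows):
        # Outer block: turns autocommit off and opens a transaction.
        with transaction.atomic():
            for row in rows:
                try:
                    # Inner block: wraps each save in a savepoint, so one bad
                    # row rolls back to its savepoint without aborting the
                    # whole import.
                    with transaction.atomic():
                        model.objects.create(**row)
                except IntegrityError:
                    # The outer transaction remains usable after the rollback.
                    continue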
commit 14cddf51c5f001bb426ce7f7a83fdc52c8d8aee9 (2 parents: 9cec689 + e654180)
Authored by @aaugustin
Showing with 1,747 additions and 936 deletions.
  1. +1 −0  AUTHORS
  2. +0 −4 django/contrib/gis/utils/layermapping.py
  3. +0 −1  django/contrib/sessions/backends/db.py
  4. +1 −6 django/core/cache/backends/db.py
  5. +10 −2 django/core/handlers/base.py
  6. +10 −11 django/core/management/commands/createcachetable.py
  7. +4 −5 django/core/management/commands/flush.py
  8. +21 −50 django/core/management/commands/loaddata.py
  9. +37 −40 django/core/management/commands/syncdb.py
  10. +1 −11 django/db/__init__.py
  11. +140 −107 django/db/backends/__init__.py
  12. +6 −15 django/db/backends/creation.py
  13. +1 −4 django/db/backends/dummy/base.py
  14. +3 −9 django/db/backends/mysql/base.py
  15. +3 −0  django/db/backends/oracle/base.py
  16. +0 −3  django/db/backends/oracle/creation.py
  17. +26 −47 django/db/backends/postgresql_psycopg2/base.py
  18. +0 −11 django/db/backends/postgresql_psycopg2/creation.py
  19. +0 −9 django/db/backends/postgresql_psycopg2/operations.py
  20. +32 −0 django/db/backends/sqlite3/base.py
  21. +0 −3  django/db/backends/sqlite3/creation.py
  22. +42 −42 django/db/models/base.py
  23. +32 −52 django/db/models/deletion.py
  24. +2 −26 django/db/models/query.py
  25. +251 −36 django/db/transaction.py
  26. +8 −0 django/db/utils.py
  27. +13 −3 django/middleware/transaction.py
  28. +10 −26 django/test/testcases.py
  29. +17 −2 docs/internals/deprecation.txt
  30. +13 −46 docs/ref/databases.txt
  31. +4 −0 docs/ref/middleware.txt
  32. +2 −2 docs/ref/request-response.txt
  33. +30 −0 docs/ref/settings.txt
  34. +2 −4 docs/releases/1.3-alpha-1.txt
  35. +2 −4 docs/releases/1.3.txt
  36. +37 −0 docs/releases/1.6.txt
  37. +26 −35 docs/topics/db/sql.txt
  38. +525 −219 docs/topics/db/transactions.txt
  39. +14 −5 tests/backends/tests.py
  40. +1 −3 tests/delete_regress/tests.py
  41. +9 −2 tests/fixtures_model_package/tests.py
  42. +5 −2 tests/fixtures_regress/tests.py
  43. +27 −3 tests/handlers/tests.py
  44. +6 −3 tests/handlers/urls.py
  45. +17 −0 tests/handlers/views.py
  46. +7 −15 tests/middleware/tests.py
  47. +0 −2  tests/requests/tests.py
  48. +0 −3  tests/select_for_update/tests.py
  49. +0 −1  tests/serializers/tests.py
  50. +1 −1  tests/transactions/models.py
  51. +311 −3 tests/transactions/tests.py
  52. +37 −58 tests/transactions_regress/tests.py
1  AUTHORS
@@ -434,6 +434,7 @@ answer newbie questions, and generally made Django that much better:
Andreas Pelme <andreas@pelme.se>
permonik@mesias.brnonet.cz
peter@mymart.com
+ Christophe Pettus <xof@thebuild.com>
pgross@thoughtworks.com
phaedo <http://phaedo.cx/>
phil@produxion.net
4 django/contrib/gis/utils/layermapping.py
@@ -555,10 +555,6 @@ def _save(feat_range=default_range, num_feat=0, num_saved=0):
except SystemExit:
raise
except Exception as msg:
- if self.transaction_mode == 'autocommit':
- # Rolling back the transaction so that other model saves
- # will work.
- transaction.rollback_unless_managed()
if strict:
# Bailing out if the `strict` keyword is set.
if not silent:
1  django/contrib/sessions/backends/db.py
@@ -74,7 +74,6 @@ def delete(self, session_key=None):
@classmethod
def clear_expired(cls):
Session.objects.filter(expire_date__lt=timezone.now()).delete()
- transaction.commit_unless_managed()
# At bottom to avoid circular import
7 django/core/cache/backends/db.py
@@ -10,7 +10,7 @@
from django.conf import settings
from django.core.cache.backends.base import BaseCache
-from django.db import connections, router, transaction, DatabaseError
+from django.db import connections, router, DatabaseError
from django.utils import timezone, six
from django.utils.encoding import force_bytes
@@ -70,7 +70,6 @@ def get(self, key, default=None, version=None):
cursor = connections[db].cursor()
cursor.execute("DELETE FROM %s "
"WHERE cache_key = %%s" % table, [key])
- transaction.commit_unless_managed(using=db)
return default
value = connections[db].ops.process_clob(row[1])
return pickle.loads(base64.b64decode(force_bytes(value)))
@@ -124,10 +123,8 @@ def _base_set(self, mode, key, value, timeout=None):
[key, b64encoded, connections[db].ops.value_to_db_datetime(exp)])
except DatabaseError:
# To be threadsafe, updates/inserts are allowed to fail silently
- transaction.rollback_unless_managed(using=db)
return False
else:
- transaction.commit_unless_managed(using=db)
return True
def delete(self, key, version=None):
@@ -139,7 +136,6 @@ def delete(self, key, version=None):
cursor = connections[db].cursor()
cursor.execute("DELETE FROM %s WHERE cache_key = %%s" % table, [key])
- transaction.commit_unless_managed(using=db)
def has_key(self, key, version=None):
key = self.make_key(key, version=version)
@@ -184,7 +180,6 @@ def clear(self):
table = connections[db].ops.quote_name(self._table)
cursor = connections[db].cursor()
cursor.execute('DELETE FROM %s' % table)
- transaction.commit_unless_managed(using=db)
# For backwards compatibility
class CacheClass(DatabaseCache):
12 django/core/handlers/base.py
@@ -6,10 +6,10 @@
from django import http
from django.conf import settings
-from django.core import exceptions
from django.core import urlresolvers
from django.core import signals
from django.core.exceptions import MiddlewareNotUsed, PermissionDenied
+from django.db import connections, transaction
from django.utils.encoding import force_text
from django.utils.module_loading import import_by_path
from django.utils import six
@@ -65,6 +65,13 @@ def load_middleware(self):
# as a flag for initialization being complete.
self._request_middleware = request_middleware
+ def make_view_atomic(self, view):
+ if getattr(view, 'transactions_per_request', True):
+ for db in connections.all():
+ if db.settings_dict['ATOMIC_REQUESTS']:
+ view = transaction.atomic(using=db.alias)(view)
+ return view
+
def get_response(self, request):
"Returns an HttpResponse object for the given HttpRequest"
try:
@@ -101,8 +108,9 @@ def get_response(self, request):
break
if response is None:
+ wrapped_callback = self.make_view_atomic(callback)
try:
- response = callback(request, *callback_args, **callback_kwargs)
+ response = wrapped_callback(request, *callback_args, **callback_kwargs)
except Exception as e:
# If the view raised an exception, run it through exception
# middleware, and if the exception middleware returns a
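
The `make_view_atomic` hook above wraps each view in `transaction.atomic` for every connection whose settings enable it. A configuration sketch, assuming the per-database `ATOMIC_REQUESTS` key documented elsewhere in this commit (the database name is a placeholder):

    # settings.py -- ATOMIC_REQUESTS ends up in connection.settings_dict,
    # which make_view_atomic checks for each configured database.
    DATABASES = {
        'default': {
            'ENGINE': 'django.db.backends.postgresql_psycopg2',
            'NAME': 'example_db',      # placeholder
            'ATOMIC_REQUESTS': True,   # run each view in a transaction
        }
    }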
21 django/core/management/commands/createcachetable.py
@@ -53,14 +53,13 @@ def handle_label(self, tablename, **options):
for i, line in enumerate(table_output):
full_statement.append(' %s%s' % (line, i < len(table_output)-1 and ',' or ''))
full_statement.append(');')
- curs = connection.cursor()
- try:
- curs.execute("\n".join(full_statement))
- except DatabaseError as e:
- transaction.rollback_unless_managed(using=db)
- raise CommandError(
- "Cache table '%s' could not be created.\nThe error was: %s." %
- (tablename, force_text(e)))
- for statement in index_output:
- curs.execute(statement)
- transaction.commit_unless_managed(using=db)
+ with transaction.commit_on_success_unless_managed():
+ curs = connection.cursor()
+ try:
+ curs.execute("\n".join(full_statement))
+ except DatabaseError as e:
+ raise CommandError(
+ "Cache table '%s' could not be created.\nThe error was: %s." %
+ (tablename, force_text(e)))
+ for statement in index_output:
+ curs.execute(statement)
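
`commit_on_success_unless_managed` is the transitional helper used throughout this commit: under the new autocommit behaviour it commits the enclosed statements on success, while inside a legacy managed transaction it leaves the commit to the surrounding block. A sketch of the pattern (the function and table name are placeholders):

    from django.db import connection, transaction

    def bump_counter():
        # Commits on success when autocommit is active; otherwise defers to
        # the surrounding managed transaction.
        with transaction.commit_on_success_unless_managed():
            cursor = connection.cursor()
            cursor.execute("UPDATE demo_counter SET value = value + 1")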
9 django/core/management/commands/flush.py
@@ -57,18 +57,17 @@ def handle_noargs(self, **options):
if confirm == 'yes':
try:
- cursor = connection.cursor()
- for sql in sql_list:
- cursor.execute(sql)
+ with transaction.commit_on_success_unless_managed():
+ cursor = connection.cursor()
+ for sql in sql_list:
+ cursor.execute(sql)
except Exception as e:
- transaction.rollback_unless_managed(using=db)
raise CommandError("""Database %s couldn't be flushed. Possible reasons:
* The database isn't running or isn't configured correctly.
* At least one of the expected database tables doesn't exist.
* The SQL was invalid.
Hint: Look at the output of 'django-admin.py sqlflush'. That's the SQL this command wasn't able to run.
The full error: %s""" % (connection.settings_dict['NAME'], e))
- transaction.commit_unless_managed(using=db)
# Emit the post sync signal. This allows individual
# applications to respond as if the database had been
71 django/core/management/commands/loaddata.py
@@ -41,8 +41,6 @@ def handle(self, *fixture_labels, **options):
self.ignore = options.get('ignore')
self.using = options.get('database')
- connection = connections[self.using]
-
if not len(fixture_labels):
raise CommandError(
"No database fixture specified. Please provide the path of at "
@@ -51,13 +49,18 @@ def handle(self, *fixture_labels, **options):
self.verbosity = int(options.get('verbosity'))
- # commit is a stealth option - it isn't really useful as
- # a command line option, but it can be useful when invoking
- # loaddata from within another script.
- # If commit=True, loaddata will use its own transaction;
- # if commit=False, the data load SQL will become part of
- # the transaction in place when loaddata was invoked.
- commit = options.get('commit', True)
+ with transaction.commit_on_success_unless_managed(using=self.using):
+ self.loaddata(fixture_labels)
+
+ # Close the DB connection -- unless we're still in a transaction. This
+ # is required as a workaround for an edge case in MySQL: if the same
+ # connection is used to create tables, load data, and query, the query
+ # can return incorrect results. See Django #7572, MySQL #37735.
+ if transaction.get_autocommit(self.using):
+ connections[self.using].close()
+
+ def loaddata(self, fixture_labels):
+ connection = connections[self.using]
# Keep a count of the installed objects and fixtures
self.fixture_count = 0
@@ -65,18 +68,6 @@ def handle(self, *fixture_labels, **options):
self.fixture_object_count = 0
self.models = set()
- # Get a cursor (even though we don't need one yet). This has
- # the side effect of initializing the test database (if
- # it isn't already initialized).
- cursor = connection.cursor()
-
- # Start transaction management. All fixtures are installed in a
- # single transaction to ensure that all references are resolved.
- if commit:
- transaction.commit_unless_managed(using=self.using)
- transaction.enter_transaction_management(using=self.using)
- transaction.managed(True, using=self.using)
-
class SingleZipReader(zipfile.ZipFile):
def __init__(self, *args, **kwargs):
zipfile.ZipFile.__init__(self, *args, **kwargs)
@@ -105,26 +96,17 @@ def read(self):
app_fixtures = [os.path.join(os.path.dirname(path), 'fixtures') for path in app_module_paths]
- try:
- with connection.constraint_checks_disabled():
- for fixture_label in fixture_labels:
- self.load_label(fixture_label, app_fixtures)
-
- # Since we disabled constraint checks, we must manually check for
- # any invalid keys that might have been added
- table_names = [model._meta.db_table for model in self.models]
- try:
- connection.check_constraints(table_names=table_names)
- except Exception as e:
- e.args = ("Problem installing fixtures: %s" % e,)
- raise
+ with connection.constraint_checks_disabled():
+ for fixture_label in fixture_labels:
+ self.load_label(fixture_label, app_fixtures)
- except (SystemExit, KeyboardInterrupt):
- raise
+ # Since we disabled constraint checks, we must manually check for
+ # any invalid keys that might have been added
+ table_names = [model._meta.db_table for model in self.models]
+ try:
+ connection.check_constraints(table_names=table_names)
except Exception as e:
- if commit:
- transaction.rollback(using=self.using)
- transaction.leave_transaction_management(using=self.using)
+ e.args = ("Problem installing fixtures: %s" % e,)
raise
# If we found even one object in a fixture, we need to reset the
@@ -137,10 +119,6 @@ def read(self):
for line in sequence_sql:
cursor.execute(line)
- if commit:
- transaction.commit(using=self.using)
- transaction.leave_transaction_management(using=self.using)
-
if self.verbosity >= 1:
if self.fixture_object_count == self.loaded_object_count:
self.stdout.write("Installed %d object(s) from %d fixture(s)" % (
@@ -149,13 +127,6 @@ def read(self):
self.stdout.write("Installed %d object(s) (of %d) from %d fixture(s)" % (
self.loaded_object_count, self.fixture_object_count, self.fixture_count))
- # Close the DB connection. This is required as a workaround for an
- # edge case in MySQL: if the same connection is used to
- # create tables, load data, and query, the query can return
- # incorrect results. See Django #7572, MySQL #37735.
- if commit:
- connection.close()
-
def load_label(self, fixture_label, app_fixtures):
parts = fixture_label.split('.')
77 django/core/management/commands/syncdb.py
@@ -83,26 +83,25 @@ def model_installed(model):
# Create the tables for each model
if verbosity >= 1:
self.stdout.write("Creating tables ...\n")
- for app_name, model_list in manifest.items():
- for model in model_list:
- # Create the model's database table, if it doesn't already exist.
- if verbosity >= 3:
- self.stdout.write("Processing %s.%s model\n" % (app_name, model._meta.object_name))
- sql, references = connection.creation.sql_create_model(model, self.style, seen_models)
- seen_models.add(model)
- created_models.add(model)
- for refto, refs in references.items():
- pending_references.setdefault(refto, []).extend(refs)
- if refto in seen_models:
- sql.extend(connection.creation.sql_for_pending_references(refto, self.style, pending_references))
- sql.extend(connection.creation.sql_for_pending_references(model, self.style, pending_references))
- if verbosity >= 1 and sql:
- self.stdout.write("Creating table %s\n" % model._meta.db_table)
- for statement in sql:
- cursor.execute(statement)
- tables.append(connection.introspection.table_name_converter(model._meta.db_table))
-
- transaction.commit_unless_managed(using=db)
+ with transaction.commit_on_success_unless_managed(using=db):
+ for app_name, model_list in manifest.items():
+ for model in model_list:
+ # Create the model's database table, if it doesn't already exist.
+ if verbosity >= 3:
+ self.stdout.write("Processing %s.%s model\n" % (app_name, model._meta.object_name))
+ sql, references = connection.creation.sql_create_model(model, self.style, seen_models)
+ seen_models.add(model)
+ created_models.add(model)
+ for refto, refs in references.items():
+ pending_references.setdefault(refto, []).extend(refs)
+ if refto in seen_models:
+ sql.extend(connection.creation.sql_for_pending_references(refto, self.style, pending_references))
+ sql.extend(connection.creation.sql_for_pending_references(model, self.style, pending_references))
+ if verbosity >= 1 and sql:
+ self.stdout.write("Creating table %s\n" % model._meta.db_table)
+ for statement in sql:
+ cursor.execute(statement)
+ tables.append(connection.introspection.table_name_converter(model._meta.db_table))
# Send the post_syncdb signal, so individual apps can do whatever they need
# to do at this point.
@@ -122,17 +121,16 @@ def model_installed(model):
if custom_sql:
if verbosity >= 2:
self.stdout.write("Installing custom SQL for %s.%s model\n" % (app_name, model._meta.object_name))
- try:
- for sql in custom_sql:
- cursor.execute(sql)
- except Exception as e:
- self.stderr.write("Failed to install custom SQL for %s.%s model: %s\n" % \
- (app_name, model._meta.object_name, e))
- if show_traceback:
- traceback.print_exc()
- transaction.rollback_unless_managed(using=db)
- else:
- transaction.commit_unless_managed(using=db)
+ with transaction.commit_on_success_unless_managed(using=db):
+ try:
+ for sql in custom_sql:
+ cursor.execute(sql)
+ except Exception as e:
+ self.stderr.write("Failed to install custom SQL for %s.%s model: %s\n" % \
+ (app_name, model._meta.object_name, e))
+ if show_traceback:
+ traceback.print_exc()
+ raise
else:
if verbosity >= 3:
self.stdout.write("No custom SQL for %s.%s model\n" % (app_name, model._meta.object_name))
@@ -147,15 +145,14 @@ def model_installed(model):
if index_sql:
if verbosity >= 2:
self.stdout.write("Installing index for %s.%s model\n" % (app_name, model._meta.object_name))
- try:
- for sql in index_sql:
- cursor.execute(sql)
- except Exception as e:
- self.stderr.write("Failed to install index for %s.%s model: %s\n" % \
- (app_name, model._meta.object_name, e))
- transaction.rollback_unless_managed(using=db)
- else:
- transaction.commit_unless_managed(using=db)
+ with transaction.commit_on_success_unless_managed(using=db):
+ try:
+ for sql in index_sql:
+ cursor.execute(sql)
+ except Exception as e:
+ self.stderr.write("Failed to install index for %s.%s model: %s\n" % \
+ (app_name, model._meta.object_name, e))
+ raise
# Load initial_data fixtures (unless that has been disabled)
if load_initial_data:
12 django/db/__init__.py
@@ -70,6 +70,7 @@ def reset_queries(**kwargs):
# their lifetime. NB: abort() doesn't do anything outside of a transaction.
def close_old_connections(**kwargs):
for conn in connections.all():
+ # Remove this when the legacy transaction management goes away.
try:
conn.abort()
except DatabaseError:
@@ -77,14 +78,3 @@ def close_old_connections(**kwargs):
conn.close_if_unusable_or_obsolete()
signals.request_started.connect(close_old_connections)
signals.request_finished.connect(close_old_connections)
-
-# Register an event that rolls back the connections
-# when a Django request has an exception.
-def _rollback_on_exception(**kwargs):
- from django.db import transaction
- for conn in connections:
- try:
- transaction.rollback_unless_managed(using=conn)
- except DatabaseError:
- pass
-signals.got_request_exception.connect(_rollback_on_exception)
247 django/db/backends/__init__.py
@@ -44,11 +44,21 @@ def __init__(self, settings_dict, alias=DEFAULT_DB_ALIAS,
self.savepoint_state = 0
# Transaction management related attributes
+ self.autocommit = False
self.transaction_state = []
# Tracks if the connection is believed to be in transaction. This is
# set somewhat aggressively, as the DBAPI doesn't make it easy to
# deduce if the connection is in transaction or not.
self._dirty = False
+ # Tracks if the connection is in a transaction managed by 'atomic'
+ self.in_atomic_block = False
+ # Tracks if the transaction should be rolled back to the next
+ # available savepoint because of an exception in an inner block.
+ self.needs_rollback = False
+ # List of savepoints created by 'atomic'
+ self.savepoint_ids = []
+ # Hack to provide compatibility with legacy transaction management
+ self._atomic_forced_unmanaged = False
# Connection termination related attributes
self.close_at = None
@@ -85,20 +95,35 @@ def create_cursor(self):
"""Creates a cursor. Assumes that a connection is established."""
raise NotImplementedError
+ ##### Backend-specific methods for creating connections #####
+
+ def connect(self):
+ """Connects to the database. Assumes that the connection is closed."""
+ # Reset parameters defining when to close the connection
+ max_age = self.settings_dict['CONN_MAX_AGE']
+ self.close_at = None if max_age is None else time.time() + max_age
+ self.errors_occurred = False
+ # Establish the connection
+ conn_params = self.get_connection_params()
+ self.connection = self.get_new_connection(conn_params)
+ self.init_connection_state()
+ if self.settings_dict['AUTOCOMMIT']:
+ self.set_autocommit(True)
+ connection_created.send(sender=self.__class__, connection=self)
+
+ def ensure_connection(self):
+ """
+ Guarantees that a connection to the database is established.
+ """
+ if self.connection is None:
+ with self.wrap_database_errors():
+ self.connect()
+
##### Backend-specific wrappers for PEP-249 connection methods #####
def _cursor(self):
+ self.ensure_connection()
with self.wrap_database_errors():
- if self.connection is None:
- # Reset parameters defining when to close the connection
- max_age = self.settings_dict['CONN_MAX_AGE']
- self.close_at = None if max_age is None else time.time() + max_age
- self.errors_occurred = False
- # Establish the connection
- conn_params = self.get_connection_params()
- self.connection = self.get_new_connection(conn_params)
- self.init_connection_state()
- connection_created.send(sender=self.__class__, connection=self)
return self.create_cursor()
def _commit(self):
@@ -132,17 +157,19 @@ def cursor(self):
def commit(self):
"""
- Does the commit itself and resets the dirty flag.
+ Commits a transaction and resets the dirty flag.
"""
self.validate_thread_sharing()
+ self.validate_no_atomic_block()
self._commit()
self.set_clean()
def rollback(self):
"""
- Does the rollback itself and resets the dirty flag.
+ Rolls back a transaction and resets the dirty flag.
"""
self.validate_thread_sharing()
+ self.validate_no_atomic_block()
self._rollback()
self.set_clean()
@@ -160,54 +187,59 @@ def close(self):
##### Backend-specific savepoint management methods #####
def _savepoint(self, sid):
- if not self.features.uses_savepoints:
- return
self.cursor().execute(self.ops.savepoint_create_sql(sid))
def _savepoint_rollback(self, sid):
- if not self.features.uses_savepoints:
- return
self.cursor().execute(self.ops.savepoint_rollback_sql(sid))
def _savepoint_commit(self, sid):
- if not self.features.uses_savepoints:
- return
self.cursor().execute(self.ops.savepoint_commit_sql(sid))
+ def _savepoint_allowed(self):
+ # Savepoints cannot be created outside a transaction
+ return self.features.uses_savepoints and not self.autocommit
+
##### Generic savepoint management methods #####
def savepoint(self):
"""
- Creates a savepoint (if supported and required by the backend) inside the
- current transaction. Returns an identifier for the savepoint that will be
- used for the subsequent rollback or commit.
+ Creates a savepoint inside the current transaction. Returns an
+ identifier for the savepoint that will be used for the subsequent
+ rollback or commit. Does nothing if savepoints are not supported.
"""
+ if not self._savepoint_allowed():
+ return
+
thread_ident = thread.get_ident()
+ tid = str(thread_ident).replace('-', '')
self.savepoint_state += 1
-
- tid = str(thread_ident).replace('-', '')
sid = "s%s_x%d" % (tid, self.savepoint_state)
+
+ self.validate_thread_sharing()
self._savepoint(sid)
+
return sid
def savepoint_rollback(self, sid):
"""
- Rolls back the most recent savepoint (if one exists). Does nothing if
- savepoints are not supported.
+ Rolls back to a savepoint. Does nothing if savepoints are not supported.
"""
+ if not self._savepoint_allowed():
+ return
+
self.validate_thread_sharing()
- if self.savepoint_state:
- self._savepoint_rollback(sid)
+ self._savepoint_rollback(sid)
def savepoint_commit(self, sid):
"""
- Commits the most recent savepoint (if one exists). Does nothing if
- savepoints are not supported.
+ Releases a savepoint. Does nothing if savepoints are not supported.
"""
+ if not self._savepoint_allowed():
+ return
+
self.validate_thread_sharing()
- if self.savepoint_state:
- self._savepoint_commit(sid)
+ self._savepoint_commit(sid)
def clean_savepoints(self):
"""
@@ -217,24 +249,15 @@ def clean_savepoints(self):
##### Backend-specific transaction management methods #####
- def _enter_transaction_management(self, managed):
- """
- A hook for backend-specific changes required when entering manual
- transaction handling.
- """
- pass
-
- def _leave_transaction_management(self, managed):
+ def _set_autocommit(self, autocommit):
"""
- A hook for backend-specific changes required when leaving manual
- transaction handling. Will usually be implemented only when
- _enter_transaction_management() is also required.
+ Backend-specific implementation to enable or disable autocommit.
"""
- pass
+ raise NotImplementedError
##### Generic transaction management methods #####
- def enter_transaction_management(self, managed=True):
+ def enter_transaction_management(self, managed=True, forced=False):
"""
Enters transaction management for a running thread. It must be balanced with
the appropriate leave_transaction_management call, since the actual state is
@@ -243,12 +266,22 @@ def enter_transaction_management(self, managed=True):
The state and dirty flag are carried over from the surrounding block or
from the settings, if there is no surrounding block (dirty is always false
when no current block is running).
+
+ If you switch off transaction management and there is a pending
+ commit/rollback, the data will be committed, unless "forced" is True.
"""
- if self.transaction_state:
- self.transaction_state.append(self.transaction_state[-1])
- else:
- self.transaction_state.append(settings.TRANSACTIONS_MANAGED)
- self._enter_transaction_management(managed)
+ self.validate_no_atomic_block()
+
+ self.ensure_connection()
+
+ self.transaction_state.append(managed)
+
+ if not managed and self.is_dirty() and not forced:
+ self.commit()
+ self.set_clean()
+
+ if managed == self.autocommit:
+ self.set_autocommit(not managed)
def leave_transaction_management(self):
"""
@@ -256,22 +289,48 @@ def leave_transaction_management(self):
over to the surrounding block, as a commit will commit all changes, even
those from outside. (Commits are on connection level.)
"""
+ self.validate_no_atomic_block()
+
+ self.ensure_connection()
+
if self.transaction_state:
del self.transaction_state[-1]
else:
raise TransactionManagementError(
"This code isn't under transaction management")
- # The _leave_transaction_management hook can change the dirty flag,
- # so memoize it.
- dirty = self._dirty
- # We will pass the next status (after leaving the previous state
- # behind) to subclass hook.
- self._leave_transaction_management(self.is_managed())
- if dirty:
+
+ if self.transaction_state:
+ managed = self.transaction_state[-1]
+ else:
+ managed = not self.settings_dict['AUTOCOMMIT']
+
+ if self._dirty:
self.rollback()
+ if managed == self.autocommit:
+ self.set_autocommit(not managed)
raise TransactionManagementError(
"Transaction managed block ended with pending COMMIT/ROLLBACK")
+ if managed == self.autocommit:
+ self.set_autocommit(not managed)
+
+ def set_autocommit(self, autocommit):
+ """
+ Enable or disable autocommit.
+ """
+ self.validate_no_atomic_block()
+ self.ensure_connection()
+ self._set_autocommit(autocommit)
+ self.autocommit = autocommit
+
+ def validate_no_atomic_block(self):
+ """
+ Raise an error if an atomic block is active.
+ """
+ if self.in_atomic_block:
+ raise TransactionManagementError(
+ "This is forbidden when an 'atomic' block is active.")
+
def abort(self):
"""
Roll back any ongoing transaction and clean the transaction state
@@ -295,7 +354,8 @@ def set_dirty(self):
to decide in a managed block of code whether there are open
changes waiting for commit.
"""
- self._dirty = True
+ if not self.autocommit:
+ self._dirty = True
def set_clean(self):
"""
@@ -306,51 +366,6 @@ def set_clean(self):
self._dirty = False
self.clean_savepoints()
- def is_managed(self):
- """
- Checks whether the transaction manager is in manual or in auto state.
- """
- if self.transaction_state:
- return self.transaction_state[-1]
- return settings.TRANSACTIONS_MANAGED
-
- def managed(self, flag=True):
- """
- Puts the transaction manager into a manual state: managed transactions have
- to be committed explicitly by the user. If you switch off transaction
- management and there is a pending commit/rollback, the data will be
- commited.
- """
- top = self.transaction_state
- if top:
- top[-1] = flag
- if not flag and self.is_dirty():
- self.commit()
- else:
- raise TransactionManagementError("This code isn't under transaction "
- "management")
-
- def commit_unless_managed(self):
- """
- Commits changes if the system is not in managed transaction mode.
- """
- self.validate_thread_sharing()
- if not self.is_managed():
- self.commit()
- self.clean_savepoints()
- else:
- self.set_dirty()
-
- def rollback_unless_managed(self):
- """
- Rolls back changes if the system is not in managed transaction mode.
- """
- self.validate_thread_sharing()
- if not self.is_managed():
- self.rollback()
- else:
- self.set_dirty()
-
##### Foreign key constraints checks handling #####
@contextmanager
@@ -402,12 +417,19 @@ def close_if_unusable_or_obsolete(self):
or if it outlived its maximum age.
"""
if self.connection is not None:
+ # If the application didn't restore the original autocommit setting,
+ # don't take chances, drop the connection.
+ if self.autocommit != self.settings_dict['AUTOCOMMIT']:
+ self.close()
+ return
+
if self.errors_occurred:
if self.is_usable():
self.errors_occurred = False
else:
self.close()
return
+
if self.close_at is not None and time.time() >= self.close_at:
self.close()
return
@@ -460,6 +482,12 @@ def temporary_connection(self):
if must_close:
self.close()
+ def _start_transaction_under_autocommit(self):
+ """
+ Only required when autocommits_when_autocommit_is_off = True.
+ """
+ raise NotImplementedError
+
class BaseDatabaseFeatures(object):
allows_group_by_pk = False
@@ -479,7 +507,6 @@ class BaseDatabaseFeatures(object):
can_use_chunked_reads = True
can_return_id_from_insert = False
has_bulk_insert = False
- uses_autocommit = False
uses_savepoints = False
can_combine_inserts_with_and_without_auto_increment_pk = False
@@ -563,6 +590,10 @@ class BaseDatabaseFeatures(object):
# Support for the DISTINCT ON clause
can_distinct_on_fields = False
+ # Does the backend decide to commit before SAVEPOINT statements
+ # when autocommit is disabled? http://bugs.python.org/issue8145#msg109965
+ autocommits_when_autocommit_is_off = False
+
def __init__(self, connection):
self.connection = connection
@@ -574,7 +605,6 @@ def supports_transactions(self):
# otherwise autocommit will cause the confirmation to
# fail.
self.connection.enter_transaction_management()
- self.connection.managed(True)
cursor = self.connection.cursor()
cursor.execute('CREATE TABLE ROLLBACK_TEST (X INT)')
self.connection.commit()
@@ -883,19 +913,19 @@ def savepoint_create_sql(self, sid):
"uses_savepoints" feature is True. The "sid" parameter is a string
for the savepoint id.
"""
- raise NotImplementedError
+ return "SAVEPOINT %s" % self.quote_name(sid)
def savepoint_commit_sql(self, sid):
"""
Returns the SQL for committing the given savepoint.
"""
- raise NotImplementedError
+ return "RELEASE SAVEPOINT %s" % self.quote_name(sid)
def savepoint_rollback_sql(self, sid):
"""
Returns the SQL for rolling back the given savepoint.
"""
- raise NotImplementedError
+ return "ROLLBACK TO SAVEPOINT %s" % self.quote_name(sid)
def set_time_zone_sql(self):
"""
@@ -946,6 +976,9 @@ def start_transaction_sql(self):
return "BEGIN;"
def end_transaction_sql(self, success=True):
+ """
+ Returns the SQL statement required to end a transaction.
+ """
if not success:
return "ROLLBACK;"
return "COMMIT;"
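
The reworked base wrapper exposes autocommit as a connection-level property, and `commit()` / `rollback()` now refuse to run while an `atomic` block is active. A low-level usage sketch with the new API, assuming the default connection:

    from django.db import connection

    # Manual transaction control through the connection-level API.
    connection.set_autocommit(False)     # implicitly opens a transaction
    try:
        cursor = connection.cursor()
        cursor.execute("SELECT 1")
        connection.commit()
    except Exception:
        connection.rollback()
        raise
    finally:
        connection.set_autocommit(True)  # restore the configured default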
21 django/db/backends/creation.py
@@ -1,6 +1,7 @@
import hashlib
import sys
import time
+import warnings
from django.conf import settings
from django.db.utils import load_backend
@@ -382,10 +383,7 @@ def _create_test_db(self, verbosity, autoclobber):
qn = self.connection.ops.quote_name
- # Create the test database and connect to it. We need to autocommit
- # if the database supports it because PostgreSQL doesn't allow
- # CREATE/DROP DATABASE statements within transactions.
- self._prepare_for_test_db_ddl()
+ # Create the test database and connect to it.
cursor = self.connection.cursor()
try:
cursor.execute(
@@ -453,7 +451,6 @@ def _destroy_test_db(self, test_database_name, verbosity):
# to do so, because it's not allowed to delete a database while being
# connected to it.
cursor = self.connection.cursor()
- self._prepare_for_test_db_ddl()
# Wait to avoid "database is being accessed by other users" errors.
time.sleep(1)
cursor.execute("DROP DATABASE %s"
@@ -466,16 +463,10 @@ def set_autocommit(self):
anymore by Django code. Kept for compatibility with user code that
might use it.
"""
- pass
-
- def _prepare_for_test_db_ddl(self):
- """
- Internal implementation - Hook for tasks that should be performed
- before the ``CREATE DATABASE``/``DROP DATABASE`` clauses used by
- testing code to create/ destroy test databases. Needed e.g. in
- PostgreSQL to rollback and close any active transaction.
- """
- pass
+ warnings.warn(
+ "set_autocommit was moved from BaseDatabaseCreation to "
+ "BaseDatabaseWrapper.", PendingDeprecationWarning, stacklevel=2)
+ return self.connection.set_autocommit(True)
def sql_table_creation_suffix(self):
"""
5 django/db/backends/dummy/base.py
@@ -55,12 +55,9 @@ class DatabaseWrapper(BaseDatabaseWrapper):
_savepoint = ignore
_savepoint_commit = complain
_savepoint_rollback = ignore
- _enter_transaction_management = complain
- _leave_transaction_management = ignore
+ _set_autocommit = complain
set_dirty = complain
set_clean = complain
- commit_unless_managed = complain
- rollback_unless_managed = ignore
def __init__(self, *args, **kwargs):
super(DatabaseWrapper, self).__init__(*args, **kwargs)
12 django/db/backends/mysql/base.py
@@ -355,15 +355,6 @@ def bulk_insert_sql(self, fields, num_values):
items_sql = "(%s)" % ", ".join(["%s"] * len(fields))
return "VALUES " + ", ".join([items_sql] * num_values)
- def savepoint_create_sql(self, sid):
- return "SAVEPOINT %s" % sid
-
- def savepoint_commit_sql(self, sid):
- return "RELEASE SAVEPOINT %s" % sid
-
- def savepoint_rollback_sql(self, sid):
- return "ROLLBACK TO SAVEPOINT %s" % sid
-
class DatabaseWrapper(BaseDatabaseWrapper):
vendor = 'mysql'
operators = {
@@ -445,6 +436,9 @@ def _rollback(self):
except Database.NotSupportedError:
pass
+ def _set_autocommit(self, autocommit):
+ self.connection.autocommit(autocommit)
+
def disable_constraint_checking(self):
"""
Disables foreign key checks, primarily for use in adding rows with forward references. Always returns True,
3  django/db/backends/oracle/base.py
@@ -612,6 +612,9 @@ def _commit(self):
def _savepoint_commit(self, sid):
pass
+ def _set_autocommit(self, autocommit):
+ self.connection.autocommit = autocommit
+
def check_constraints(self, table_names=None):
"""
To check constraints, we set constraints to immediate. Then, when, we're done we must ensure they
3  django/db/backends/oracle/creation.py
@@ -273,6 +273,3 @@ def test_db_signature(self):
settings_dict['NAME'],
self._test_database_user(),
)
-
- def set_autocommit(self):
- self.connection.connection.autocommit = True
73 django/db/backends/postgresql_psycopg2/base.py
@@ -49,6 +49,7 @@ class DatabaseFeatures(BaseDatabaseFeatures):
has_select_for_update = True
has_select_for_update_nowait = True
has_bulk_insert = True
+ uses_savepoints = True
supports_tablespaces = True
supports_transactions = True
can_distinct_on_fields = True
@@ -77,15 +78,11 @@ class DatabaseWrapper(BaseDatabaseWrapper):
def __init__(self, *args, **kwargs):
super(DatabaseWrapper, self).__init__(*args, **kwargs)
+ opts = self.settings_dict["OPTIONS"]
+ RC = psycopg2.extensions.ISOLATION_LEVEL_READ_COMMITTED
+ self.isolation_level = opts.get('isolation_level', RC)
+
self.features = DatabaseFeatures(self)
- autocommit = self.settings_dict["OPTIONS"].get('autocommit', False)
- self.features.uses_autocommit = autocommit
- if autocommit:
- level = psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT
- else:
- level = self.settings_dict["OPTIONS"].get('isolation_level',
- psycopg2.extensions.ISOLATION_LEVEL_READ_COMMITTED)
- self._set_isolation_level(level)
self.ops = DatabaseOperations(self)
self.client = DatabaseClient(self)
self.creation = DatabaseCreation(self)
@@ -135,8 +132,7 @@ def init_connection_state(self):
if conn_tz != tz:
# Set the time zone in autocommit mode (see #17062)
- self.connection.set_isolation_level(
- psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
+ self.set_autocommit(True)
self.connection.cursor().execute(
self.ops.set_time_zone_sql(), [tz])
self.connection.set_isolation_level(self.isolation_level)
@@ -167,44 +163,22 @@ def close(self):
finally:
self.set_clean()
- def _enter_transaction_management(self, managed):
- """
- Switch the isolation level when needing transaction support, so that
- the same transaction is visible across all the queries.
- """
- if self.features.uses_autocommit and managed and not self.isolation_level:
- level = self.settings_dict["OPTIONS"].get('isolation_level',
- psycopg2.extensions.ISOLATION_LEVEL_READ_COMMITTED)
- self._set_isolation_level(level)
-
- def _leave_transaction_management(self, managed):
- """
- If the normal operating mode is "autocommit", switch back to that when
- leaving transaction management.
- """
- if self.features.uses_autocommit and not managed and self.isolation_level:
- self._set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
-
- def _set_isolation_level(self, level):
- """
- Do all the related feature configurations for changing isolation
- levels. This doesn't touch the uses_autocommit feature, since that
- controls the movement *between* isolation levels.
- """
- assert level in range(5)
- try:
- if self.connection is not None:
- self.connection.set_isolation_level(level)
- if level == psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT:
- self.set_clean()
- finally:
- self.isolation_level = level
- self.features.uses_savepoints = bool(level)
+ def _set_isolation_level(self, isolation_level):
+ assert isolation_level in range(1, 5) # Use set_autocommit for level = 0
+ if self.psycopg2_version >= (2, 4, 2):
+ self.connection.set_session(isolation_level=isolation_level)
+ else:
+ self.connection.set_isolation_level(isolation_level)
- def set_dirty(self):
- if ((self.transaction_state and self.transaction_state[-1]) or
- not self.features.uses_autocommit):
- super(DatabaseWrapper, self).set_dirty()
+ def _set_autocommit(self, autocommit):
+ if self.psycopg2_version >= (2, 4, 2):
+ self.connection.autocommit = autocommit
+ else:
+ if autocommit:
+ level = psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT
+ else:
+ level = self.isolation_level
+ self.connection.set_isolation_level(level)
def check_constraints(self, table_names=None):
"""
@@ -224,6 +198,11 @@ def is_usable(self):
return True
@cached_property
+ def psycopg2_version(self):
+ version = psycopg2.__version__.split(' ', 1)[0]
+ return tuple(int(v) for v in version.split('.'))
+
+ @cached_property
def pg_version(self):
with self.temporary_connection():
return get_version(self.connection)
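
The psycopg2 backend now selects one of two driver-level mechanisms based on `psycopg2_version`. A sketch of both code paths at the driver level (the DSN is a placeholder):

    import psycopg2
    import psycopg2.extensions

    conn = psycopg2.connect("dbname=example_db")  # placeholder DSN

    # Version detection mirroring DatabaseWrapper.psycopg2_version above.
    version = tuple(int(v) for v in psycopg2.__version__.split(' ', 1)[0].split('.'))

    if version >= (2, 4, 2):
        # Newer drivers expose autocommit directly (set_session() works too).
        conn.autocommit = True
    else:
        # Older drivers only offer isolation levels; AUTOCOMMIT disables
        # implicit transactions.
        conn.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)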
11 django/db/backends/postgresql_psycopg2/creation.py
@@ -77,14 +77,3 @@ def get_index_sql(index_name, opclass=''):
output.append(get_index_sql('%s_%s_like' % (db_table, f.column),
' text_pattern_ops'))
return output
-
- def set_autocommit(self):
- self._prepare_for_test_db_ddl()
-
- def _prepare_for_test_db_ddl(self):
- """Rollback and close the active transaction."""
- # Make sure there is an open connection.
- self.connection.cursor()
- self.connection.connection.rollback()
- self.connection.connection.set_isolation_level(
- psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
9 django/db/backends/postgresql_psycopg2/operations.py
@@ -175,15 +175,6 @@ def sequence_reset_sql(self, style, model_list):
style.SQL_TABLE(qn(f.m2m_db_table()))))
return output
- def savepoint_create_sql(self, sid):
- return "SAVEPOINT %s" % sid
-
- def savepoint_commit_sql(self, sid):
- return "RELEASE SAVEPOINT %s" % sid
-
- def savepoint_rollback_sql(self, sid):
- return "ROLLBACK TO SAVEPOINT %s" % sid
-
def prep_for_iexact_query(self, x):
return x
32 django/db/backends/sqlite3/base.py
@@ -99,6 +99,11 @@ class DatabaseFeatures(BaseDatabaseFeatures):
supports_mixed_date_datetime_comparisons = False
has_bulk_insert = True
can_combine_inserts_with_and_without_auto_increment_pk = False
+ autocommits_when_autocommit_is_off = True
+
+ @cached_property
+ def uses_savepoints(self):
+ return Database.sqlite_version_info >= (3, 6, 8)
@cached_property
def supports_stddev(self):
@@ -355,6 +360,25 @@ def close(self):
if self.settings_dict['NAME'] != ":memory:":
BaseDatabaseWrapper.close(self)
+ def _savepoint_allowed(self):
+ # When 'isolation_level' is not None, sqlite3 commits before each
+ # savepoint; it's a bug. When it is None, savepoints don't make sense
+ # because autocommit is enabled. The only exception is inside atomic
+ # blocks. To work around that bug, on SQLite, atomic starts a
+ # transaction explicitly rather than simply disable autocommit.
+ return self.in_atomic_block
+
+ def _set_autocommit(self, autocommit):
+ if autocommit:
+ level = None
+ else:
+ # sqlite3's internal default is ''. It's different from None.
+ # See Modules/_sqlite/connection.c.
+ level = ''
+ # 'isolation_level' is a misleading API.
+ # SQLite always runs at the SERIALIZABLE isolation level.
+ self.connection.isolation_level = level
+
def check_constraints(self, table_names=None):
"""
Checks each table name in `table_names` for rows with invalid foreign key references. This method is
@@ -392,6 +416,14 @@ def check_constraints(self, table_names=None):
def is_usable(self):
return True
+ def _start_transaction_under_autocommit(self):
+ """
+ Start a transaction explicitly in autocommit mode.
+
+ Staying in autocommit mode works around a bug of sqlite3 that breaks
+ savepoints when autocommit is disabled.
+ """
+ self.cursor().execute("BEGIN")
FORMAT_QMARK_REGEX = re.compile(r'(?<!%)%s')
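
The `_set_autocommit` / `_start_transaction_under_autocommit` pair works around the standard library behaviour described in the comments above. A standalone sketch of that behaviour, assuming Python's built-in sqlite3 module:

    import sqlite3

    conn = sqlite3.connect(":memory:")
    conn.isolation_level = None              # autocommit: no implicit BEGIN
    conn.execute("CREATE TABLE t (x INTEGER)")

    conn.execute("BEGIN")                    # start a transaction explicitly
    conn.execute("INSERT INTO t VALUES (1)")
    conn.rollback()                          # rolls back the explicit transaction

    assert conn.execute("SELECT COUNT(*) FROM t").fetchone()[0] == 0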
3  django/db/backends/sqlite3/creation.py
@@ -72,9 +72,6 @@ def _destroy_test_db(self, test_database_name, verbosity):
# Remove the SQLite database file
os.remove(test_database_name)
- def set_autocommit(self):
- self.connection.connection.isolation_level = None
-
def test_db_signature(self):
"""
Returns a tuple that uniquely identifies a test database.
84 django/db/models/base.py
@@ -609,48 +609,48 @@ def save_base(self, raw=False, cls=None, origin=None, force_insert=False,
if update_fields:
non_pks = [f for f in non_pks if f.name in update_fields or f.attname in update_fields]
- # First, try an UPDATE. If that doesn't update anything, do an INSERT.
- pk_val = self._get_pk_val(meta)
- pk_set = pk_val is not None
- record_exists = True
- manager = cls._base_manager
- if pk_set:
- # Determine if we should do an update (pk already exists, forced update,
- # no force_insert)
- if ((force_update or update_fields) or (not force_insert and
- manager.using(using).filter(pk=pk_val).exists())):
- if force_update or non_pks:
- values = [(f, None, (raw and getattr(self, f.attname) or f.pre_save(self, False))) for f in non_pks]
- if values:
- rows = manager.using(using).filter(pk=pk_val)._update(values)
- if force_update and not rows:
- raise DatabaseError("Forced update did not affect any rows.")
- if update_fields and not rows:
- raise DatabaseError("Save with update_fields did not affect any rows.")
- else:
- record_exists = False
- if not pk_set or not record_exists:
- if meta.order_with_respect_to:
- # If this is a model with an order_with_respect_to
- # autopopulate the _order field
- field = meta.order_with_respect_to
- order_value = manager.using(using).filter(**{field.name: getattr(self, field.attname)}).count()
- self._order = order_value
-
- fields = meta.local_fields
- if not pk_set:
- if force_update or update_fields:
- raise ValueError("Cannot force an update in save() with no primary key.")
- fields = [f for f in fields if not isinstance(f, AutoField)]
+ with transaction.commit_on_success_unless_managed(using=using):
+ # First, try an UPDATE. If that doesn't update anything, do an INSERT.
+ pk_val = self._get_pk_val(meta)
+ pk_set = pk_val is not None
+ record_exists = True
+ manager = cls._base_manager
+ if pk_set:
+ # Determine if we should do an update (pk already exists, forced update,
+ # no force_insert)
+ if ((force_update or update_fields) or (not force_insert and
+ manager.using(using).filter(pk=pk_val).exists())):
+ if force_update or non_pks:
+ values = [(f, None, (raw and getattr(self, f.attname) or f.pre_save(self, False))) for f in non_pks]
+ if values:
+ rows = manager.using(using).filter(pk=pk_val)._update(values)
+ if force_update and not rows:
+ raise DatabaseError("Forced update did not affect any rows.")
+ if update_fields and not rows:
+ raise DatabaseError("Save with update_fields did not affect any rows.")
+ else:
+ record_exists = False
+ if not pk_set or not record_exists:
+ if meta.order_with_respect_to:
+ # If this is a model with an order_with_respect_to
+ # autopopulate the _order field
+ field = meta.order_with_respect_to
+ order_value = manager.using(using).filter(**{field.name: getattr(self, field.attname)}).count()
+ self._order = order_value
+
+ fields = meta.local_fields
+ if not pk_set:
+ if force_update or update_fields:
+ raise ValueError("Cannot force an update in save() with no primary key.")
+ fields = [f for f in fields if not isinstance(f, AutoField)]
- record_exists = False
+ record_exists = False
- update_pk = bool(meta.has_auto_field and not pk_set)
- result = manager._insert([self], fields=fields, return_id=update_pk, using=using, raw=raw)
+ update_pk = bool(meta.has_auto_field and not pk_set)
+ result = manager._insert([self], fields=fields, return_id=update_pk, using=using, raw=raw)
- if update_pk:
- setattr(self, meta.pk.attname, result)
- transaction.commit_unless_managed(using=using)
+ if update_pk:
+ setattr(self, meta.pk.attname, result)
# Store the database on which the object was saved
self._state.db = using
@@ -963,9 +963,9 @@ def method_set_order(ordered_obj, self, id_list, using=None):
order_name = ordered_obj._meta.order_with_respect_to.name
# FIXME: It would be nice if there was an "update many" version of update
# for situations like this.
- for i, j in enumerate(id_list):
- ordered_obj.objects.filter(**{'pk': j, order_name: rel_val}).update(_order=i)
- transaction.commit_unless_managed(using=using)
+ with transaction.commit_on_success_unless_managed(using=using):
+ for i, j in enumerate(id_list):
+ ordered_obj.objects.filter(**{'pk': j, order_name: rel_val}).update(_order=i)
def method_get_order(ordered_obj, self):
84 django/db/models/deletion.py
@@ -50,26 +50,6 @@ def DO_NOTHING(collector, field, sub_objs, using):
pass
-def force_managed(func):
- @wraps(func)
- def decorated(self, *args, **kwargs):
- if not transaction.is_managed(using=self.using):
- transaction.enter_transaction_management(using=self.using)
- forced_managed = True
- else:
- forced_managed = False
- try:
- func(self, *args, **kwargs)
- if forced_managed:
- transaction.commit(using=self.using)
- else:
- transaction.commit_unless_managed(using=self.using)
- finally:
- if forced_managed:
- transaction.leave_transaction_management(using=self.using)
- return decorated
-
-
class Collector(object):
def __init__(self, using):
self.using = using
@@ -262,7 +242,6 @@ def sort(self):
self.data = SortedDict([(model, self.data[model])
for model in sorted_models])
- @force_managed
def delete(self):
# sort instance collections
for model, instances in self.data.items():
@@ -273,40 +252,41 @@ def delete(self):
# end of a transaction.
self.sort()
- # send pre_delete signals
- for model, obj in self.instances_with_model():
- if not model._meta.auto_created:
- signals.pre_delete.send(
- sender=model, instance=obj, using=self.using
- )
-
- # fast deletes
- for qs in self.fast_deletes:
- qs._raw_delete(using=self.using)
-
- # update fields
- for model, instances_for_fieldvalues in six.iteritems(self.field_updates):
- query = sql.UpdateQuery(model)
- for (field, value), instances in six.iteritems(instances_for_fieldvalues):
- query.update_batch([obj.pk for obj in instances],
- {field.name: value}, self.using)
-
- # reverse instance collections
- for instances in six.itervalues(self.data):
- instances.reverse()
-
- # delete instances
- for model, instances in six.iteritems(self.data):
- query = sql.DeleteQuery(model)
- pk_list = [obj.pk for obj in instances]
- query.delete_batch(pk_list, self.using)
-
- if not model._meta.auto_created:
- for obj in instances:
- signals.post_delete.send(
+ with transaction.commit_on_success_unless_managed(using=self.using):
+ # send pre_delete signals
+ for model, obj in self.instances_with_model():
+ if not model._meta.auto_created:
+ signals.pre_delete.send(
sender=model, instance=obj, using=self.using
)
+ # fast deletes
+ for qs in self.fast_deletes:
+ qs._raw_delete(using=self.using)
+
+ # update fields
+ for model, instances_for_fieldvalues in six.iteritems(self.field_updates):
+ query = sql.UpdateQuery(model)
+ for (field, value), instances in six.iteritems(instances_for_fieldvalues):
+ query.update_batch([obj.pk for obj in instances],
+ {field.name: value}, self.using)
+
+ # reverse instance collections
+ for instances in six.itervalues(self.data):
+ instances.reverse()
+
+ # delete instances
+ for model, instances in six.iteritems(self.data):
+ query = sql.DeleteQuery(model)
+ pk_list = [obj.pk for obj in instances]
+ query.delete_batch(pk_list, self.using)
+
+ if not model._meta.auto_created:
+ for obj in instances:
+ signals.post_delete.send(
+ sender=model, instance=obj, using=self.using
+ )
+
# update collected instances
for model, instances_for_fieldvalues in six.iteritems(self.field_updates):
for (field, value), instances in six.iteritems(instances_for_fieldvalues):
28 django/db/models/query.py
@@ -442,12 +442,7 @@ def bulk_create(self, objs, batch_size=None):
self._for_write = True
connection = connections[self.db]
fields = self.model._meta.local_fields
- if not transaction.is_managed(using=self.db):
- transaction.enter_transaction_management(using=self.db)
- forced_managed = True
- else:
- forced_managed = False
- try:
+ with transaction.commit_on_success_unless_managed(using=self.db):
if (connection.features.can_combine_inserts_with_and_without_auto_increment_pk
and self.model._meta.has_auto_field):
self._batched_insert(objs, fields, batch_size)
@@ -458,13 +453,6 @@ def bulk_create(self, objs, batch_size=None):
if objs_without_pk:
fields= [f for f in fields if not isinstance(f, AutoField)]
self._batched_insert(objs_without_pk, fields, batch_size)
- if forced_managed:
- transaction.commit(using=self.db)
- else:
- transaction.commit_unless_managed(using=self.db)
- finally:
- if forced_managed:
- transaction.leave_transaction_management(using=self.db)
return objs
@@ -581,20 +569,8 @@ def update(self, **kwargs):
self._for_write = True
query = self.query.clone(sql.UpdateQuery)
query.add_update_values(kwargs)
- if not transaction.is_managed(using=self.db):
- transaction.enter_transaction_management(using=self.db)
- forced_managed = True
- else:
- forced_managed = False
- try:
+ with transaction.commit_on_success_unless_managed(using=self.db):
rows = query.get_compiler(self.db).execute_sql(None)
- if forced_managed:
- transaction.commit(using=self.db)
- else:
- transaction.commit_unless_managed(using=self.db)
- finally:
- if forced_managed:
- transaction.leave_transaction_management(using=self.db)
self._result_cache = None
return rows
update.alters_data = True
287 django/db/transaction.py
@@ -12,9 +12,11 @@
or implicit commits or rollbacks.
"""
+import warnings
+
from functools import wraps
-from django.db import connections, DEFAULT_DB_ALIAS
+from django.db import connections, DatabaseError, DEFAULT_DB_ALIAS
class TransactionManagementError(Exception):
@@ -37,6 +39,10 @@ def get_connection(using=None):
using = DEFAULT_DB_ALIAS
return connections[using]
+###########################
+# Deprecated private APIs #
+###########################
+
def abort(using=None):
"""
Roll back any ongoing transactions and clean the transaction management
@@ -49,7 +55,7 @@ def abort(using=None):
"""
get_connection(using).abort()
-def enter_transaction_management(managed=True, using=None):
+def enter_transaction_management(managed=True, using=None, forced=False):
"""
Enters transaction management for a running thread. It must be balanced with
the appropriate leave_transaction_management call, since the actual state is
@@ -59,7 +65,7 @@ def enter_transaction_management(managed=True, using=None):
from the settings, if there is no surrounding block (dirty is always false
when no current block is running).
"""
- get_connection(using).enter_transaction_management(managed)
+ get_connection(using).enter_transaction_management(managed, forced)
def leave_transaction_management(using=None):
"""
@@ -92,52 +98,47 @@ def set_clean(using=None):
"""
get_connection(using).set_clean()
-def clean_savepoints(using=None):
- """
- Resets the counter used to generate unique savepoint ids in this thread.
- """
- get_connection(using).clean_savepoints()
-
def is_managed(using=None):
- """
- Checks whether the transaction manager is in manual or in auto state.
- """
- return get_connection(using).is_managed()
+ warnings.warn("'is_managed' is deprecated.",
+ PendingDeprecationWarning, stacklevel=2)
def managed(flag=True, using=None):
- """
- Puts the transaction manager into a manual state: managed transactions have
- to be committed explicitly by the user. If you switch off transaction
- management and there is a pending commit/rollback, the data will be
- commited.
- """
- get_connection(using).managed(flag)
+ warnings.warn("'managed' no longer serves a purpose.",
+ PendingDeprecationWarning, stacklevel=2)
def commit_unless_managed(using=None):
- """
- Commits changes if the system is not in managed transaction mode.
- """
- get_connection(using).commit_unless_managed()
+ warnings.warn("'commit_unless_managed' is now a no-op.",
+ PendingDeprecationWarning, stacklevel=2)
def rollback_unless_managed(using=None):
- """
- Rolls back changes if the system is not in managed transaction mode.
- """
- get_connection(using).rollback_unless_managed()
+ warnings.warn("'rollback_unless_managed' is now a no-op.",
+ PendingDeprecationWarning, stacklevel=2)
###############
# Public APIs #
###############
+def get_autocommit(using=None):
+ """
+ Get the autocommit status of the connection.
+ """
+ return get_connection(using).autocommit
+
+def set_autocommit(autocommit, using=None):
+ """
+ Set the autocommit status of the connection.
+ """
+ return get_connection(using).set_autocommit(autocommit)
+
def commit(using=None):
"""
- Does the commit itself and resets the dirty flag.
+ Commits a transaction and resets the dirty flag.
"""
get_connection(using).commit()
def rollback(using=None):
"""
- This function does the rollback itself and resets the dirty flag.
+ Rolls back a transaction and resets the dirty flag.
"""
get_connection(using).rollback()
@@ -163,9 +164,193 @@ def savepoint_commit(sid, using=None):
"""
get_connection(using).savepoint_commit(sid)
-##############
-# DECORATORS #
-##############
+def clean_savepoints(using=None):
+ """
+ Resets the counter used to generate unique savepoint ids in this thread.
+ """
+ get_connection(using).clean_savepoints()
+
+#################################
+# Decorators / context managers #
+#################################
+
+class Atomic(object):
+ """
+ This class guarantees the atomic execution of a given block.
+
+ An instance can be used either as a decorator or as a context manager.
+
+ When it's used as a decorator, __call__ wraps the execution of the
+ decorated function in the instance itself, used as a context manager.
+
+ When it's used as a context manager, __enter__ creates a transaction or a
+ savepoint, depending on whether a transaction is already in progress, and
+ __exit__ commits the transaction or releases the savepoint on normal exit,
+ and rolls back the transaction or to the savepoint on exceptions.
+
+ It's possible to disable the creation of savepoints if the goal is to
+ ensure that some code runs within a transaction without creating overhead.
+
+ A stack of savepoints identifiers is maintained as an attribute of the
+ connection. None denotes the absence of a savepoint.
+
+ This allows reentrancy even if the same AtomicWrapper is reused. For
+ example, it's possible to define `oa = atomic('other')` and use `@oa` or
+ `with oa:` multiple times.
+
+ Since database connections are thread-local, this is thread-safe.
+ """
+
+ def __init__(self, using, savepoint):
+ self.using = using
+ self.savepoint = savepoint
+
+ def _legacy_enter_transaction_management(self, connection):
+ if not connection.in_atomic_block:
+ if connection.transaction_state and connection.transaction_state[-1]:
+ connection._atomic_forced_unmanaged = True
+ connection.enter_transaction_management(managed=False)
+ else:
+ connection._atomic_forced_unmanaged = False
+
+ def _legacy_leave_transaction_management(self, connection):
+ if not connection.in_atomic_block and connection._atomic_forced_unmanaged:
+ connection.leave_transaction_management()
+
+ def __enter__(self):
+ connection = get_connection(self.using)
+
+ # Ensure we have a connection to the database before testing
+ # autocommit status.
+ connection.ensure_connection()
+
+ # Remove this when the legacy transaction management goes away.
+ self._legacy_enter_transaction_management(connection)
+
+ if not connection.in_atomic_block and not connection.autocommit:
+ raise TransactionManagementError(
+ "'atomic' cannot be used when autocommit is disabled.")
+
+ if connection.in_atomic_block:
+ # We're already in a transaction; create a savepoint, unless we
+ # were told not to or we're already waiting for a rollback. The
+ # second condition avoids creating useless savepoints and prevents
+ # overwriting needs_rollback until the rollback is performed.
+ if self.savepoint and not connection.needs_rollback:
+ sid = connection.savepoint()
+ connection.savepoint_ids.append(sid)
+ else:
+ connection.savepoint_ids.append(None)
+ else:
+ # We aren't in a transaction yet; create one.
+ # The usual way to start a transaction is to turn autocommit off.
+ # However, some database adapters (namely sqlite3) don't handle
+ # transactions and savepoints properly when autocommit is off.
+ # In such cases, start an explicit transaction instead, which has
+ # the side-effect of disabling autocommit.
+ if connection.features.autocommits_when_autocommit_is_off:
+ connection._start_transaction_under_autocommit()
+ connection.autocommit = False
+ else:
+ connection.set_autocommit(False)
+ connection.in_atomic_block = True
+ connection.needs_rollback = False
+
+ def __exit__(self, exc_type, exc_value, traceback):
+ connection = get_connection(self.using)
+ if exc_value is None and not connection.needs_rollback:
+ if connection.savepoint_ids:
+ # Release savepoint if there is one
+ sid = connection.savepoint_ids.pop()
+ if sid is not None:
+ try:
+ connection.savepoint_commit(sid)
+ except DatabaseError:
+ connection.savepoint_rollback(sid)
+ # Remove this when the legacy transaction management goes away.
+ self._legacy_leave_transaction_management(connection)
+ raise
+ else:
+ # Commit transaction
+ connection.in_atomic_block = False
+ try:
+ connection.commit()
+ except DatabaseError:
+ connection.rollback()
+ # Remove this when the legacy transaction management goes away.
+ self._legacy_leave_transaction_management(connection)
+ raise
+ finally:
+ if connection.features.autocommits_when_autocommit_is_off:
+ connection.autocommit = True
+ else:
+ connection.set_autocommit(True)
+ else:
+ # This flag will be set to True again if there isn't a savepoint
+            # that allows the rollback to be performed at this level.
+ connection.needs_rollback = False
+ if connection.savepoint_ids:
+ # Roll back to savepoint if there is one, mark for rollback
+ # otherwise.
+ sid = connection.savepoint_ids.pop()
+ if sid is None:
+ connection.needs_rollback = True
+ else:
+ connection.savepoint_rollback(sid)
+ else:
+ # Roll back transaction
+ connection.in_atomic_block = False
+ try:
+ connection.rollback()
+ finally:
+ if connection.features.autocommits_when_autocommit_is_off:
+ connection.autocommit = True
+ else:
+ connection.set_autocommit(True)
+
+ # Remove this when the legacy transaction management goes away.
+ self._legacy_leave_transaction_management(connection)
+
+
+ def __call__(self, func):
+ @wraps(func)
+ def inner(*args, **kwargs):
+ with self:
+ return func(*args, **kwargs)
+ return inner
+
+
+def atomic(using=None, savepoint=True):
+ # Bare decorator: @atomic -- although the first argument is called
+ # `using`, it's actually the function being decorated.
+ if callable(using):
+ return Atomic(DEFAULT_DB_ALIAS, savepoint)(using)
+ # Decorator: @atomic(...) or context manager: with atomic(...): ...
+ else:
+ return Atomic(using, savepoint)
+
+
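A short usage sketch of `atomic` in both of its forms, assuming placeholder `Author` and `Book` models; the nested call turns into a savepoint, as described in the `Atomic` docstring above:

from django.db import transaction

@transaction.atomic
def create_author(name):
    return Author.objects.create(name=name)        # Author is a placeholder model

def create_book(title, author_name):
    with transaction.atomic():                      # outer transaction
        author = create_author(author_name)         # nested block becomes a savepoint
        Book.objects.create(title=title, author=author)  # Book is a placeholder model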
+def atomic_if_autocommit(using=None, savepoint=True):
+ # This variant only exists to support the ability to disable transaction
+ # management entirely in the DATABASES setting. It doesn't care about the
+ # autocommit state at run time.
+ db = DEFAULT_DB_ALIAS if callable(using) else using
+ autocommit = get_connection(db).settings_dict['AUTOCOMMIT']
+
+ if autocommit:
+ return atomic(using, savepoint)
+ else:
+ # Bare decorator: @atomic_if_autocommit
+ if callable(using):
+ return using
+ # Decorator: @atomic_if_autocommit(...)
+ else:
+ return lambda func: func
+
+
+############################################
+# Deprecated decorators / context managers #
+############################################
class Transaction(object):
"""
@@ -222,9 +407,11 @@ def autocommit(using=None):
this decorator is useful if you globally activated transaction management in
your settings file and want the default behavior in some view functions.
"""
+ warnings.warn("autocommit is deprecated in favor of set_autocommit.",
+ PendingDeprecationWarning, stacklevel=2)
+
def entering(using):
enter_transaction_management(managed=False, using=using)
- managed(False, using=using)
def exiting(exc_value, using):
leave_transaction_management(using=using)
@@ -238,9 +425,11 @@ def commit_on_success(using=None):
a rollback is made. This is one of the most common ways to do transaction
control in Web apps.
"""
+ warnings.warn("commit_on_success is deprecated in favor of atomic.",
+ PendingDeprecationWarning, stacklevel=2)
+
def entering(using):
enter_transaction_management(using=using)
- managed(True, using=using)
def exiting(exc_value, using):
try:
@@ -266,11 +455,37 @@ def commit_manually(using=None):
own -- it's up to the user to call the commit and rollback functions
themselves.
"""
+ warnings.warn("commit_manually is deprecated in favor of set_autocommit.",
+ PendingDeprecationWarning, stacklevel=2)
+
def entering(using):
enter_transaction_management(using=using)
- managed(True, using=using)
def exiting(exc_value, using):
leave_transaction_management(using=using)
return _transaction_func(entering, exiting, using)
+
+def commit_on_success_unless_managed(using=None, savepoint=False):
+ """
+ Transitory API to preserve backwards-compatibility while refactoring.
+
+ Once the legacy transaction management is fully deprecated, this should
+ simply be replaced by atomic_if_autocommit. Until then, it's necessary to
+ avoid making a commit where Django didn't use to, since entering atomic in
+    managed mode triggers a commit.
+
+ Unlike atomic, savepoint defaults to False because that's closer to the
+ legacy behavior.
+ """
+ connection = get_connection(using)
+ if connection.autocommit or connection.in_atomic_block:
+ return atomic_if_autocommit(using, savepoint)
+ else:
+ def entering(using):
+ pass
+
+ def exiting(exc_value, using):
+ set_dirty(using=using)
+
+ return _transaction_func(entering, exiting, using)
8 django/db/utils.py
@@ -2,6 +2,7 @@
import os
import pkgutil
from threading import local
+import warnings
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
@@ -158,6 +159,13 @@ def ensure_defaults(self, alias):
except KeyError:
raise ConnectionDoesNotExist("The connection %s doesn't exist" % alias)
+ conn.setdefault('ATOMIC_REQUESTS', False)
+ if settings.TRANSACTIONS_MANAGED:
+ warnings.warn(
+ "TRANSACTIONS_MANAGED is deprecated. Use AUTOCOMMIT instead.",
+ PendingDeprecationWarning, stacklevel=2)
+ conn.setdefault('AUTOCOMMIT', False)
+ conn.setdefault('AUTOCOMMIT', True)
conn.setdefault('ENGINE', 'django.db.backends.dummy')
if conn['ENGINE'] == 'django.db.backends.' or not conn['ENGINE']:
conn['ENGINE'] = 'django.db.backends.dummy'
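The two keys that `ensure_defaults()` now fills in can also be set explicitly; a sketch with the default values and a placeholder database name:

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql_psycopg2',
        'NAME': 'mydb',            # placeholder
        'AUTOCOMMIT': True,        # default; False turns Django's transaction management off
        'ATOMIC_REQUESTS': False,  # default; True wraps each HTTP request in atomic()
    }
}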
16 django/middleware/transaction.py
@@ -1,4 +1,7 @@
-from django.db import transaction
+import warnings
+
+from django.core.exceptions import MiddlewareNotUsed
+from django.db import connection, transaction
class TransactionMiddleware(object):
"""
@@ -7,10 +10,17 @@ class TransactionMiddleware(object):
commit, the commit is done when a successful response is created. If an
exception happens, the database is rolled back.
"""
+
+ def __init__(self):
+ warnings.warn(
+ "TransactionMiddleware is deprecated in favor of ATOMIC_REQUESTS.",
+ PendingDeprecationWarning, stacklevel=2)
+ if connection.settings_dict['ATOMIC_REQUESTS']:
+ raise MiddlewareNotUsed
+
def process_request(self, request):
"""Enters transaction management"""
transaction.enter_transaction_management()
- transaction.managed(True)
def process_exception(self, request, exception):
"""Rolls back the database and leaves transaction management"""
@@ -24,7 +34,7 @@ def process_exception(self, request, exception):
def process_response(self, request, response):
"""Commits and leaves transaction management."""
- if transaction.is_managed():
+ if not transaction.get_autocommit():
if transaction.is_dirty():
# Note: it is possible that the commit fails. If the reason is
# closed connection or some similar reason, then there is
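The upgrade this deprecation points to, sketched as a settings change (the rest of the middleware list is project-specific and omitted):

# settings.py -- before: per-request transactions via the middleware
MIDDLEWARE_CLASSES = (
    'django.middleware.transaction.TransactionMiddleware',
    # ...
)

# settings.py -- after: let the connection wrap each request in atomic()
DATABASES['default']['ATOMIC_REQUESTS'] = True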
36 django/test/testcases.py
@@ -67,7 +67,6 @@ def to_list(value):
real_rollback = transaction.rollback
real_enter_transaction_management = transaction.enter_transaction_management
real_leave_transaction_management = transaction.leave_transaction_management
-real_managed = transaction.managed
real_abort = transaction.abort
def nop(*args, **kwargs):
@@ -78,7 +77,6 @@ def disable_transaction_methods():
transaction.rollback = nop
transaction.enter_transaction_management = nop
transaction.leave_transaction_management = nop
- transaction.managed = nop
transaction.abort = nop
def restore_transaction_methods():
@@ -86,7 +84,6 @@ def restore_transaction_methods():
transaction.rollback = real_rollback
transaction.enter_transaction_management = real_enter_transaction_management
transaction.leave_transaction_management = real_leave_transaction_management
- transaction.managed = real_managed
transaction.abort = real_abort
@@ -157,14 +154,6 @@ def __init__(self, *args, **kwargs):
doctest.DocTestRunner.__init__(self, *args, **kwargs)
self.optionflags = doctest.ELLIPSIS
- def report_unexpected_exception(self, out, test, example, exc_info):
- doctest.DocTestRunner.report_unexpected_exception(self, out, test,
- example, exc_info)
- # Rollback, in case of database errors. Otherwise they'd have
- # side effects on other tests.
- for conn in connections:
- transaction.rollback_unless_managed(using=conn)
-
class _AssertNumQueriesContext(CaptureQueriesContext):
def __init__(self, test_case, num, connection):
@@ -490,14 +479,10 @@ def _reset_sequences(self, db_name):
conn.ops.sequence_reset_by_name_sql(no_style(),
conn.introspection.sequence_list())
if sql_list:
- try:
+ with transaction.commit_on_success_unless_managed(using=db_name):
cursor = conn.cursor()
for sql in sql_list:
cursor.execute(sql)
- except Exception:
- transaction.rollback_unless_managed(using=db_name)
- raise
- transaction.commit_unless_managed(using=db_name)
def _fixture_setup(self):
for db_name in self._databases_names(include_mirrors=False):
@@ -537,11 +522,6 @@ def _post_teardown(self):
conn.close()
def _fixture_teardown(self):
- # Roll back any pending transactions in order to avoid a deadlock
- # during flush when TEST_MIRROR is used (#18984).
- for conn in connections.all():
- conn.rollback_unless_managed()
-
for db in self._databases_names(include_mirrors=False):
call_command('flush', verbosity=0, interactive=False, database=db,
skip_validation=True, reset_sequences=False)
@@ -831,9 +811,11 @@ def _fixture_setup(self):
assert not self.reset_sequences, 'reset_sequences cannot be used on TestCase instances'
+ self.atomics = {}
for db_name in self._databases_names():
- transaction.enter_transaction_management(using=db_name)
- transaction.managed(True, using=db_name)
+ self.atomics[db_name] = transaction.atomic(using=db_name)
+ self.atomics[db_name].__enter__()
+ # Remove this when the legacy transaction management goes away.
disable_transaction_methods()
from django.contrib.sites.models import Site
@@ -853,10 +835,12 @@ def _fixture_teardown(self):
if not connections_support_transactions():
return super(TestCase, self)._fixture_teardown()
+ # Remove this when the legacy transaction management goes away.
restore_transaction_methods()
- for db in self._databases_names():
- transaction.rollback(using=db)
- transaction.leave_transaction_management(using=db)
+ for db_name in reversed(self._databases_names()):
+ # Hack to force a rollback
+ connections[db_name].needs_rollback = True
+ self.atomics[db_name].__exit__(None, None, None)
def _deferredSkip(condition, reason):
19 docs/internals/deprecation.txt
@@ -329,6 +329,15 @@ these changes.
1.8
---
+* The following transaction management APIs will be removed:
+
+ - ``TransactionMiddleware``,
+ - the decorators and context managers ``autocommit``, ``commit_on_success``,
+ and ``commit_manually``,
+ - the ``TRANSACTIONS_MANAGED`` setting.
+
+ Upgrade paths are described in :ref:`transactions-upgrading-from-1.5`.
+
* The :ttag:`cycle` and :ttag:`firstof` template tags will auto-escape their
arguments. In 1.6 and 1.7, this behavior is provided by the version of these
tags in the ``future`` template tag library.
@@ -339,8 +348,6 @@ these changes.
* ``Model._meta.module_name`` was renamed to ``model_name``.
-* The private API ``django.db.close_connection`` will be removed.
-
* Remove the backward compatible shims introduced to rename ``get_query_set``
and similar queryset methods. This affects the following classes:
``BaseModelAdmin``, ``ChangeList``, ``BaseCommentNode``,
@@ -350,6 +357,14 @@ these changes.
* Remove the backward compatible shims introduced to rename the attributes
``ChangeList.root_query_set`` and ``ChangeList.query_set``.
+* The following private APIs will be removed:
+ - ``django.db.close_connection()``
+ - ``django.db.backends.creation.BaseDatabaseCreation.set_autocommit()``
+ - ``django.db.transaction.is_managed()``
+ - ``django.db.transaction.managed()``
+ - ``django.db.transaction.commit_unless_managed()``
+ - ``django.db.transaction.rollback_unless_managed()``
+
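For the deprecated ``commit_on_success`` decorator mentioned above, the replacement is ``atomic``; a sketch with a placeholder ``Row`` model:

from django.db import transaction

# Deprecated in 1.6, scheduled for removal in 1.8:
@transaction.commit_on_success
def import_rows(rows):
    for row in rows:
        Row.objects.create(**row)    # Row is a placeholder model

# Replacement:
@transaction.atomic
def import_rows(rows):
    for row in rows:
        Row.objects.create(**row)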