Permalink
Browse files

newforms-admin: Merged from trunk up to [7499].

git-svn-id: http://code.djangoproject.com/svn/django/branches/newforms-admin@7500 bcc190cf-cafb-0310-a4f2-bffc1f526a37
  • Loading branch information...
1 parent 738e6d9 commit 886005078d66bd779ad3d6434d5699fbc17cfed1 @brosner brosner committed Apr 28, 2008
@@ -51,6 +51,7 @@ class BaseDatabaseFeatures(object):
uses_case_insensitive_names = False
uses_custom_query_class = False
empty_fetchmany_value = []
+ update_can_self_select = True
class BaseDatabaseOperations(object):
"""
@@ -63,6 +63,7 @@ class DatabaseFeatures(BaseDatabaseFeatures):
autoindexes_primary_keys = False
inline_fk_references = False
empty_fetchmany_value = ()
+ update_can_self_select = False
class DatabaseOperations(BaseDatabaseOperations):
def date_extract_sql(self, lookup_type, field_name):
@@ -67,6 +67,7 @@ class DatabaseFeatures(BaseDatabaseFeatures):
autoindexes_primary_keys = False
inline_fk_references = False
empty_fetchmany_value = ()
+ update_can_self_select = False
class DatabaseOperations(BaseDatabaseOperations):
def date_extract_sql(self, lookup_type, field_name):
@@ -9,6 +9,7 @@
from django.db import get_creation_module
from django.db.models import signals
+from django.db.models.query_utils import QueryWrapper
from django.dispatch import dispatcher
from django.conf import settings
from django.core import validators
@@ -224,6 +225,9 @@ def get_db_prep_save(self, value):
def get_db_prep_lookup(self, lookup_type, value):
"Returns field's value prepared for database lookup."
+ if hasattr(value, 'as_sql'):
+ sql, params = value.as_sql()
+ return QueryWrapper(('(%s)' % sql), params)
if lookup_type in ('exact', 'regex', 'iregex', 'gt', 'gte', 'lt', 'lte', 'month', 'day', 'search'):
return [value]
elif lookup_type in ('range', 'in'):
View
@@ -28,6 +28,17 @@ def __init__(self, model=None, query=None):
# PYTHON MAGIC METHODS #
########################
+ def __getstate__(self):
+ """
+ Allows the QuerySet to be pickled.
+ """
+ # Force the cache to be fully populated.
+ len(self)
+
+ obj_dict = self.__dict__.copy()
+ obj_dict['_iter'] = None
+ return obj_dict
+
def __repr__(self):
return repr(list(self))
@@ -37,7 +48,7 @@ def __len__(self):
# whilst not messing up any existing iterators against the queryset.
if self._result_cache is None:
if self._iter:
- self._result_cache = list(self._iter())
+ self._result_cache = list(self._iter)
else:
self._result_cache = list(self.iterator())
elif self._iter:
@@ -497,9 +508,6 @@ def __init__(self, *args, **kwargs):
# QuerySet.clone() will also set up the _fields attribute with the
# names of the model fields to select.
- def __iter__(self):
- return self.iterator()
-
def iterator(self):
self.query.trim_extra_select(self.extra_names)
names = self.query.extra_select.keys() + self.field_names
@@ -99,6 +99,24 @@ def __deepcopy__(self, memo):
memo[id(self)] = result
return result
+ def __getstate__(self):
+ """
+ Pickling support.
+ """
+ obj_dict = self.__dict__.copy()
+ del obj_dict['connection']
+ return obj_dict
+
+ def __setstate__(self, obj_dict):
+ """
+ Unpickling support.
+ """
+ self.__dict__.update(obj_dict)
+ # XXX: Need a better solution for this when multi-db stuff is
+ # supported. It's the only class-reference to the module-level
+ # connection variable.
+ self.connection = connection
+
def get_meta(self):
"""
Returns the Options instance (the model._meta) from which to start
@@ -895,9 +913,15 @@ def add_filter(self, filter_expr, connector=AND, negate=False, trim=False,
Add a single filter to the query. The 'filter_expr' is a pair:
(filter_string, value). E.g. ('name__contains', 'fred')
- If 'negate' is True, this is an exclude() filter. If 'trim' is True, we
- automatically trim the final join group (used internally when
- constructing nested queries).
+ If 'negate' is True, this is an exclude() filter. It's important to
+ note that this method does not negate anything in the where-clause
+ object when inserting the filter constraints. This is because negated
+ filters often require multiple calls to add_filter() and the negation
+ should only happen once. So the caller is responsible for this (the
+ caller will normally be add_q(); see that method for an example).
+
+ If 'trim' is True, we automatically trim the final join group (used
+ internally when constructing nested queries).
If 'can_reuse' is a set, we are processing a component of a
multi-component filter (e.g. filter(Q1, Q2)). In this case, 'can_reuse'
@@ -1001,7 +1025,6 @@ def add_filter(self, filter_expr, connector=AND, negate=False, trim=False,
self.where.add((alias, col, field, lookup_type, value), connector)
if negate:
- self.where.negate()
for alias in join_list:
self.promote_alias(alias)
if final > 1 and lookup_type != 'isnull':
@@ -1039,12 +1062,12 @@ def add_q(self, q_object, used_aliases=None):
self.where.start_subtree(connector)
self.add_q(child, used_aliases)
self.where.end_subtree()
- if q_object.negated:
- self.where.children[-1].negate()
else:
self.add_filter(child, connector, q_object.negated,
can_reuse=used_aliases)
connector = q_object.connector
+ if q_object.negated:
+ self.where.negate()
if subtree:
self.where.end_subtree()
@@ -159,20 +159,37 @@ def pre_sql_setup(self):
# from other tables.
query = self.clone(klass=Query)
query.bump_prefix()
- query.select = []
query.extra_select = {}
- query.add_fields([query.model._meta.pk.name])
+ first_table = query.tables[0]
+ if query.alias_refcount[first_table] == 1:
+ # We can remove one table from the inner query.
+ query.unref_alias(first_table)
+ for i in xrange(1, len(query.tables)):
+ table = query.tables[i]
+ if query.alias_refcount[table]:
+ break
+ join_info = query.alias_map[table]
+ query.select = [(join_info[RHS_ALIAS], join_info[RHS_JOIN_COL])]
+ must_pre_select = False
+ else:
+ query.select = []
+ query.add_fields([query.model._meta.pk.name])
+ must_pre_select = not self.connection.features.update_can_self_select
# Now we adjust the current query: reset the where clause and get rid
# of all the tables we don't need (since they're in the sub-select).
self.where = self.where_class()
- if self.related_updates:
+ if self.related_updates or must_pre_select:
+ # Either we're using the idents in multiple update queries (so
+ # don't want them to change), or the db backend doesn't support
+ # selecting from the updating table (e.g. MySQL).
idents = []
for rows in query.execute_sql(MULTI):
idents.extend([r[0] for r in rows])
self.add_filter(('pk__in', idents))
self.related_ids = idents
else:
+ # The fast path. Filters and updates in one query.
self.add_filter(('pk__in', query))
for alias in self.tables[1:]:
self.alias_refcount[alias] = 0
View
@@ -376,6 +376,29 @@ You can evaluate a ``QuerySet`` in the following ways:
iterating over a ``QuerySet`` will take advantage of your database to
load data and instantiate objects only as you need them.
+
+Pickling QuerySets
+~~~~~~~~~~~~~~~~~~
+
+If you pickle_ a ``QuerySet``, this will also force all the results to be
+loaded into memory prior to pickling. This is because pickling is usually used
+as a precursor to caching, and when the cached queryset is reloaded, you want
+the results to already be present. This means that when you unpickle a
+``QuerySet``, it contains the results at the moment it was pickled, rather
+than the results that are currently in the database.
+
+If you only want to pickle the necessary information to recreate the
+``QuerySet`` from the database at a later time, pickle the ``query`` attribute
+of the ``QuerySet``. You can then recreate the original ``QuerySet`` (without
+any results loaded) using some code like this::
+
+ >>> import pickle
+ >>> query = pickle.loads(s) # Assuming 's' is the pickled string.
+ >>> qs = MyModel.objects.all()
+ >>> qs.query = query # Restore the original 'query'.
+
+.. _pickle: http://docs.python.org/lib/module-pickle.html
+
Limiting QuerySets
------------------
@@ -117,6 +117,24 @@ class LoopZ(models.Model):
class Meta:
ordering = ['z']
+# A model and custom default manager combination.
+class CustomManager(models.Manager):
+ def get_query_set(self):
+ return super(CustomManager, self).get_query_set().filter(public=True,
+ tag__name='t1')
+
+class ManagedModel(models.Model):
+ data = models.CharField(max_length=10)
+ tag = models.ForeignKey(Tag)
+ public = models.BooleanField(default=True)
+
+ objects = CustomManager()
+ normal_manager = models.Manager()
+
+ def __unicode__(self):
+ return self.data
+
+
__test__ = {'API_TESTS':"""
>>> t1 = Tag(name='t1')
>>> t1.save()
@@ -658,5 +676,30 @@ class Meta:
works.
>>> Item.objects.values('note__note').order_by('queries_note.note', 'id')
[{'note__note': u'n2'}, {'note__note': u'n3'}, {'note__note': u'n3'}, {'note__note': u'n3'}]
+
+Bug #7096 -- Make sure exclude() with multiple conditions continues to work.
+>>> Tag.objects.filter(parent=t1, name='t3').order_by('name')
+[<Tag: t3>]
+>>> Tag.objects.exclude(parent=t1, name='t3').order_by('name')
+[<Tag: t1>, <Tag: t2>, <Tag: t4>, <Tag: t5>]
+>>> Item.objects.exclude(tags__name='t1', name='one').order_by('name').distinct()
+[<Item: four>, <Item: three>, <Item: two>]
+>>> Item.objects.filter(name__in=['three', 'four']).exclude(tags__name='t1').order_by('name')
+[<Item: four>, <Item: three>]
+
+More twisted cases, involving nested negations.
+>>> Item.objects.exclude(~Q(tags__name='t1', name='one'))
+[<Item: one>]
+>>> Item.objects.filter(~Q(tags__name='t1', name='one'), name='two')
+[<Item: two>]
+>>> Item.objects.exclude(~Q(tags__name='t1', name='one'), name='two')
+[<Item: four>, <Item: one>, <Item: three>]
+
+Bug #7095
Updates that are filtered on the model being updated are somewhat tricky to get
right in MySQL. This exercises that case.
+>>> mm = ManagedModel.objects.create(data='mm1', tag=t1, public=True)
+>>> ManagedModel.objects.update(data='mm')
+
"""}

0 comments on commit 8860050

Please sign in to comment.