"""
The main QuerySet implementation. This provides the public API for the ORM.
"""

import copy
import itertools
import sys

from django.conf import settings
from django.core import exceptions
from django.db import connections, router, transaction, IntegrityError
from django.db.models.constants import LOOKUP_SEP
from django.db.models.fields import AutoField, Empty
from django.db.models.query_utils import (Q, select_related_descend,
    deferred_class_factory, InvalidQuery)
from django.db.models.deletion import Collector
from django.db.models.sql.constants import CURSOR
from django.db.models import sql
from django.utils.functional import partition
from django.utils import six
from django.utils import timezone

# The maximum number (one less than the max to be precise) of results to fetch
# in a get() query
MAX_GET_RESULTS = 20

# The maximum number of items to display in a QuerySet.__repr__
REPR_OUTPUT_SIZE = 20

# Pull into this namespace for backwards compatibility.
EmptyResultSet = sql.EmptyResultSet


def _pickle_queryset(class_bases, class_dict):
    """
    Used by `__reduce__` to create the initial version of the `QuerySet` class
    onto which the output of `__getstate__` will be applied.

    See `__reduce__` for more details.
    """
    new = Empty()
    new.__class__ = type(class_bases[0].__name__, class_bases, class_dict)
    return new


class QuerySet(object):
    """
    Represents a lazy database lookup for a set of objects.
    """

    def __init__(self, model=None, query=None, using=None, hints=None):
        self.model = model
        self._db = using
        self._hints = hints or {}
        self.query = query or sql.Query(self.model)
        self._result_cache = None
        self._sticky_filter = False
        self._for_write = False
        self._prefetch_related_lookups = []
        self._prefetch_done = False
        self._known_related_objects = {}  # {rel_field, {pk: rel_obj}}

    def as_manager(cls):
        # Address the circular dependency between `QuerySet` and `Manager`.
        from django.db.models.manager import Manager
        return Manager.from_queryset(cls)()
    as_manager.queryset_only = True
    as_manager = classmethod(as_manager)

    ########################
    # PYTHON MAGIC METHODS #
    ########################

    def __deepcopy__(self, memo):
        """
        Deep copy of a QuerySet doesn't populate the cache.
        """
        obj = self.__class__()
        for k, v in self.__dict__.items():
            if k == '_result_cache':
                obj.__dict__[k] = None
            else:
                obj.__dict__[k] = copy.deepcopy(v, memo)
        return obj

    def __getstate__(self):
        """
        Allows the QuerySet to be pickled.
        """
        # Force the cache to be fully populated.
        self._fetch_all()
        obj_dict = self.__dict__.copy()
        return obj_dict

    def __reduce__(self):
        """
        Used by pickle to deal with the types that we create dynamically when
        specialized querysets such as `ValuesQuerySet` are used in conjunction
        with querysets that are *subclasses* of `QuerySet`.

        See the `_clone` implementation for more details.
        """
        if hasattr(self, '_specialized_queryset_class'):
            class_bases = (
                self._specialized_queryset_class,
                self._base_queryset_class,
            )
            class_dict = {
                '_specialized_queryset_class': self._specialized_queryset_class,
                '_base_queryset_class': self._base_queryset_class,
            }
            return _pickle_queryset, (class_bases, class_dict), self.__getstate__()
        return super(QuerySet, self).__reduce__()

    def __repr__(self):
        data = list(self[:REPR_OUTPUT_SIZE + 1])
        if len(data) > REPR_OUTPUT_SIZE:
            data[-1] = "...(remaining elements truncated)..."
        return repr(data)

    def __len__(self):
        self._fetch_all()
        return len(self._result_cache)

    def __iter__(self):
        """
        The queryset iterator protocol uses three nested iterators in the
        default case:
            1. sql/compiler.execute_sql()
               - Returns 100 rows at a time (constants.GET_ITERATOR_CHUNK_SIZE)
                 using cursor.fetchmany(). This part is responsible for
                 doing some column masking, and returning the rows in chunks.
            2. sql/compiler.results_iter()
               - Returns one row at a time. At this point the rows are still
                 just tuples. In some cases the return values are converted to
                 Python values at this location (see resolve_columns(),
                 resolve_aggregate()).
            3. self.iterator()
               - Responsible for turning the rows into model objects.
        """
        self._fetch_all()
        return iter(self._result_cache)

    def __nonzero__(self):
        self._fetch_all()
        return bool(self._result_cache)

    def __getitem__(self, k):
        """
        Retrieves an item or slice from the set of results.
        """
        if not isinstance(k, (slice,) + six.integer_types):
            raise TypeError
        assert ((not isinstance(k, slice) and (k >= 0)) or
                (isinstance(k, slice) and (k.start is None or k.start >= 0) and
                 (k.stop is None or k.stop >= 0))), \
            "Negative indexing is not supported."

        if self._result_cache is not None:
            return self._result_cache[k]

        if isinstance(k, slice):
            qs = self._clone()
            if k.start is not None:
                start = int(k.start)
            else:
                start = None
            if k.stop is not None:
                stop = int(k.stop)
            else:
                stop = None
            qs.query.set_limits(start, stop)
            return list(qs)[::k.step] if k.step else qs

        qs = self._clone()
        qs.query.set_limits(k, k + 1)
        return list(qs)[0]
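
    # Usage sketch (hedged, not part of the original source): slicing maps
    # onto SQL LIMIT/OFFSET through set_limits() and stays lazy, except when
    # a step is given or a single index is fetched. `Entry` is a hypothetical
    # model used for illustration only:
    #
    #     Entry.objects.all()[5:10]   # lazy; compiles to LIMIT 5 OFFSET 5
    #     Entry.objects.all()[::2]    # step given: the list() branch runs now
    #     Entry.objects.all()[0]      # LIMIT 1; hits the database immediately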

    def __and__(self, other):
        self._merge_sanity_check(other)
        if isinstance(other, EmptyQuerySet):
            return other
        if isinstance(self, EmptyQuerySet):
            return self
        combined = self._clone()
        combined._merge_known_related_objects(other)
        combined.query.combine(other.query, sql.AND)
        return combined

    def __or__(self, other):
        self._merge_sanity_check(other)
        if isinstance(self, EmptyQuerySet):
            return other
        if isinstance(other, EmptyQuerySet):
            return self
        combined = self._clone()
        combined._merge_known_related_objects(other)
        combined.query.combine(other.query, sql.OR)
        return combined

    ####################################
    # METHODS THAT DO DATABASE QUERIES #
    ####################################

    def iterator(self):
        """
        An iterator over the results from applying this QuerySet to the
        database.
        """
        fill_cache = False
        if connections[self.db].features.supports_select_related:
            fill_cache = self.query.select_related
        if isinstance(fill_cache, dict):
            requested = fill_cache
        else:
            requested = None
        max_depth = self.query.max_depth

        extra_select = list(self.query.extra_select)
        aggregate_select = list(self.query.aggregate_select)

        only_load = self.query.get_loaded_field_names()
        if not fill_cache:
            fields = self.model._meta.concrete_fields

        load_fields = []
        # If only/defer clauses have been specified,
        # build the list of fields that are to be loaded.
        if only_load:
            for field, model in self.model._meta.get_concrete_fields_with_model():
                if model is None:
                    model = self.model
                try:
                    if field.name in only_load[model]:
                        # Add a field that has been explicitly included
                        load_fields.append(field.name)
                except KeyError:
                    # Model wasn't explicitly listed in the only_load table
                    # Therefore, we need to load all fields from this model
                    load_fields.append(field.name)

        index_start = len(extra_select)
        aggregate_start = index_start + len(load_fields or self.model._meta.concrete_fields)

        skip = None
        if load_fields and not fill_cache:
            # Some fields have been deferred, so we have to initialize
            # via keyword arguments.
            skip = set()
            init_list = []
            for field in fields:
                if field.name not in load_fields:
                    skip.add(field.attname)
                else:
                    init_list.append(field.attname)
            model_cls = deferred_class_factory(self.model, skip)

        # Cache db and model outside the loop
        db = self.db
        model = self.model
        compiler = self.query.get_compiler(using=db)
        if fill_cache:
            klass_info = get_klass_info(model, max_depth=max_depth,
                                        requested=requested, only_load=only_load)
        for row in compiler.results_iter():
            if fill_cache:
                obj, _ = get_cached_row(row, index_start, db, klass_info,
                                        offset=len(aggregate_select))
            else:
                # Omit aggregates in object creation.
                row_data = row[index_start:aggregate_start]
                if skip:
                    obj = model_cls(**dict(zip(init_list, row_data)))
                else:
                    obj = model(*row_data)

                # Store the source database of the object
                obj._state.db = db
                # This object came from the database; it's not being added.
                obj._state.adding = False

            if extra_select:
                for i, k in enumerate(extra_select):
                    setattr(obj, k, row[i])

            # Add the aggregates to the model
            if aggregate_select:
                for i, aggregate in enumerate(aggregate_select):
                    setattr(obj, aggregate, row[i + aggregate_start])

            # Add the known related objects to the model, if there are any
            if self._known_related_objects:
                for field, rel_objs in self._known_related_objects.items():
                    pk = getattr(obj, field.get_attname())
                    try:
                        rel_obj = rel_objs[pk]
                    except KeyError:
                        pass  # may happen in qs1 | qs2 scenarios
                    else:
                        setattr(obj, field.name, rel_obj)

            yield obj

    def aggregate(self, *args, **kwargs):
        """
        Returns a dictionary containing the calculations (aggregation)
        over the current queryset.

        If args is present the expression is passed as a kwarg using
        the Aggregate object's default alias.
        """
        if self.query.distinct_fields:
            raise NotImplementedError("aggregate() + distinct(fields) not implemented.")
        for arg in args:
            kwargs[arg.default_alias] = arg

        query = self.query.clone()
        force_subq = query.low_mark != 0 or query.high_mark is not None
        for (alias, aggregate_expr) in kwargs.items():
            query.add_aggregate(aggregate_expr, self.model, alias,
                                is_summary=True)
        return query.get_aggregation(using=self.db, force_subq=force_subq)
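
    # Usage sketch (hedged): `Book` is a hypothetical model with a `price`
    # field. Positional aggregates are keyed by their default alias, per the
    # loop above:
    #
    #     from django.db.models import Avg, Max
    #     Book.objects.aggregate(Avg('price'), highest=Max('price'))
    #     # -> {'price__avg': ..., 'highest': ...}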

    def count(self):
        """
        Performs a SELECT COUNT() and returns the number of records as an
        integer.

        If the QuerySet is already fully cached this simply returns the length
        of the cached result set to avoid multiple SELECT COUNT(*) calls.
        """
        if self._result_cache is not None:
            return len(self._result_cache)

        return self.query.get_count(using=self.db)

    def get(self, *args, **kwargs):
        """
        Performs the query and returns a single object matching the given
        keyword arguments.
        """
        clone = self.filter(*args, **kwargs)
        if self.query.can_filter():
            clone = clone.order_by()
        clone = clone[:MAX_GET_RESULTS + 1]
        num = len(clone)
        if num == 1:
            return clone._result_cache[0]
        if not num:
            raise self.model.DoesNotExist(
                "%s matching query does not exist." %
                self.model._meta.object_name)
        raise self.model.MultipleObjectsReturned(
            "get() returned more than one %s -- it returned %s!" % (
                self.model._meta.object_name,
                num if num <= MAX_GET_RESULTS else 'more than %s' % MAX_GET_RESULTS
            )
        )
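
    # Usage sketch (hedged): callers are expected to handle both failure
    # modes raised above. `Entry` is a hypothetical model:
    #
    #     try:
    #         entry = Entry.objects.get(pk=1)
    #     except Entry.DoesNotExist:
    #         ...  # no row matched the filters
    #     except Entry.MultipleObjectsReturned:
    #         ...  # the filters matched more than one row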

    def create(self, **kwargs):
        """
        Creates a new object with the given kwargs, saving it to the database
        and returning the created object.
        """
        obj = self.model(**kwargs)
        self._for_write = True
        obj.save(force_insert=True, using=self.db)
        return obj

    def bulk_create(self, objs, batch_size=None):
        """
        Inserts each of the instances into the database. This does *not* call
        save() on each of the instances, does not send any pre/post save
        signals, and does not set the primary key attribute if it is an
        autoincrement field.
        """
        # So this case is fun. When you bulk insert you don't get the primary
        # keys back (if it's an autoincrement), so you can't insert into the
        # child tables which reference this. There are two workarounds: 1)
        # this could be implemented if you didn't have an autoincrement pk,
        # and 2) you could do it by doing O(n) normal inserts into the parent
        # tables to get the primary keys back, and then doing a single bulk
        # insert into the childmost table. Some databases might allow doing
        # this by using RETURNING clause for the insert query. We're punting
        # on these for now because they are relatively rare cases.
        assert batch_size is None or batch_size > 0
        if self.model._meta.parents:
            raise ValueError("Can't bulk create an inherited model")
        if not objs:
            return objs
        self._for_write = True
        connection = connections[self.db]
        fields = self.model._meta.local_concrete_fields
        with transaction.atomic(using=self.db, savepoint=False):
            if (connection.features.can_combine_inserts_with_and_without_auto_increment_pk
                    and self.model._meta.has_auto_field):
                self._batched_insert(objs, fields, batch_size)
            else:
                objs_with_pk, objs_without_pk = partition(lambda o: o.pk is None, objs)
                if objs_with_pk:
                    self._batched_insert(objs_with_pk, fields, batch_size)
                if objs_without_pk:
                    fields = [f for f in fields if not isinstance(f, AutoField)]
                    self._batched_insert(objs_without_pk, fields, batch_size)

        return objs
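
    # Usage sketch (hedged): `Entry` is a hypothetical model. Per the
    # docstring above, no save() calls are made, no signals fire, and
    # autoincrement pks are not set on the instances:
    #
    #     Entry.objects.bulk_create([
    #         Entry(headline='First'),
    #         Entry(headline='Second'),
    #     ], batch_size=100)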

    def get_or_create(self, defaults=None, **kwargs):
        """
        Looks up an object with the given kwargs, creating one if necessary.
        Returns a tuple of (object, created), where created is a boolean
        specifying whether an object was created.
        """
        lookup, params = self._extract_model_params(defaults, **kwargs)
        self._for_write = True
        try:
            return self.get(**lookup), False
        except self.model.DoesNotExist:
            return self._create_object_from_params(lookup, params)

    def update_or_create(self, defaults=None, **kwargs):
        """
        Looks up an object with the given kwargs, updating it with defaults
        if it exists; otherwise creates a new one.
        Returns a tuple (object, created), where created is a boolean
        specifying whether an object was created.
        """
        defaults = defaults or {}
        lookup, params = self._extract_model_params(defaults, **kwargs)
        self._for_write = True
        try:
            obj = self.get(**lookup)
        except self.model.DoesNotExist:
            obj, created = self._create_object_from_params(lookup, params)
            if created:
                return obj, created
        for k, v in six.iteritems(defaults):
            setattr(obj, k, v)

        with transaction.atomic(using=self.db, savepoint=False):
            obj.save(using=self.db)
        return obj, False
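
    # Usage sketch (hedged): `Person` is a hypothetical model. The lookup
    # kwargs select the row; `defaults` supplies values used when creating
    # (get_or_create) or when creating/updating (update_or_create):
    #
    #     from datetime import date
    #     obj, created = Person.objects.get_or_create(
    #         first_name='John', last_name='Lennon',
    #         defaults={'birthday': date(1940, 10, 9)})
    #     obj, created = Person.objects.update_or_create(
    #         first_name='John', last_name='Lennon',
    #         defaults={'birthday': date(1940, 10, 9)})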

    def _create_object_from_params(self, lookup, params):
        """
        Tries to create an object using passed params.
        Used by get_or_create and update_or_create.
        """
        obj = self.model(**params)
        try:
            with transaction.atomic(using=self.db):
                obj.save(force_insert=True, using=self.db)
            return obj, True
        except IntegrityError:
            exc_info = sys.exc_info()
            try:
                return self.get(**lookup), False
            except self.model.DoesNotExist:
                pass
            six.reraise(*exc_info)

    def _extract_model_params(self, defaults, **kwargs):
        """
        Prepares `lookup` (kwargs that are valid model attributes) and
        `params` (for creating a model instance) based on the given kwargs;
        for use by get_or_create and update_or_create.
        """
        defaults = defaults or {}
        lookup = kwargs.copy()
        for f in self.model._meta.fields:
            if f.attname in lookup:
                lookup[f.name] = lookup.pop(f.attname)
        params = dict((k, v) for k, v in kwargs.items() if LOOKUP_SEP not in k)
        params.update(defaults)
        return lookup, params

    def _earliest_or_latest(self, field_name=None, direction="-"):
        """
        Returns the latest object, according to the model's
        'get_latest_by' option or the optionally given field_name.
        """
        order_by = field_name or getattr(self.model._meta, 'get_latest_by')
        assert bool(order_by), "earliest() and latest() require either a "\
            "field_name parameter or 'get_latest_by' in the model"
        assert self.query.can_filter(), \
            "Cannot change a query once a slice has been taken."
        obj = self._clone()
        obj.query.set_limits(high=1)
        obj.query.clear_ordering(force_empty=True)
        obj.query.add_ordering('%s%s' % (direction, order_by))
        return obj.get()

    def earliest(self, field_name=None):
        return self._earliest_or_latest(field_name=field_name, direction="")

    def latest(self, field_name=None):
        return self._earliest_or_latest(field_name=field_name, direction="-")

    def first(self):
        """
        Returns the first object of a query, or None if no match is found.
        """
        qs = self if self.ordered else self.order_by('pk')
        try:
            return qs[0]
        except IndexError:
            return None

    def last(self):
        """
        Returns the last object of a query, or None if no match is found.
        """
        qs = self.reverse() if self.ordered else self.order_by('-pk')
        try:
            return qs[0]
        except IndexError:
            return None

    def in_bulk(self, id_list):
        """
        Returns a dictionary mapping each of the given IDs to the object with
        that ID.
        """
        assert self.query.can_filter(), \
            "Cannot use 'limit' or 'offset' with in_bulk"
        if not id_list:
            return {}
        qs = self.filter(pk__in=id_list).order_by()
        return dict((obj._get_pk_val(), obj) for obj in qs)
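
    # Usage sketch (hedged), with a hypothetical `Entry` model:
    #
    #     Entry.objects.in_bulk([1, 2])
    #     # -> {1: <Entry ...>, 2: <Entry ...>}
    #
    # IDs with no matching row are simply absent from the result, and an
    # empty id_list short-circuits to {} without hitting the database.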

    def delete(self):
        """
        Deletes the records in the current QuerySet.
        """
        assert self.query.can_filter(), \
            "Cannot use 'limit' or 'offset' with delete."

        del_query = self._clone()

        # The delete is actually 2 queries - one to find related objects,
        # and one to delete. Make sure that the discovery of related
        # objects is performed on the same database as the deletion.
        del_query._for_write = True

        # Disable non-supported fields.
        del_query.query.select_for_update = False
        del_query.query.select_related = False
        del_query.query.clear_ordering(force_empty=True)

        collector = Collector(using=del_query.db)
        collector.collect(del_query)
        collector.delete()

        # Clear the result cache, in case this QuerySet gets reused.
        self._result_cache = None
    delete.alters_data = True
    delete.queryset_only = True

    def _raw_delete(self, using):
        """
        Deletes objects found from the given queryset in a single direct SQL
        query. No signals are sent, and there is no protection for cascades.
        """
        sql.DeleteQuery(self.model).delete_qs(self, using)
    _raw_delete.alters_data = True

    def update(self, **kwargs):
        """
        Updates all elements in the current QuerySet, setting all the given
        fields to the appropriate values.
        """
        assert self.query.can_filter(), \
            "Cannot update a query once a slice has been taken."
        self._for_write = True
        query = self.query.clone(sql.UpdateQuery)
        query.add_update_values(kwargs)
        with transaction.atomic(using=self.db, savepoint=False):
            rows = query.get_compiler(self.db).execute_sql(CURSOR)
        self._result_cache = None
        return rows
    update.alters_data = True

    def _update(self, values):
        """
        A version of update() that accepts field objects instead of field
        names. Used primarily for model saving and not intended for use by
        general code (it requires too much poking around at model internals
        to be useful at that level).
        """
        assert self.query.can_filter(), \
            "Cannot update a query once a slice has been taken."
        query = self.query.clone(sql.UpdateQuery)
        query.add_update_fields(values)
        self._result_cache = None
        return query.get_compiler(self.db).execute_sql(CURSOR)
    _update.alters_data = True
    _update.queryset_only = False

    def exists(self):
        if self._result_cache is None:
            return self.query.has_results(using=self.db)
        return bool(self._result_cache)
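
    # Usage sketch (hedged): when the result cache is cold, exists() issues
    # a cheap "are there any rows" query instead of fetching results, so
    # prefer it over bool(qs) or len(qs) when the objects themselves aren't
    # needed. `Entry` is a hypothetical model:
    #
    #     if Entry.objects.filter(headline__startswith='Lennon').exists():
    #         ...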

    def _prefetch_related_objects(self):
        # This method can only be called once the result cache has been filled.
        prefetch_related_objects(self._result_cache, self._prefetch_related_lookups)
        self._prefetch_done = True

    ##################################################
    # PUBLIC METHODS THAT RETURN A QUERYSET SUBCLASS #
    ##################################################

    def raw(self, raw_query, params=None, translations=None, using=None):
        if using is None:
            using = self.db
        return RawQuerySet(raw_query, model=self.model,
                params=params, translations=translations,
                using=using)

    def values(self, *fields):
        return self._clone(klass=ValuesQuerySet, setup=True, _fields=fields)

    def values_list(self, *fields, **kwargs):
        flat = kwargs.pop('flat', False)
        if kwargs:
            raise TypeError('Unexpected keyword arguments to values_list: %s'
                    % (list(kwargs),))
        if flat and len(fields) > 1:
            raise TypeError("'flat' is not valid when values_list is called with more than one field.")
        return self._clone(klass=ValuesListQuerySet, setup=True, flat=flat,
                _fields=fields)
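
    # Usage sketch (hedged): `Entry` is a hypothetical model. values()
    # evaluates to dicts, values_list() to tuples (or bare values with
    # flat=True, which the check above restricts to a single field):
    #
    #     Entry.objects.values('id', 'headline')
    #     # -> [{'id': 1, 'headline': '...'}, ...]
    #     Entry.objects.values_list('id', 'headline')
    #     # -> [(1, '...'), ...]
    #     Entry.objects.values_list('id', flat=True)
    #     # -> [1, 2, ...]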

    def dates(self, field_name, kind, order='ASC'):
        """
        Returns a list of date objects representing all available dates for
        the given field_name, scoped to 'kind'.
        """
        assert kind in ("year", "month", "day"), \
            "'kind' must be one of 'year', 'month' or 'day'."
        assert order in ('ASC', 'DESC'), \
            "'order' must be either 'ASC' or 'DESC'."
        return self._clone(klass=DateQuerySet, setup=True,
             _field_name=field_name, _kind=kind, _order=order)

    def datetimes(self, field_name, kind, order='ASC', tzinfo=None):
        """
        Returns a list of datetime objects representing all available
        datetimes for the given field_name, scoped to 'kind'.
        """
        assert kind in ("year", "month", "day", "hour", "minute", "second"), \
            "'kind' must be one of 'year', 'month', 'day', 'hour', 'minute' or 'second'."
        assert order in ('ASC', 'DESC'), \
            "'order' must be either 'ASC' or 'DESC'."
        if settings.USE_TZ:
            if tzinfo is None:
                tzinfo = timezone.get_current_timezone()
        else:
            tzinfo = None
        return self._clone(klass=DateTimeQuerySet, setup=True,
                _field_name=field_name, _kind=kind, _order=order, _tzinfo=tzinfo)

    def none(self):
        """
        Returns an empty QuerySet.
        """
        clone = self._clone()
        clone.query.set_empty()
        return clone

    ##################################################################
    # PUBLIC METHODS THAT ALTER ATTRIBUTES AND RETURN A NEW QUERYSET #
    ##################################################################

    def all(self):
        """
        Returns a new QuerySet that is a copy of the current one. This allows
        a QuerySet to proxy for a model manager in some cases.
        """
        return self._clone()

    def filter(self, *args, **kwargs):
        """
        Returns a new QuerySet instance with the args ANDed to the existing
        set.
        """
        return self._filter_or_exclude(False, *args, **kwargs)

    def exclude(self, *args, **kwargs):
        """
        Returns a new QuerySet instance with NOT (args) ANDed to the existing
        set.
        """
        return self._filter_or_exclude(True, *args, **kwargs)

    def _filter_or_exclude(self, negate, *args, **kwargs):
        if args or kwargs:
            assert self.query.can_filter(), \
                "Cannot filter a query once a slice has been taken."

        clone = self._clone()
        if negate:
            clone.query.add_q(~Q(*args, **kwargs))
        else:
            clone.query.add_q(Q(*args, **kwargs))
        return clone

    def complex_filter(self, filter_obj):
        """
        Returns a new QuerySet instance with filter_obj added to the filters.

        filter_obj can be a Q object (or anything with an add_to_query()
        method) or a dictionary of keyword lookup arguments.

        This exists to support framework features such as 'limit_choices_to',
        and usually it will be more natural to use other methods.
        """
        if isinstance(filter_obj, Q) or hasattr(filter_obj, 'add_to_query'):
            clone = self._clone()
            clone.query.add_q(filter_obj)
            return clone
        else:
            return self._filter_or_exclude(None, **filter_obj)

    def select_for_update(self, nowait=False):
        """
        Returns a new QuerySet instance that will select objects with a
        FOR UPDATE lock.
        """
        obj = self._clone()
        obj._for_write = True
        obj.query.select_for_update = True
        obj.query.select_for_update_nowait = nowait
        return obj

    def select_related(self, *fields):
        """
        Returns a new QuerySet instance that will select related objects.

        If fields are specified, they must be ForeignKey fields and only those
        related objects are included in the selection.

        If select_related(None) is called, the list is cleared.
        """
        obj = self._clone()
        if fields == (None,):
            obj.query.select_related = False
        elif fields:
            obj.query.add_select_related(fields)
        else:
            obj.query.select_related = True
        return obj

    def prefetch_related(self, *lookups):
        """
        Returns a new QuerySet instance that will prefetch the specified
        Many-To-One and Many-To-Many related objects when the QuerySet is
        evaluated.

        When prefetch_related() is called more than once, the list of lookups
        to prefetch is appended to. If prefetch_related(None) is called, the
        list is cleared.
        """
        clone = self._clone()
        if lookups == (None,):
            clone._prefetch_related_lookups = []
        else:
            clone._prefetch_related_lookups.extend(lookups)
        return clone
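
    # Usage sketch (hedged): select_related() follows foreign keys inside a
    # single JOINed query, while prefetch_related() runs one extra query per
    # lookup and does the joining in Python. `Entry` and `Pizza` are
    # hypothetical models:
    #
    #     Entry.objects.select_related('blog')        # FK: one query, JOINed
    #     Pizza.objects.prefetch_related('toppings')  # M2M: two queries
    #     Pizza.objects.prefetch_related(None)        # clears the lookups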

    def annotate(self, *args, **kwargs):
        """
        Returns a QuerySet in which the returned objects have been annotated
        with data aggregated from related fields.
        """
        for arg in args:
            if arg.default_alias in kwargs:
                raise ValueError("The named annotation '%s' conflicts with the "
                                 "default name for another annotation."
                                 % arg.default_alias)
            kwargs[arg.default_alias] = arg

        names = getattr(self, '_fields', None)
        if names is None:
            names = set(self.model._meta.get_all_field_names())
        for aggregate in kwargs:
            if aggregate in names:
                raise ValueError("The annotation '%s' conflicts with a field on "
                    "the model." % aggregate)

        obj = self._clone()

        obj._setup_aggregate_query(list(kwargs))

        # Add the aggregates to the query
        for (alias, aggregate_expr) in kwargs.items():
            obj.query.add_aggregate(aggregate_expr, self.model, alias,
                is_summary=False)

        return obj
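
    # Usage sketch (hedged): `Blog` is a hypothetical model with a reverse
    # `entry` relation. Positional annotations use the aggregate's default
    # alias, per the code above:
    #
    #     from django.db.models import Count
    #     blogs = Blog.objects.annotate(Count('entry'))
    #     blogs[0].entry__count                     # default alias
    #     Blog.objects.annotate(n=Count('entry'))   # explicit alias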

    def order_by(self, *field_names):
        """
        Returns a new QuerySet instance with the ordering changed.
        """
        assert self.query.can_filter(), \
            "Cannot reorder a query once a slice has been taken."
        obj = self._clone()
        obj.query.clear_ordering(force_empty=False)
        obj.query.add_ordering(*field_names)
        return obj

    def distinct(self, *field_names):
        """
        Returns a new QuerySet instance that will select only distinct
        results.
        """
        assert self.query.can_filter(), \
            "Cannot create distinct fields once a slice has been taken."
        obj = self._clone()
        obj.query.add_distinct_fields(*field_names)
        return obj

    def extra(self, select=None, where=None, params=None, tables=None,
              order_by=None, select_params=None):
        """
        Adds extra SQL fragments to the query.
        """
        assert self.query.can_filter(), \
            "Cannot change a query once a slice has been taken"
        clone = self._clone()
        clone.query.add_extra(select, select_params, where, params, tables, order_by)
        return clone

    def reverse(self):
        """
        Reverses the ordering of the QuerySet.
        """
        clone = self._clone()
        clone.query.standard_ordering = not clone.query.standard_ordering
        return clone

    def defer(self, *fields):
        """
        Defers the loading of data for certain fields until they are accessed.
        The set of fields to defer is added to any existing set of deferred
        fields. The only exception to this is if None is passed in as the only
        parameter, in which case all deferrals are removed (None acts as a
        reset option).
        """
        clone = self._clone()
        if fields == (None,):
            clone.query.clear_deferred_loading()
        else:
            clone.query.add_deferred_loading(fields)
        return clone

    def only(self, *fields):
        """
        Essentially, the opposite of defer. Only the fields passed into this
        method and that are not already specified as deferred are loaded
        immediately when the queryset is evaluated.
        """
        if fields == (None,):
            # Can only pass None to defer(), not only(), as the rest option.
            # That won't stop people trying to do this, so let's be explicit.
            raise TypeError("Cannot pass None as an argument to only().")
        clone = self._clone()
        clone.query.add_immediate_loading(fields)
        return clone
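
    # Usage sketch (hedged): defer() and only() are mirror images; deferred
    # fields are loaded lazily, via an extra query on first access. `Entry`
    # is a hypothetical model:
    #
    #     Entry.objects.defer('body')      # load everything except `body` now
    #     Entry.objects.only('headline')   # load only `headline` now
    #     Entry.objects.defer(None)        # reset all deferrals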

    def using(self, alias):
        """
        Selects which database this QuerySet should execute its query against.
        """
        clone = self._clone()
        clone._db = alias
        return clone

    ###################################
    # PUBLIC INTROSPECTION ATTRIBUTES #
    ###################################

    def ordered(self):
        """
        Returns True if the QuerySet is ordered -- i.e. has an order_by()
        clause or a default ordering on the model.
        """
        if self.query.extra_order_by or self.query.order_by:
            return True
        elif self.query.default_ordering and self.query.get_meta().ordering:
            return True
        else:
            return False
    ordered = property(ordered)

    @property
    def db(self):
        "Return the database that will be used if this query is executed now"
        if self._for_write:
            return self._db or router.db_for_write(self.model, **self._hints)
        return self._db or router.db_for_read(self.model, **self._hints)

    ###################
    # PRIVATE METHODS #
    ###################

    def _insert(self, objs, fields, return_id=False, raw=False, using=None):
        """
        Inserts a new record for the given model. This provides an interface
        to the InsertQuery class and is how Model.save() is implemented.
        """
        self._for_write = True
        if using is None:
            using = self.db
        query = sql.InsertQuery(self.model)
        query.insert_values(fields, objs, raw=raw)
        return query.get_compiler(using=using).execute_sql(return_id)
    _insert.alters_data = True
    _insert.queryset_only = False

    def _batched_insert(self, objs, fields, batch_size):
        """
        A helper method for bulk_create() to insert objs one batch at a time.
        Slices a batch from the front of objs and inserts it, repeating until
        all objects have been inserted.
        """
        if not objs:
            return
        ops = connections[self.db].ops
        batch_size = (batch_size or max(ops.bulk_batch_size(fields, objs), 1))
        for batch in [objs[i:i + batch_size]
                      for i in range(0, len(objs), batch_size)]:
            self.model._base_manager._insert(batch, fields=fields,
                                             using=self.db)
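
    # Illustrative note (hedged): the batching above is a plain slice loop.
    # With batch_size=2 and five objects, the inserted slices are objs[0:2],
    # objs[2:4] and objs[4:6], each handed to _insert() separately.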

    def _clone(self, klass=None, setup=False, **kwargs):
        if klass is None:
            klass = self.__class__
        elif not issubclass(self.__class__, klass):
            base_queryset_class = getattr(self, '_base_queryset_class', self.__class__)
            class_bases = (klass, base_queryset_class)
            class_dict = {
                '_base_queryset_class': base_queryset_class,
                '_specialized_queryset_class': klass,
            }
            klass = type(klass.__name__, class_bases, class_dict)

        query = self.query.clone()
        if self._sticky_filter:
            query.filter_is_sticky = True
        c = klass(model=self.model, query=query, using=self._db, hints=self._hints)
        c._for_write = self._for_write
        c._prefetch_related_lookups = self._prefetch_related_lookups[:]
        c._known_related_objects = self._known_related_objects
        c.__dict__.update(kwargs)
        if setup and hasattr(c, '_setup_query'):
            c._setup_query()
        return c

    def _fetch_all(self):
        if self._result_cache is None:
            self._result_cache = list(self.iterator())
        if self._prefetch_related_lookups and not self._prefetch_done:
            self._prefetch_related_objects()

    def _next_is_sticky(self):
        """
        Indicates that the next filter call and the one following that should
        be treated as a single filter. This is only important when it comes to
        determining when to reuse tables for many-to-many filters. Required so
        that we can filter naturally on the results of related managers.

        This doesn't return a clone of the current QuerySet (it returns
        "self"). The method is only used internally and should be immediately
        followed by a filter() that does create a clone.
        """
        self._sticky_filter = True
        return self

    def _merge_sanity_check(self, other):
        """
        Checks that we are merging two comparable QuerySet classes. By default
        this does nothing, but see the ValuesQuerySet for an example of where
        it's useful.
        """
        pass

    def _merge_known_related_objects(self, other):
        """
        Keep track of all known related objects from either QuerySet instance.
        """
        for field, objects in other._known_related_objects.items():
            self._known_related_objects.setdefault(field, {}).update(objects)

    def _setup_aggregate_query(self, aggregates):
        """
        Prepares the query for computing a result that contains aggregate
        annotations.
        """
        opts = self.model._meta
        if self.query.group_by is None:
            field_names = [f.attname for f in opts.concrete_fields]
            self.query.add_fields(field_names, False)
            self.query.set_group_by()

    def _prepare(self):
        return self

    def _as_sql(self, connection):
        """
        Returns the internal query's SQL and parameters (as a tuple).
        """
        obj = self.values("pk")
        if obj._db is None or connection == connections[obj._db]:
            return obj.query.get_compiler(connection=connection).as_nested_sql()
        raise ValueError("Can't do subqueries with queries on different DBs.")

    # When used as part of a nested query, a queryset will never be an "always
    # empty" result.
    value_annotation = True

    def _add_hints(self, **hints):
        """
        Updates hinting information for later use by routers.
        """
        # If there is any hinting information, add it to what we already know.
        # If we have a new hint for an existing key, overwrite with the new value.
        self._hints.update(hints)

    def _has_filters(self):
        """
        Checks if this QuerySet has any filtering going on. Note that this
        isn't equivalent to checking whether all objects are present in the
        results; for example, qs[1:]._has_filters() -> False.
        """
        return self.query.has_filters()


class InstanceCheckMeta(type):
    def __instancecheck__(self, instance):
        return instance.query.is_empty()


class EmptyQuerySet(six.with_metaclass(InstanceCheckMeta)):
    """
    Marker class usable for checking if a queryset is empty by .none():
    isinstance(qs.none(), EmptyQuerySet) -> True
    """

    def __init__(self, *args, **kwargs):
        raise TypeError("EmptyQuerySet can't be instantiated")


class ValuesQuerySet(QuerySet):
    def __init__(self, *args, **kwargs):
        super(ValuesQuerySet, self).__init__(*args, **kwargs)
        # select_related isn't supported in values(). (FIXME -#3358)
        self.query.select_related = False

        # QuerySet.clone() will also set up the _fields attribute with the
        # names of the model fields to select.

    def only(self, *fields):
        raise NotImplementedError("ValuesQuerySet does not implement only()")

    def defer(self, *fields):
        raise NotImplementedError("ValuesQuerySet does not implement defer()")

    def iterator(self):
        # Purge any extra columns that haven't been explicitly asked for
        extra_names = list(self.query.extra_select)
        field_names = self.field_names
        aggregate_names = list(self.query.aggregate_select)

        names = extra_names + field_names + aggregate_names

        for row in self.query.get_compiler(self.db).results_iter():
            yield dict(zip(names, row))
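
    # Illustrative note (hedged): the zip above pairs column names with row
    # values, so names ['id', 'headline'] and a row (1, 'First') yield
    # {'id': 1, 'headline': 'First'}.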

    def delete(self):
        # values().delete() doesn't work currently - make sure it raises a
        # user-friendly error.
        raise TypeError("Queries with .values() or .values_list() applied "
                        "can't be deleted")

    def _setup_query(self):
        """
        Constructs the field_names list that the values query will be
        retrieving.

        Called by the _clone() method after initializing the rest of the
        instance.
        """
        self.query.clear_deferred_loading()
        self.query.clear_select_fields()

        if self._fields:
            self.extra_names = []
            self.aggregate_names = []
            if not self.query._extra and not self.query._aggregates:
                # Shortcut - if there are no extra or aggregates, then
                # the values() clause must be just field names.
                self.field_names = list(self._fields)
            else:
                self.query.default_cols = False
                self.field_names = []
                for f in self._fields:
                    # We inspect the full extra_select list since we might
                    # be adding back an extra select item that we had not
                    # previously selected.
                    if self.query._extra and f in self.query._extra:
                        self.extra_names.append(f)
                    elif f in self.query.aggregate_select:
                        self.aggregate_names.append(f)
                    else:
                        self.field_names.append(f)
        else:
            # Default to all fields.
            self.extra_names = None
            self.field_names = [f.attname for f in self.model._meta.concrete_fields]
            self.aggregate_names = None

        self.query.select = []
        if self.extra_names is not None:
            self.query.set_extra_mask(self.extra_names)
        self.query.add_fields(self.field_names, True)
        if self.aggregate_names is not None:
            self.query.set_aggregate_mask(self.aggregate_names)

    def _clone(self, klass=None, setup=False, **kwargs):
        """
        Cloning a ValuesQuerySet preserves the current fields.
        """
        c = super(ValuesQuerySet, self)._clone(klass, **kwargs)
        if not hasattr(c, '_fields'):
            # Only clone self._fields if _fields wasn't passed into the cloning
            # call directly.
            c._fields = self._fields[:]
        c.field_names = self.field_names
        c.extra_names = self.extra_names
        c.aggregate_names = self.aggregate_names
        if setup and hasattr(c, '_setup_query'):
            c._setup_query()
        return c

    def _merge_sanity_check(self, other):
        super(ValuesQuerySet, self)._merge_sanity_check(other)
        if (set(self.extra_names) != set(other.extra_names) or
                set(self.field_names) != set(other.field_names) or
                self.aggregate_names != other.aggregate_names):
            raise TypeError("Merging '%s' classes must involve the same values in each case."
                    % self.__class__.__name__)

    def _setup_aggregate_query(self, aggregates):
        """
        Prepares the query for computing a result that contains aggregate
        annotations.
        """
        self.query.set_group_by()

        if self.aggregate_names is not None:
            self.aggregate_names.extend(aggregates)
            self.query.set_aggregate_mask(self.aggregate_names)

        super(ValuesQuerySet, self)._setup_aggregate_query(aggregates)

    def _as_sql(self, connection):
        """
        A ValuesQuerySet (or a subclass such as ValuesListQuerySet) can only
        be used as a nested query if it is already set up to select only a
        single field (in which case, that is the field column that is
        returned). This differs from QuerySet.as_sql(), where the column to
        select is set up by Django.
        """
        if ((self._fields and len(self._fields) > 1) or
                (not self._fields and len(self.model._meta.fields) > 1)):
            raise TypeError('Cannot use a multi-field %s as a filter value.'
                    % self.__class__.__name__)

        obj = self._clone()
        if obj._db is None or connection == connections[obj._db]:
            return obj.query.get_compiler(connection=connection).as_nested_sql()
        raise ValueError("Can't do subqueries with queries on different DBs.")

    def _prepare(self):
        """
        Validates that we aren't trying to do a query like
        value__in=qs.values('value1', 'value2'), which isn't valid.
        """
        if ((self._fields and len(self._fields) > 1) or
                (not self._fields and len(self.model._meta.fields) > 1)):
            raise TypeError('Cannot use a multi-field %s as a filter value.'
                    % self.__class__.__name__)
        return self


class ValuesListQuerySet(ValuesQuerySet):
    def iterator(self):
        if self.flat and len(self._fields) == 1:
            for row in self.query.get_compiler(self.db).results_iter():
                yield row[0]
        elif not self.query.extra_select and not self.query.aggregate_select:
            for row in self.query.get_compiler(self.db).results_iter():
                yield tuple(row)
        else:
            # When extra(select=...) or an annotation is involved, the extra
            # cols are always at the start of the row, and we need to reorder
            # the fields to match the order in self._fields.
            extra_names = list(self.query.extra_select)
            field_names = self.field_names
            aggregate_names = list(self.query.aggregate_select)

            names = extra_names + field_names + aggregate_names

            # If a field list has been specified, use it. Otherwise, use the
            # full list of fields, including extras and aggregates.
            if self._fields:
                fields = list(self._fields) + [f for f in aggregate_names if f not in self._fields]
            else:
                fields = names

            for row in self.query.get_compiler(self.db).results_iter():
                data = dict(zip(names, row))
                yield tuple(data[f] for f in fields)

    def _clone(self, *args, **kwargs):
        clone = super(ValuesListQuerySet, self)._clone(*args, **kwargs)
        if not hasattr(clone, "flat"):
            # Only assign flat if the clone didn't already get it from kwargs
            clone.flat = self.flat
        return clone


class DateQuerySet(QuerySet):
    def iterator(self):
        return self.query.get_compiler(self.db).results_iter()

    def _setup_query(self):
        """
        Sets up any special features of the query attribute.

        Called by the _clone() method after initializing the rest of the
        instance.
        """
        self.query.clear_deferred_loading()
        self.query = self.query.clone(klass=sql.DateQuery, setup=True)
        self.query.select = []
        self.query.add_select(self._field_name, self._kind, self._order)

    def _clone(self, klass=None, setup=False, **kwargs):
        c = super(DateQuerySet, self)._clone(klass, False, **kwargs)
        c._field_name = self._field_name
        c._kind = self._kind
        if setup and hasattr(c, '_setup_query'):
            c._setup_query()
        return c


class DateTimeQuerySet(QuerySet):
    def iterator(self):
        return self.query.get_compiler(self.db).results_iter()

    def _setup_query(self):
        """
        Sets up any special features of the query attribute.

        Called by the _clone() method after initializing the rest of the
        instance.
        """
        self.query.clear_deferred_loading()
        self.query = self.query.clone(klass=sql.DateTimeQuery, setup=True, tzinfo=self._tzinfo)
        self.query.select = []
        self.query.add_select(self._field_name, self._kind, self._order)

    def _clone(self, klass=None, setup=False, **kwargs):
        c = super(DateTimeQuerySet, self)._clone(klass, False, **kwargs)
        c._field_name = self._field_name
        c._kind = self._kind
        c._tzinfo = self._tzinfo
        if setup and hasattr(c, '_setup_query'):
            c._setup_query()
        return c


def get_klass_info(klass, max_depth=0, cur_depth=0, requested=None,
                   only_load=None, from_parent=None):
    """
    Helper function that recursively returns information for a klass, to be
    used in get_cached_row. It exists just to compute this information only
    once for the entire queryset; otherwise it would be computed for each
    row, which leads to poor performance on large querysets.

    Arguments:
     * klass - the class to retrieve (and instantiate)
     * max_depth - the maximum depth to which a select_related()
       relationship should be explored.
     * cur_depth - the current depth in the select_related() tree.
       Used in recursive calls to determine if we should dig deeper.
     * requested - A dictionary describing the select_related() tree
       that is to be retrieved. Keys are field names; values are
       dictionaries describing the keys on that related object that
       are themselves to be select_related().
     * only_load - if the query has had only() or defer() applied,
       this is the list of field names that will be returned. If None,
       the full field list for `klass` can be assumed.
     * from_parent - the parent model used to get to this model

    Note that when travelling from parent to child, we will only load child
    fields which aren't in the parent.
    """
    if max_depth and requested is None and cur_depth > max_depth:
        # We've recursed deeply enough; stop now.
        return None

    if only_load:
        load_fields = only_load.get(klass) or set()
        # When we create the object, we will also be populating all the
        # parent classes, so traverse the parent classes looking for fields
        # that must be included on load.
        for parent in klass._meta.get_parent_list():
            fields = only_load.get(parent)
            if fields:
                load_fields.update(fields)
    else:
        load_fields = None

    if load_fields:
        # Handle deferred fields.
        skip = set()
        init_list = []
        # Build the list of fields that *haven't* been requested
        for field, model in klass._meta.get_concrete_fields_with_model():
            if field.name not in load_fields:
                skip.add(field.attname)
            elif from_parent and issubclass(from_parent, model.__class__):
                # Avoid loading fields already loaded for parent model for
                # child models.
                continue
            else:
                init_list.append(field.attname)
        # Retrieve all the requested fields
        field_count = len(init_list)
        if skip:
            klass = deferred_class_factory(klass, skip)
            field_names = init_list
        else:
            field_names = ()
    else:
        # Load all fields on klass

        field_count = len(klass._meta.concrete_fields)
        # Check if we need to skip some parent fields.
        if from_parent and len(klass._meta.local_concrete_fields) != len(klass._meta.concrete_fields):
            # Only load those fields which haven't been already loaded into
            # 'from_parent'.
            non_seen_models = [p for p in klass._meta.get_parent_list()
                               if not issubclass(from_parent, p)]
            # Load local fields, too...
            non_seen_models.append(klass)
            field_names = [f.attname for f in klass._meta.concrete_fields
                           if f.model in non_seen_models]
            field_count = len(field_names)
        # Try to avoid populating the field_names variable for performance
        # reasons: when field_names is set, we use **kwargs based model
        # initialization, which is slower than positional init.
        if field_count == len(klass._meta.concrete_fields):
            field_names = ()

    restricted = requested is not None

    related_fields = []
    for f in klass._meta.fields:
        if select_related_descend(f, restricted, requested, load_fields):
            if restricted:
                next = requested[f.name]
            else:
                next = None
            klass_info = get_klass_info(f.rel.to, max_depth=max_depth, cur_depth=cur_depth + 1,
                                        requested=next, only_load=only_load)
            related_fields.append((f, klass_info))

    reverse_related_fields = []
    if restricted:
        for o in klass._meta.get_all_related_objects():
            if o.field.unique and select_related_descend(o.field, restricted, requested,
                                                         only_load.get(o.model), reverse=True):
                next = requested[o.field.related_query_name()]
                parent = klass if issubclass(o.model, klass) else None
                klass_info = get_klass_info(o.model, max_depth=max_depth, cur_depth=cur_depth + 1,
                                            requested=next, only_load=only_load, from_parent=parent)
                reverse_related_fields.append((o.field, klass_info))
    if field_names:
        pk_idx = field_names.index(klass._meta.pk.attname)
    else:
        pk_idx = klass._meta.pk_index()

    return klass, field_names, field_count, related_fields, reverse_related_fields, pk_idx


def get_cached_row(row, index_start, using, klass_info, offset=0,
                   parent_data=()):
    """
Helper function that recursively returns an object with the specified
related attributes already populated.

This method may be called recursively to populate deep select_related()
clauses.

Arguments:
* row - the row of data returned by the database cursor
* index_start - the index of the row at which data for this
object is known to start
* offset - the number of additional fields that are known to
exist in row for `klass`. This usually means the number of
annotated results on `klass`.
* using - the database alias on which the query is being executed.
* klass_info - result of the get_klass_info function
* parent_data - parent model data in format (field, value). Used
to populate the non-local fields of child models.
"""
    if klass_info is None:
        return None
    klass, field_names, field_count, related_fields, reverse_related_fields, pk_idx = klass_info

    fields = row[index_start:index_start + field_count]
    # If the pk column is None (or the equivalent '', when the connection
    # interprets empty strings as nulls), then the related object must be
    # non-existent - set the relation to None.
    if (fields[pk_idx] is None or
        (connections[using].features.interprets_empty_strings_as_nulls and
         fields[pk_idx] == '')):
        obj = None
    elif field_names:
        fields = list(fields)
        for rel_field, value in parent_data:
            field_names.append(rel_field.attname)
            fields.append(value)
        obj = klass(**dict(zip(field_names, fields)))
    else:
        obj = klass(*fields)
    # If an object was retrieved, set the database state.
    if obj:
        obj._state.db = using
        obj._state.adding = False

    # Instantiate related fields
    index_end = index_start + field_count + offset
    # Iterate over each related object, populating any
    # select_related() fields
    for f, klass_info in related_fields:
        # Recursively retrieve the data for the related object
        cached_row = get_cached_row(row, index_end, using, klass_info)
        # If the recursive descent found an object, populate the
        # descriptor caches relevant to the object
        if cached_row:
            rel_obj, index_end = cached_row
            if obj is not None:
                # If the base object exists, populate the
                # descriptor cache
                setattr(obj, f.get_cache_name(), rel_obj)
            if f.unique and rel_obj is not None:
                # If the field is unique, populate the
                # reverse descriptor cache on the related object
                setattr(rel_obj, f.related.get_cache_name(), obj)

    # Now do the same, but for reverse related objects.
    # Only handle the restricted case - i.e., don't do a depth
    # descent into reverse relations unless explicitly requested
    for f, klass_info in reverse_related_fields:
        # Transfer data from this object to its child models.
        parent_data = []
        for rel_field, rel_model in klass_info[0]._meta.get_fields_with_model():
            if rel_model is not None and isinstance(obj, rel_model):
                parent_data.append((rel_field, getattr(obj, rel_field.attname)))
        # Recursively retrieve the data for the related object
        cached_row = get_cached_row(row, index_end, using, klass_info,
                                        parent_data=parent_data)
        # If the recursive descent found an object, populate the
        # descriptor caches relevant to the object
        if cached_row:
            rel_obj, index_end = cached_row
            if obj is not None:
                # populate the reverse descriptor cache
                setattr(obj, f.related.get_cache_name(), rel_obj)
            if rel_obj is not None:
                # If the related object exists, populate
                # the descriptor cache.
                setattr(rel_obj, f.get_cache_name(), obj)
                # Populate related object caches using parent data.
                for rel_field, _ in parent_data:
                    if rel_field.rel:
                        setattr(rel_obj, rel_field.attname, getattr(obj, rel_field.attname))
                        try:
                            cached_obj = getattr(obj, rel_field.get_cache_name())
                            setattr(rel_obj, rel_field.get_cache_name(), cached_obj)
                        except AttributeError:
                            # Related object hasn't been cached yet
                            pass
    return obj, index_end


class RawQuerySet(object):
    """
Provides an iterator which converts the results of raw SQL queries into
annotated model instances.
"""
    def __init__(self, raw_query, model=None, query=None, params=None,
            translations=None, using=None, hints=None):
        self.raw_query = raw_query
        self.model = model
        self._db = using
        self._hints = hints or {}
        self.query = query or sql.RawQuery(sql=raw_query, using=self.db, params=params)
        self.params = params or ()
        self.translations = translations or {}
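
    # Usage sketch (hypothetical Person model):
    #   for p in Person.objects.raw('SELECT * FROM myapp_person'):
    #       print(p.first_name)
    # Fields missing from the query are deferred; extra query columns are set
    # as plain attributes on each instance (see __iter__ below).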

    def __iter__(self):
        # Mapping of attnames to row column positions. Used for constructing
        # the model using kwargs, needed when not all of the model's fields
        # are present in the query.
        model_init_field_names = {}
        # A list of tuples of (column name, column position). Used for
        # annotation fields.
        annotation_fields = []

        # Cache some things for performance reasons outside the loop.
        db = self.db
        compiler = connections[db].ops.compiler('SQLCompiler')(
            self.query, connections[db], db
        )
        need_resolve_columns = hasattr(compiler, 'resolve_columns')

        query = iter(self.query)

        try:
            # Find out which columns are the model's fields and which ones
            # should be annotated to the model.
            for pos, column in enumerate(self.columns):
                if column in self.model_fields:
                    model_init_field_names[self.model_fields[column].attname] = pos
                else:
                    annotation_fields.append((column, pos))

            # Find out which of the model's fields are not present in the query.
            skip = set()
            for field in self.model._meta.fields:
                if field.attname not in model_init_field_names:
                    skip.add(field.attname)
            if skip:
                if self.model._meta.pk.attname in skip:
                    raise InvalidQuery('Raw query must include the primary key')
                model_cls = deferred_class_factory(self.model, skip)
            else:
                model_cls = self.model
                # All of the model's fields are present in the query, so it
                # is possible to use *args based model instantiation. For each
                # field of the model, record the query column position
                # matching that field.
                model_init_field_pos = []
                for field in self.model._meta.fields:
                    model_init_field_pos.append(model_init_field_names[field.attname])
            if need_resolve_columns:
                fields = [self.model_fields.get(c, None) for c in self.columns]
            # Begin looping through the query values.
            for values in query:
                if need_resolve_columns:
                    values = compiler.resolve_columns(values, fields)
                # Associate fields to values
                if skip:
                    model_init_kwargs = {}
                    for attname, pos in six.iteritems(model_init_field_names):
                        model_init_kwargs[attname] = values[pos]
                    instance = model_cls(**model_init_kwargs)
                else:
                    model_init_args = [values[pos] for pos in model_init_field_pos]
                    instance = model_cls(*model_init_args)
                if annotation_fields:
                    for column, pos in annotation_fields:
                        setattr(instance, column, values[pos])

                instance._state.db = db
                instance._state.adding = False

                yield instance
        finally:
            # Done iterating the Query. If it has its own cursor, close it.
            if hasattr(self.query, 'cursor') and self.query.cursor:
                self.query.cursor.close()

    def __repr__(self):
        text = self.raw_query
        if self.params:
            text = text % (self.params if hasattr(self.params, 'keys') else tuple(self.params))
        return "<RawQuerySet: %r>" % text

    def __getitem__(self, k):
        return list(self)[k]

    @property
    def db(self):
        "Return the database that will be used if this query is executed now"
        return self._db or router.db_for_read(self.model, **self._hints)

    def using(self, alias):
        """
Selects which database this Raw QuerySet should excecute it's query against.
"""
        return RawQuerySet(self.raw_query, model=self.model,
                query=self.query.clone(using=alias),
                params=self.params, translations=self.translations,
                using=alias)

    @property
    def columns(self):
        """
A list of model field names in the order they'll appear in the
query results.
"""
        if not hasattr(self, '_columns'):
            self._columns = self.query.get_columns()

            # Adjust any column names which don't match field names
            for (query_name, model_name) in self.translations.items():
                try:
                    index = self._columns.index(query_name)
                    self._columns[index] = model_name
                except ValueError:
                    # Ignore translations for non-existent column names
                    pass

        return self._columns
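
    # Translations sketch (illustrative): passing
    #   translations={'first': 'first_name'}
    # to raw() renames the query column 'first' to the model field name
    # 'first_name' here; translations for unknown columns are ignored.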

    @property
    def model_fields(self):
        """
A dict mapping column names to model field names.
"""
        if not hasattr(self, '_model_fields'):
            converter = connections[self.db].introspection.table_name_converter
            self._model_fields = {}
            for field in self.model._meta.fields:
                name, column = field.get_attname_column()
                self._model_fields[converter(column)] = field
        return self._model_fields


class Prefetch(object):
    def __init__(self, lookup, queryset=None, to_attr=None):
        # `prefetch_through` is the path we traverse to perform the prefetch.
        self.prefetch_through = lookup
        # `prefetch_to` is the path to the attribute that stores the result.
        self.prefetch_to = lookup
        if to_attr:
            self.prefetch_to = LOOKUP_SEP.join(lookup.split(LOOKUP_SEP)[:-1] + [to_attr])

        self.queryset = queryset
        self.to_attr = to_attr

    def add_prefix(self, prefix):
        self.prefetch_through = LOOKUP_SEP.join([prefix, self.prefetch_through])
        self.prefetch_to = LOOKUP_SEP.join([prefix, self.prefetch_to])

    def get_current_prefetch_through(self, level):
        return LOOKUP_SEP.join(self.prefetch_through.split(LOOKUP_SEP)[:level + 1])

    def get_current_prefetch_to(self, level):
        return LOOKUP_SEP.join(self.prefetch_to.split(LOOKUP_SEP)[:level + 1])

    def get_current_to_attr(self, level):
        parts = self.prefetch_to.split(LOOKUP_SEP)
        to_attr = parts[level]
        as_attr = self.to_attr and level == len(parts) - 1
        return to_attr, as_attr

    def get_current_queryset(self, level):
        if self.get_current_prefetch_to(level) == self.prefetch_to:
            return self.queryset
        return None

    def __eq__(self, other):
        if isinstance(other, Prefetch):
            return self.prefetch_to == other.prefetch_to
        return False
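
# Usage sketch (hypothetical Author/Book models):
#   Author.objects.prefetch_related(
#       Prefetch('books', queryset=Book.objects.order_by('title'),
#                to_attr='sorted_books'))
# prefetches each author's books with the custom queryset and stores the
# resulting list on author.sorted_books instead of the default cache.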


def normalize_prefetch_lookups(lookups, prefix=None):
    """
Helper function that normalize lookups into Prefetch objects.
"""
    ret = []
    for lookup in lookups:
        if not isinstance(lookup, Prefetch):
            lookup = Prefetch(lookup)
        if prefix:
            lookup.add_prefix(prefix)
        ret.append(lookup)
    return ret
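
# For example: normalize_prefetch_lookups(['books__authors'], 'best') wraps
# the string in a Prefetch whose prefetch_through and prefetch_to are both
# 'best__books__authors'.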


def prefetch_related_objects(result_cache, related_lookups):
    """
Helper function for prefetch_related functionality

Populates prefetched objects caches for a list of results
from a QuerySet
"""

    if len(result_cache) == 0:
        return # nothing to do

    related_lookups = normalize_prefetch_lookups(related_lookups)

    # We need to be able to dynamically add to the list of prefetch_related
    # lookups that we look up (see below). So we need some bookkeeping to
    # ensure we don't do duplicate work.
    done_queries = {} # dictionary of things like 'foo__bar': [results]

    auto_lookups = [] # we add to this as we go through.
    followed_descriptors = set() # recursion protection

    all_lookups = itertools.chain(related_lookups, auto_lookups)
    for lookup in all_lookups:
        if lookup.prefetch_to in done_queries:
            if lookup.queryset:
                raise ValueError("'%s' lookup was already seen with a different queryset. "
                                 "You may need to adjust the ordering of your lookups." % lookup.prefetch_to)

            continue

        # At the top level, the list of objects to decorate is the result
        # cache from the primary QuerySet. It won't be for deeper levels.
        obj_list = result_cache

        through_attrs = lookup.prefetch_through.split(LOOKUP_SEP)
        for level, through_attr in enumerate(through_attrs):
            # Prepare main instances
            if len(obj_list) == 0:
                break

            prefetch_to = lookup.get_current_prefetch_to(level)
            if prefetch_to in done_queries:
                # Skip any prefetching, and any object preparation
                obj_list = done_queries[prefetch_to]
                continue

            # Prepare objects:
            good_objects = True
            for obj in obj_list:
                # Since prefetching can re-use instances, it is possible to have
                # the same instance multiple times in obj_list, so obj might
                # already be prepared.
                if not hasattr(obj, '_prefetched_objects_cache'):
                    try:
                        obj._prefetched_objects_cache = {}
                    except AttributeError:
                        # Must be in a QuerySet subclass that is not returning
                        # Model instances, either in Django or 3rd
                        # party. prefetch_related() doesn't make sense, so quit
                        # now.
                        good_objects = False
                        break
            if not good_objects:
                break

            # Descend down tree

            # We assume that the objects retrieved are homogeneous (which is
            # the premise of prefetch_related), so what applies to the first
            # object applies to all.
            first_obj = obj_list[0]
            prefetcher, descriptor, attr_found, is_fetched = get_prefetcher(first_obj, through_attr)

            if not attr_found:
                raise AttributeError("Cannot find '%s' on %s object, '%s' is an invalid "
                                     "parameter to prefetch_related()" %
                                     (through_attr, first_obj.__class__.__name__, lookup.prefetch_through))

            if level == len(through_attrs) - 1 and prefetcher is None:
                # Last one, this *must* resolve to something that supports
                # prefetching, otherwise there is no point adding it and the
                # developer asking for it has made a mistake.
                raise ValueError("'%s' does not resolve to an item that supports "
                                 "prefetching - this is an invalid parameter to "
                                 "prefetch_related()." % lookup.prefetch_through)

            if prefetcher is not None and not is_fetched:
                obj_list, additional_lookups = prefetch_one_level(obj_list, prefetcher, lookup, level)
                # We need to ensure we don't keep adding lookups from the
                # same relationships to stop infinite recursion. So, if we
                # are already on an automatically added lookup, don't add
                # the new lookups from relationships we've seen already.
                if not (lookup in auto_lookups and descriptor in followed_descriptors):
                    done_queries[prefetch_to] = obj_list
                    auto_lookups.extend(normalize_prefetch_lookups(additional_lookups, prefetch_to))
                followed_descriptors.add(descriptor)
            elif isinstance(getattr(first_obj, through_attr), list):
                # The current part of the lookup relates to a custom Prefetch.
                # This means that obj.attr is a list of related objects, and
                # thus we must turn the obj.attr lists into a single related
                # object list.
                new_list = []
                for obj in obj_list:
                    new_list.extend(getattr(obj, through_attr))
                obj_list = new_list
            else:
                # Either a singly related object that has already been fetched
                # (e.g. via select_related), or hopefully some other property
                # that doesn't support prefetching but needs to be traversed.

                # We replace the current list of parent objects with the list
                # of related objects, filtering out empty or missing values so
                # that we can continue with nullable or reverse relations.
                new_obj_list = []
                for obj in obj_list:
                    try:
                        new_obj = getattr(obj, through_attr)
                    except exceptions.ObjectDoesNotExist:
                        continue
                    if new_obj is None:
                        continue
                    new_obj_list.append(new_obj)
                obj_list = new_obj_list
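
# Call-site sketch (illustrative): QuerySet._fetch_all() invokes
#   prefetch_related_objects(self._result_cache, self._prefetch_related_lookups)
# once the main query has run, so each lookup level above costs at most one
# extra query.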


def get_prefetcher(instance, attr):
    """
For the attribute 'attr' on the given instance, finds
an object that has a get_prefetch_queryset().
Returns a 4 tuple containing:
(the object with get_prefetch_queryset (or None),
the descriptor object representing this relationship (or None),
a boolean that is False if the attribute was not found at all,
a boolean that is True if the attribute has already been fetched)
"""
    prefetcher = None
    attr_found = False
    is_fetched = False

    # For singly related objects, we have to avoid getting the attribute
    # from the object, as this will trigger the query. So we first try
    # on the class, in order to get the descriptor object.
    rel_obj_descriptor = getattr(instance.__class__, attr, None)
    if rel_obj_descriptor is None:
        try:
            rel_obj = getattr(instance, attr)
            attr_found = True
            # If we are following a lookup path that leads us through a
            # previous fetch from a custom Prefetch, we might end up with a
            # list instead of a related queryset. This means the objects are
            # already fetched.
            if isinstance(rel_obj, list):
                is_fetched = True
        except AttributeError:
            pass
    else:
        attr_found = True
        if rel_obj_descriptor:
            # singly related object, descriptor object has the
            # get_prefetch_queryset() method.
            if hasattr(rel_obj_descriptor, 'get_prefetch_queryset'):
                prefetcher = rel_obj_descriptor
                if rel_obj_descriptor.is_cached(instance):
                    is_fetched = True
            else:
                # The descriptor doesn't support prefetching, so we go ahead
                # and get the attribute on the instance rather than the class,
                # to support many-related managers.
                rel_obj = getattr(instance, attr)
                if hasattr(rel_obj, 'get_prefetch_queryset'):
                    prefetcher = rel_obj
    return prefetcher, rel_obj_descriptor, attr_found, is_fetched


def prefetch_one_level(instances, prefetcher, lookup, level):
    """
Helper function for prefetch_related_objects

Runs prefetches on all instances using the prefetcher object,
assigning results to relevant caches in instance.

The prefetched objects are returned, along with any additional
prefetches that must be done due to prefetch_related lookups
found from default managers.
"""
    # prefetcher must have a method get_prefetch_queryset() which takes a list
    # of instances, and returns a tuple:
    #
    # (queryset of instances of self.model that are related to the passed-in
    #  instances,
    #  callable that gets the value to be matched for returned instances,
    #  callable that gets the value to be matched for passed-in instances,
    #  boolean that is True for singly related objects,
    #  cache name to assign to).
    #
    # The 'values to be matched' must be hashable as they will be used
    # in a dictionary.
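    #
    # Contract sketch (hypothetical reverse-FK manager for Author.books):
    #   (Book.objects.filter(author__in=instances),
    #    lambda book: book.author_id,    # value matched on related objects
    #    lambda author: author.pk,       # value matched on instances
    #    False,                          # many books per author
    #    'books')                        # cache name to assign to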

    rel_qs, rel_obj_attr, instance_attr, single, cache_name = (
        prefetcher.get_prefetch_queryset(instances, lookup.get_current_queryset(level)))
    # We have to handle the possibility that the default manager itself added
    # prefetch_related lookups to the QuerySet we just got back. We don't want to
    # trigger the prefetch_related functionality by evaluating the query.
    # Rather, we need to merge in the prefetch_related lookups.
    additional_lookups = getattr(rel_qs, '_prefetch_related_lookups', [])
    if additional_lookups:
        # We don't need to clone because the manager should have given us a
        # fresh instance, so we access an internal attribute instead of the
        # public interface, for performance reasons.
        rel_qs._prefetch_related_lookups = []

    all_related_objects = list(rel_qs)

    rel_obj_cache = {}
    for rel_obj in all_related_objects:
        rel_attr_val = rel_obj_attr(rel_obj)
        rel_obj_cache.setdefault(rel_attr_val, []).append(rel_obj)

    for obj in instances:
        instance_attr_val = instance_attr(obj)
        vals = rel_obj_cache.get(instance_attr_val, [])
        to_attr, as_attr = lookup.get_current_to_attr(level)
        if single:
            val = vals[0] if vals else None
            to_attr = to_attr if as_attr else cache_name
            setattr(obj, to_attr, val)
        else:
            if as_attr:
                setattr(obj, to_attr, vals)
            else:
                # Cache the result in the QuerySet returned by .all().
                qs = getattr(obj, to_attr).all()
                qs._result_cache = vals
                # We don't want the individual qs doing prefetch_related now,
                # since we have merged this into the current work.
                qs._prefetch_done = True
                obj._prefetched_objects_cache[cache_name] = qs
    return all_related_objects, additional_lookups