Commit 204d1fc
provide compatibility with Django 1.9
michiya committed Dec 4, 2015
1 parent aed82b8 commit 204d1fc
Showing 8 changed files with 89 additions and 68 deletions.
10 changes: 5 additions & 5 deletions README.rst
@@ -17,7 +17,7 @@ Microsoft SQL Server and Azure SQL Database.
Features
--------

- Supports Django 1.8.7
- Supports Django 1.9
- Supports Microsoft SQL Server 2005, 2008/2008R2, 2012, 2014 and
Azure SQL Database
- Supports LIMIT+OFFSET and offset w/o LIMIT emulation.
@@ -29,7 +29,7 @@ Features
Dependencies
------------

- Django 1.8.7
- Django 1.9
- pyodbc 3.0 or newer

Installation
@@ -236,12 +236,12 @@ The following features are currently not supported:
Notice
------

This version of *django-pyodbc-azure* only supports Django 1.8.
This version of *django-pyodbc-azure* only supports Django 1.9.
If you want to use it on older versions of Django,
specify an appropriate version number (1.2.x for Django 1.7)
specify an appropriate version number (1.8.x.x for Django 1.8)
at installation like this: ::

pip install "django-pyodbc-azure<1.8"
pip install "django-pyodbc-azure<1.9"

License
-------
6 changes: 2 additions & 4 deletions setup.py
@@ -11,16 +11,14 @@
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Topic :: Internet :: WWW/HTTP',
]

setup(
name='django-pyodbc-azure',
version='1.8.7.1',
version='1.9.0.0',
description='Django backend for Microsoft SQL Server and Azure SQL Database using pyodbc',
long_description=open('README.rst').read(),
author='Michiya Takahashi',
@@ -29,7 +27,7 @@
license='BSD',
packages=['sql_server', 'sql_server.pyodbc'],
install_requires=[
'Django>=1.8.7,<1.9',
'Django>=1.9.0,<1.10',
'pyodbc>=3.0',
],
classifiers=CLASSIFIERS,
2 changes: 1 addition & 1 deletion sql_server/pyodbc/base.py
@@ -8,7 +8,7 @@

from django.core.exceptions import ImproperlyConfigured
from django import VERSION
if VERSION[:3] < (1,8,7) or VERSION[:2] >= (1,9):
if VERSION[:3] < (1,9,0) or VERSION[:2] >= (1,10):
raise ImproperlyConfigured("Django %d.%d.%d is not supported." % VERSION[:3])
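# For illustration: django.VERSION is a 5-tuple such as (1, 9, 0, 'final', 0),
# so any Django 1.9.x passes this gate, while 1.8.x and 1.10+ fail at import time.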

try:
63 changes: 30 additions & 33 deletions sql_server/pyodbc/compiler.py
@@ -15,11 +15,8 @@ def as_sql(self, with_limits=True, with_col_aliases=False, subquery=False):
If 'with_limits' is False, any limit/offset information is not included
in the query.
"""
# After executing the query, we must get rid of any joins the query
# setup created. So, take note of alias counts before the query ran.
# However we do not want to get rid of stuff done in pre_sql_setup(),
# as the pre_sql_setup will modify query state in a way that forbids
# another run of it.
if with_limits and self.query.low_mark == self.query.high_mark:
return '', ()
self.subquery = subquery
refcounts_before = self.query.alias_refcount.copy()
try:
@@ -43,8 +40,8 @@ def as_sql(self, with_limits=True, with_col_aliases=False, subquery=False):
# docstring of get_from_clause() for details.
from_, f_params = self.get_from_clause()

where, w_params = self.compile(self.query.where)
having, h_params = self.compile(self.query.having)
where, w_params = self.compile(self.where) if self.where is not None else ("", [])
having, h_params = self.compile(self.having) if self.having is not None else ("", [])
params = []
result = ['SELECT']

@@ -176,6 +173,15 @@ def _as_microsoft(self, node):
node.arg_joiner = ' + '
node.template = '%(expressions)s'
node = node.coalesce()
# SQL Server does not provide GREATEST/LEAST functions,
# so we emulate them with table value constructors
# https://msdn.microsoft.com/en-us/library/dd776382.aspx
elif node.function == 'GREATEST':
node.arg_joiner = '), ('
node.template = '(SELECT MAX(value) FROM (VALUES (%(expressions)s)) AS _%(function)s(value))'
elif node.function == 'LEAST':
node.arg_joiner = '), ('
node.template = '(SELECT MIN(value) FROM (VALUES (%(expressions)s)) AS _%(function)s(value))'
elif node.function == 'LENGTH':
node.function = 'LEN'
elif node.function == 'STDDEV_SAMP':
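A minimal sketch of how the GREATEST emulation above expands (hypothetical,
already-quoted column SQL, run outside the compiler):

    template = ('(SELECT MAX(value) FROM (VALUES (%(expressions)s))'
                ' AS _%(function)s(value))')
    arg_joiner = '), ('
    # each element is the already-compiled SQL of one function argument
    expressions = arg_joiner.join(['[created]', '[updated]'])
    print(template % {'expressions': expressions, 'function': 'GREATEST'})
    # (SELECT MAX(value) FROM (VALUES ([created]), ([updated])) AS _GREATEST(value))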
@@ -205,48 +211,39 @@ def as_sql(self):

if has_fields:
fields = self.query.fields
result.append('(%s)' % ', '.join([qn(f.column) for f in fields]))
result.append('(%s)' % ', '.join(qn(f.column) for f in fields))
values_format = 'VALUES (%s)'
params = values = [
[
f.get_db_prep_save(
getattr(obj, f.attname) if self.query.raw else f.pre_save(obj, True),
connection=self.connection
) for f in fields
]
value_rows = [
[self.prepare_value(field, self.pre_save_val(field, obj)) for field in fields]
for obj in self.query.objs
]
else:
values_format = '%s VALUES'
values = [[self.connection.ops.pk_default_value()] for obj in self.query.objs]
params = [[]]
# An empty object.
value_rows = [[self.connection.ops.pk_default_value()] for _ in self.query.objs]
fields = [None]
can_bulk = (not any(hasattr(field, "get_placeholder") for field in fields) and
not self.return_id and self.connection.features.has_bulk_insert)

if can_bulk:
placeholders = [["%s"] * len(fields)]
else:
placeholders = [
[self.placeholder(field, v) for field, v in zip(fields, val)]
for val in values
]
# Oracle Spatial needs to remove some values due to #10888
params = self.connection.ops.modify_insert_params(placeholders, params)
# Currently the backends just accept values when generating bulk
# queries and generate their own placeholders. Doing that isn't
# necessary and it should be possible to use placeholders and
# expressions in bulk inserts too.
can_bulk = (not self.return_id and self.connection.features.has_bulk_insert) and has_fields

placeholder_rows, param_rows = self.assemble_as_sql(fields, value_rows)

if self.return_id and self.connection.features.can_return_id_from_insert:
result.insert(0, 'SET NOCOUNT ON')
result.append((values_format + ';') % ', '.join(placeholders[0]))
result.append((values_format + ';') % ', '.join(placeholder_rows[0]))
result.append('SELECT CAST(SCOPE_IDENTITY() AS BIGINT)')
return [(" ".join(result), tuple(params[0]))]
return [(" ".join(result), tuple(param_rows[0]))]

if can_bulk:
result.append(self.connection.ops.bulk_insert_sql(fields, len(values)))
sql = [(" ".join(result), tuple([v for val in values for v in val]))]
result.append(self.connection.ops.bulk_insert_sql(fields, placeholder_rows))
sql = [(" ".join(result), tuple(p for ps in param_rows for p in ps))]
else:
sql = [
(" ".join(result + [values_format % ", ".join(p)]), vals)
for p, vals in zip(placeholders, params)
for p, vals in zip(placeholder_rows, param_rows)
]

if has_fields:
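For orientation, Django 1.9's assemble_as_sql (used above) returns row-aligned
placeholder and parameter lists; the bulk path joins them into a single VALUES
clause via bulk_insert_sql, while the non-bulk path zips them row by row. A
hedged sketch with hypothetical values for two objects and two fields:

    placeholder_rows = [['%s', '%s'], ['%s', '%s']]    # one inner list per object
    param_rows = [['alice', 30], ['bob', 25]]
    values_sql = ', '.join('(%s)' % ', '.join(row) for row in placeholder_rows)
    print('VALUES ' + values_sql)                      # VALUES (%s, %s), (%s, %s)
    print(tuple(p for ps in param_rows for p in ps))   # ('alice', 30, 'bob', 25)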
1 change: 0 additions & 1 deletion sql_server/pyodbc/features.py
@@ -17,7 +17,6 @@ class DatabaseFeatures(BaseDatabaseFeatures):
has_select_for_update = True
has_select_for_update_nowait = True
has_zoneinfo_database = pytz is not None
needs_datetime_string_cast = False
requires_literal_defaults = True
requires_sqlparse_for_splitting = False
supports_1000_query_parameters = False
7 changes: 6 additions & 1 deletion sql_server/pyodbc/introspection.py
@@ -1,9 +1,13 @@
from collections import namedtuple

import pyodbc as Database

from django.db.backends.base.introspection import (
BaseDatabaseIntrospection, FieldInfo, TableInfo,
)

FieldInfo = namedtuple('FieldInfo', FieldInfo._fields + ('default',))

SQL_AUTOFIELD = -777555


@@ -25,6 +29,7 @@ class DatabaseIntrospection(BaseDatabaseIntrospection):
Database.SQL_NUMERIC: 'DecimalField',
Database.SQL_REAL: 'FloatField',
Database.SQL_SMALLINT: 'SmallIntegerField',
Database.SQL_SS_TIME2: 'TimeField',
Database.SQL_TINYINT: 'SmallIntegerField',
Database.SQL_TYPE_DATE: 'DateField',
Database.SQL_TYPE_TIME: 'TimeField',
@@ -87,7 +92,7 @@ def get_table_description(self, cursor, table_name, identity_check=True):
"""

# map pyodbc's cursor.columns to db-api cursor description
columns = [[c[3], c[4], None, c[6], c[6], c[8], c[10]] for c in cursor.columns(table=table_name)]
columns = [[c[3], c[4], None, c[6], c[6], c[8], c[10], c[12]] for c in cursor.columns(table=table_name)]
items = []
for column in columns:
if identity_check and self._is_auto_field(cursor, table_name, column[0]):
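For reference, pyodbc's cursor.columns() follows ODBC's SQLColumns result
layout, so the 0-based indices used above are: 3 COLUMN_NAME, 4 DATA_TYPE,
6 COLUMN_SIZE, 8 DECIMAL_DIGITS, 10 NULLABLE, and the newly read 12 COLUMN_DEF,
which feeds the extra 'default' member. A minimal sketch of the extended tuple
(row values are hypothetical, e.g. a nullable varchar(100) with no default):

    from collections import namedtuple
    from django.db.backends.base.introspection import FieldInfo as BaseFieldInfo

    FieldInfo = namedtuple('FieldInfo', BaseFieldInfo._fields + ('default',))
    row = ('title', 12, None, 100, 100, None, 1, None)  # 12 == ODBC SQL_VARCHAR
    print(FieldInfo(*row))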
33 changes: 28 additions & 5 deletions sql_server/pyodbc/operations.py
@@ -4,6 +4,7 @@

from django.conf import settings
from django.db.backends.base.operations import BaseDatabaseOperations
from django.db.models.functions import Greatest, Least
from django.utils import timezone
from django.utils.encoding import force_text
from django.utils.six import string_types
@@ -46,9 +47,10 @@ def bulk_batch_size(self, fields, objs):
size = max_row_values // fields_len
return size

def bulk_insert_sql(self, fields, num_values):
items_sql = "(%s)" % ", ".join(["%s"] * len(fields))
return "VALUES " + ", ".join([items_sql] * num_values)
def bulk_insert_sql(self, fields, placeholder_rows):
placeholder_rows_sql = (", ".join(row) for row in placeholder_rows)
values_sql = ", ".join("(%s)" % sql for sql in placeholder_rows_sql)
return "VALUES " + values_sql

def cache_key_culling_sql(self):
"""
@@ -62,6 +64,16 @@ def cache_key_culling_sql(self):
"ROW_NUMBER() OVER (ORDER BY cache_key) AS rn FROM %s" \
") cache WHERE rn = %%s + 1"

def check_expression_support(self, expression):
if self.connection.sql_server_version < 2008:
# we can't even emulate GREATEST or LEAST
unsupported_functions = (Greatest, Least)
for f in unsupported_functions:
if isinstance(expression, f):
raise NotImplementedError(
'SQL Server has no support for %s function.' %
f.function)
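# Illustration (hypothetical queryset): on SQL Server 2005 the VALUES table
# value constructor used for the emulation is unavailable, so e.g.
# annotate(newest=Greatest('created', 'updated')) fails fast here rather
# than emitting invalid SQL.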

def combine_duration_expression(self, connector, sub_expressions):
lhs, rhs = sub_expressions
sign = ' * -1' if connector == '-' else ''
@@ -141,6 +153,17 @@ def date_trunc_sql(self, lookup_type, field_name):
if lookup_type == 'day':
return "CONVERT(datetime, CONVERT(varchar(12), %s, 112))" % field_name

def datetime_cast_date_sql(self, field_name, tzname):
if settings.USE_TZ and not tzname == 'UTC':
offset = self._get_utcoffset(tzname)
field_name = 'DATEADD(second, %d, %s)' % (offset, field_name)
params = []
if self.connection.use_legacy_datetime:
sql = 'CONVERT(datetime, CONVERT(char(10), %s, 101), 101)' % field_name
else:
sql = 'CAST(%s AS date)' % field_name
return sql, params
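# Rough illustration of the SQL this emits (hypothetical column, USE_TZ=True,
# tzname='Asia/Tokyo', i.e. a +32400-second offset, modern datetime types):
#   CAST(DATEADD(second, 32400, [app_note].[created]) AS date)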

def datetime_extract_sql(self, lookup_type, field_name, tzname):
if settings.USE_TZ and not tzname == 'UTC':
offset = self._get_utcoffset(tzname)
@@ -383,7 +406,7 @@ def prep_for_iexact_query(self, x):
"""
return x

def value_to_db_datetime(self, value):
def adapt_datetimefield_value(self, value):
"""
Transforms a datetime value to an object compatible with what is expected
by the backend driver for datetime columns.
@@ -397,7 +420,7 @@ def value_to_db_datetime(self, value):
value = value.replace(microsecond=0)
return value

def value_to_db_time(self, value):
def adapt_timefield_value(self, value):
"""
Transforms a time value to an object compatible with what is expected
by the backend driver for time columns.
35 changes: 17 additions & 18 deletions sql_server/pyodbc/schema.py
@@ -66,7 +66,7 @@ def _alter_field(self, model, old_field, new_field, old_type, new_type,
raise NotImplementedError("the backend doesn't support altering from/to AutoField.")
# Drop any FK constraints, we'll remake them later
fks_dropped = set()
if old_field.rel and old_field.db_constraint:
if old_field.remote_field and old_field.db_constraint:
fk_names = self._constraint_names(model, [old_field.column], foreign_key=True)
if strict and len(fk_names) != 1:
raise ValueError("Found wrong number (%s) of foreign key constraints for %s.%s" % (
@@ -288,8 +288,8 @@ def _alter_field(self, model, old_field, new_field, old_type, new_type,
for sql, params in other_actions:
self.execute(sql, params)
# Does it have a foreign key?
if (new_field.rel and
(fks_dropped or not old_field.rel or not old_field.db_constraint) and
if (new_field.remote_field and
(fks_dropped or not old_field.remote_field or not old_field.db_constraint) and
new_field.db_constraint):
self.execute(self._create_fk_sql(model, new_field, "_fk_%(to_table)s_%(to_column)s"))
# Rebuild FKs that pointed to us if we previously had to drop them
@@ -353,8 +353,8 @@ def add_field(self, model, field):
table instead (for M2M fields)
"""
# Special-case implicit M2M tables
if field.many_to_many and field.rel.through._meta.auto_created:
return self.create_model(field.rel.through)
if field.many_to_many and field.remote_field.through._meta.auto_created:
return self.create_model(field.remote_field.through)
# Get the column's definition
definition, params = self.column_sql(model, field, include_default=True)
# It might not actually have a column behind it
@@ -395,7 +395,7 @@ def add_field(self, model, field):
if field.db_index and not field.unique:
self.deferred_sql.append(self._create_index_sql(model, [field]))
# Add any FK constraints later
if field.rel and self.connection.features.supports_foreign_keys and field.db_constraint:
if field.remote_field and self.connection.features.supports_foreign_keys and field.db_constraint:
self.deferred_sql.append(self._create_fk_sql(model, field, "_fk_%(to_table)s_%(to_column)s"))
# Reset connection if required
if self.connection.features.connection_persists_old_columns:
@@ -428,9 +428,9 @@ def create_model(self, model):
definition += " %s" % col_type_suffix
params.extend(extra_params)
# FK
if field.rel and field.db_constraint:
to_table = field.rel.to._meta.db_table
to_column = field.rel.to._meta.get_field(field.rel.field_name).column
if field.remote_field and field.db_constraint:
to_table = field.remote_field.model._meta.db_table
to_column = field.remote_field.model._meta.get_field(field.remote_field.field_name).column
if self.connection.features.supports_foreign_keys:
self.deferred_sql.append(self._create_fk_sql(model, field, "_fk_%(to_table)s_%(to_column)s"))
elif self.sql_create_inline_fk:
@@ -449,12 +449,11 @@ def create_model(self, model):
if autoinc_sql:
self.deferred_sql.extend(autoinc_sql)

# Add any unique_togethers
# Add any unique_togethers (always deferred, as some fields might be
# created afterwards, like geometry fields with some backends)
for fields in model._meta.unique_together:
columns = [model._meta.get_field(field).column for field in fields]
column_sqls.append(self.sql_create_table_unique % {
"columns": ", ".join(self.quote_name(column) for column in columns),
})
self.deferred_sql.append(self._create_unique_sql(model, columns))
# Make the table
sql = self.sql_create_table % {
"table": self.quote_name(model._meta.db_table),
Expand All @@ -472,8 +471,8 @@ def create_model(self, model):

# Make M2M tables
for field in model._meta.local_many_to_many:
if field.rel.through._meta.auto_created:
self.create_model(field.rel.through)
if field.remote_field.through._meta.auto_created:
self.create_model(field.remote_field.through)

def delete_model(self, model):
"""
@@ -552,13 +551,13 @@ def remove_field(self, model, field):
but for M2Ms may involve deleting a table.
"""
# Special-case implicit M2M tables
if field.many_to_many and field.rel.through._meta.auto_created:
return self.delete_model(field.rel.through)
if field.many_to_many and field.remote_field.through._meta.auto_created:
return self.delete_model(field.remote_field.through)
# It might not actually have a column behind it
if field.db_parameters(connection=self.connection)['type'] is None:
return
# Drop any FK constraints, MySQL requires explicit deletion
if field.rel:
if field.remote_field:
fk_names = self._constraint_names(model, [field.column], foreign_key=True)
for fk_name in fk_names:
self.execute(self._delete_constraint_sql(self.sql_delete_fk, model, fk_name))
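Most of the churn in this file tracks a Django 1.9 rename: Field.rel became
Field.remote_field, and its 'to' attribute became 'model'. A hedged,
version-agnostic sketch for a relational field (helper name is hypothetical):

    from django import VERSION

    def fk_target_table(field):
        # Django 1.9: field.remote_field.model; Django 1.8: field.rel.to
        if VERSION[:2] >= (1, 9):
            model = field.remote_field.model
        else:
            model = field.rel.to
        return model._meta.db_table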
