Fixed VARCHAR/LONGVARCHAR comparison bug.
As reported by Andy Hochhaus in the pyodbc group, SQL Server fails if you compare a
SQL_LONGVARCHAR parameter with a varchar field.  Had been using a hardcoded 255 to determine
when to switch from VARCHAR to LONGVARCHAR.  Now uses SQLGetTypeInfo to determine the maximum
varchar and binary widths.  This will also help performance for programs that bind a lot of
strings greater than 255 but less than their db's max varchar size.
mkfiserv committed Aug 20, 2010
1 parent eb54575 commit 1c7aefa
Showing 9 changed files with 48 additions and 21 deletions.
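The change rests on one ODBC call: SQLGetTypeInfo returns a result set describing each data type the driver supports, and its third column, COLUMN_SIZE, reports the maximum length for that type. A minimal standalone sketch of the same lookup (an illustration, not the pyodbc code itself; hdbc is assumed to be an already-connected handle and error diagnostics are elided):

#include <sql.h>
#include <sqlext.h>

// Illustrative helper: ask the driver for the maximum VARCHAR length it supports.
// COLUMN_SIZE is column 3 of the SQLGetTypeInfo result set.  If anything fails,
// keep a conservative fallback such as the 255 used for Access.
static SQLINTEGER GetMaxVarcharLength(SQLHDBC hdbc, SQLINTEGER fallback)
{
    SQLINTEGER maxlen = fallback;
    SQLHSTMT hstmt = SQL_NULL_HSTMT;

    if (SQL_SUCCEEDED(SQLAllocHandle(SQL_HANDLE_STMT, hdbc, &hstmt)))
    {
        if (SQL_SUCCEEDED(SQLGetTypeInfo(hstmt, SQL_VARCHAR)) && SQL_SUCCEEDED(SQLFetch(hstmt)))
        {
            SQLINTEGER columnsize = 0;
            if (SQL_SUCCEEDED(SQLGetData(hstmt, 3, SQL_INTEGER, &columnsize, sizeof(columnsize), 0)))
                maxlen = columnsize;    // e.g. 8000 when connected to SQL Server
        }
        SQLFreeHandle(SQL_HANDLE_STMT, hstmt);
    }
    return maxlen;
}

On SQL Server this reports 8000 for varchar, so values up to 8000 characters stay bound as SQL_VARCHAR and only genuinely long values fall back to SQL_LONGVARCHAR.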
1 change: 1 addition & 0 deletions .gitignore
@@ -8,3 +8,4 @@ dist
tmp
web/*.cmd
TAGS
pyodbc.egg-info
26 changes: 21 additions & 5 deletions src/cnxninfo.cpp
@@ -104,21 +104,37 @@ static PyObject* CnxnInfo_New(Connection* cnxn)
p->supports_describeparam = szYN[0] == 'Y';
}

// What is the datetime precision? This unfortunately requires a cursor (HSTMT).
// These defaults are tiny, but are necessary for Access.
p->varchar_maxlength = 255;
p->binary_maxlength = 510;

HSTMT hstmt = 0;
if (SQL_SUCCEEDED(SQLAllocHandle(SQL_HANDLE_STMT, cnxn->hdbc, &hstmt)))
{
SQLINTEGER columnsize;
if (SQL_SUCCEEDED(SQLGetTypeInfo(hstmt, SQL_TYPE_TIMESTAMP)) && SQL_SUCCEEDED(SQLFetch(hstmt)))
{
SQLINTEGER columnsize;
if (SQL_SUCCEEDED(SQLGetData(hstmt, 3, SQL_INTEGER, &columnsize, sizeof(columnsize), 0)))
{
p->datetime_precision = columnsize;
}

SQLFreeStmt(hstmt, SQL_CLOSE);
}

SQLFreeStmt(hstmt, SQL_CLOSE);
if (SQL_SUCCEEDED(SQLGetTypeInfo(hstmt, SQL_VARCHAR)) && SQL_SUCCEEDED(SQLFetch(hstmt)))
{
if (SQL_SUCCEEDED(SQLGetData(hstmt, 3, SQL_INTEGER, &columnsize, sizeof(columnsize), 0)))
p->varchar_maxlength = (int)columnsize;

SQLFreeStmt(hstmt, SQL_CLOSE);
}

if (SQL_SUCCEEDED(SQLGetTypeInfo(hstmt, SQL_BINARY)) && SQL_SUCCEEDED(SQLFetch(hstmt)))
{
if (SQL_SUCCEEDED(SQLGetData(hstmt, 3, SQL_INTEGER, &columnsize, sizeof(columnsize), 0)))
p->binary_maxlength = (int)columnsize;

SQLFreeStmt(hstmt, SQL_CLOSE);
}
}

Py_END_ALLOW_THREADS
3 changes: 3 additions & 0 deletions src/cnxninfo.h
@@ -26,6 +26,9 @@ struct CnxnInfo

bool supports_describeparam;
int datetime_precision;

int varchar_maxlength;
int binary_maxlength;
};

void CnxnInfo_init();
2 changes: 2 additions & 0 deletions src/connection.cpp
@@ -234,6 +234,8 @@ PyObject* Connection_New(PyObject* pConnectString, bool fAutoCommit, bool fAnsi,
cnxn->odbc_minor = p->odbc_minor;
cnxn->supports_describeparam = p->supports_describeparam;
cnxn->datetime_precision = p->datetime_precision;
cnxn->varchar_maxlength = p->varchar_maxlength;
cnxn->binary_maxlength = p->binary_maxlength;

return reinterpret_cast<PyObject*>(cnxn);
}
9 changes: 7 additions & 2 deletions src/connection.h
@@ -43,8 +43,13 @@ struct Connection
// If true, then the strings in the rows are returned as unicode objects.
bool unicode_results;

// The connection timeout in seconds.
int timeout;
// The connection timeout in seconds.
int timeout;

// These are copied from cnxn info for performance and convenience.

int varchar_maxlength;
int binary_maxlength;
};

#define Connection_Check(op) PyObject_TypeCheck(op, &ConnectionType)
4 changes: 2 additions & 2 deletions src/cursor.cpp
@@ -730,7 +730,7 @@ execute(Cursor* cur, PyObject* pSql, PyObject* params, bool skip_first)
SQLLEN cb = (SQLLEN)PyUnicode_GET_SIZE(pParam);
while (offset < cb)
{
SQLLEN remaining = min(MAX_VARCHAR_BUFFER, cb - offset);
SQLLEN remaining = min(cur->cnxn->varchar_maxlength, cb - offset);
Py_BEGIN_ALLOW_THREADS
SQLPutData(cur->hstmt, &p[offset], remaining * 2);
Py_END_ALLOW_THREADS
@@ -744,7 +744,7 @@ execute(Cursor* cur, PyObject* pSql, PyObject* params, bool skip_first)
SQLLEN cb = (SQLLEN)PyString_GET_SIZE(pParam);
while (offset < cb)
{
SQLLEN remaining = min(MAX_VARCHAR_BUFFER, cb - offset);
SQLLEN remaining = min(cur->cnxn->varchar_maxlength, cb - offset);
Py_BEGIN_ALLOW_THREADS
SQLPutData(cur->hstmt, (SQLPOINTER)&p[offset], remaining);
Py_END_ALLOW_THREADS
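For context on the two hunks above: values longer than the driver's maximum are not bound in place but streamed with SQLPutData, chunked to the new varchar_maxlength instead of the old hardcoded limit. A simplified sketch of the surrounding data-at-execution flow (an illustration only, assuming a single parameter bound with a SQL_LEN_DATA_AT_EXEC length indicator):

#include <sql.h>
#include <sqlext.h>

// Sketch of the data-at-execution flow around SQLPutData.  The parameter's value
// is streamed in chunks no larger than maxchunk.
static SQLRETURN StreamOneParam(SQLHSTMT hstmt, const char* data, SQLLEN cb, SQLLEN maxchunk)
{
    SQLRETURN ret = SQLExecute(hstmt);
    if (ret != SQL_NEED_DATA)
        return ret;                          // no data-at-execution parameter pending

    SQLPOINTER token = 0;
    ret = SQLParamData(hstmt, &token);       // driver asks for the parameter's data
    if (ret != SQL_NEED_DATA)
        return ret;

    for (SQLLEN offset = 0; offset < cb; )
    {
        SQLLEN remaining = (cb - offset < maxchunk) ? (cb - offset) : maxchunk;
        SQLPutData(hstmt, (SQLPOINTER)(data + offset), remaining);
        offset += remaining;
    }

    return SQLParamData(hstmt, &token);      // completes execution once all data is sent
}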
8 changes: 0 additions & 8 deletions src/cursor.h
@@ -102,12 +102,4 @@ void Cursor_init();
Cursor* Cursor_New(Connection* cnxn);
PyObject* Cursor_execute(PyObject* self, PyObject* args);

enum
{
// The parameter size we'll try to bind. If a buffer is larger than this, we'll use SQLPutData.

MAX_VARCHAR_BUFFER = 255, // MS Access
MAX_VARBINARY_BUFFER = 510, // MS Access
};

#endif
6 changes: 3 additions & 3 deletions src/params.cpp
@@ -463,7 +463,7 @@ static bool BindParam(Cursor* cur, int iParam, PyObject* param, byte** ppbParam)
char* pch = PyString_AS_STRING(param);
int len = PyString_GET_SIZE(param);

if (len <= MAX_VARCHAR_BUFFER)
if (len <= cur->cnxn->varchar_maxlength)
{
fSqlType = SQL_VARCHAR;
fCType = SQL_C_CHAR;
@@ -487,7 +487,7 @@ static bool BindParam(Cursor* cur, int iParam, PyObject* param, byte** ppbParam)
Py_UNICODE* pch = PyUnicode_AsUnicode(param);
int len = PyUnicode_GET_SIZE(param);

if (len <= MAX_VARCHAR_BUFFER)
if (len <= cur->cnxn->varchar_maxlength)
{
fSqlType = SQL_WVARCHAR;
fCType = SQL_C_WCHAR;
@@ -659,7 +659,7 @@ static bool BindParam(Cursor* cur, int iParam, PyObject* param, byte** ppbParam)
const char* pb;
int cb = PyBuffer_GetMemory(param, &pb);

if (cb != -1 && cb <= MAX_VARBINARY_BUFFER)
if (cb != -1 && cb <= cur->cnxn->binary_maxlength)
{
// There is one segment, so we can bind directly into the buffer object.

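The threshold change above is the heart of the fix: a value at or below the driver's reported maximum is bound directly as SQL_VARCHAR (or SQL_WVARCHAR), while anything longer becomes SQL_LONGVARCHAR with data-at-execution. A hypothetical helper illustrating that decision for narrow strings (names and structure are illustrative, not pyodbc's actual BindParam):

#include <sql.h>
#include <sqlext.h>

// Illustrative only: bind as VARCHAR when the value fits the driver's maximum,
// otherwise as LONGVARCHAR with the data deferred to SQLPutData.  pcbValue must
// remain valid until SQLExecute is called.
static SQLRETURN BindStringParam(SQLHSTMT hstmt, SQLUSMALLINT iParam,
                                 const char* value, SQLLEN len,
                                 SQLLEN varchar_maxlength, SQLLEN* pcbValue)
{
    if (len <= varchar_maxlength)
    {
        // Small enough: bind the buffer directly as SQL_VARCHAR.
        *pcbValue = len;
        return SQLBindParameter(hstmt, iParam, SQL_PARAM_INPUT, SQL_C_CHAR, SQL_VARCHAR,
                                (SQLULEN)len, 0, (SQLPOINTER)value, len, pcbValue);
    }

    // Too large: bind as SQL_LONGVARCHAR and stream the value later with SQLPutData.
    *pcbValue = SQL_LEN_DATA_AT_EXEC(len);
    return SQLBindParameter(hstmt, iParam, SQL_PARAM_INPUT, SQL_C_CHAR, SQL_LONGVARCHAR,
                            (SQLULEN)len, 0, (SQLPOINTER)value, 0, pcbValue);
}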
10 changes: 9 additions & 1 deletion tests/sqlservertests.py
@@ -227,6 +227,15 @@ def test_varchar_many(self):
def test_varchar_upperlatin(self):
self._test_strtype('varchar', 'á')

def test_varchar_mismatch(self):
# Reported by Andy Hochhaus in the pyodbc group: In 2.1.7 and earlier, a hardcoded length of 255 was used to
# determine whether a parameter was bound as a SQL_VARCHAR or SQL_LONGVARCHAR. Apparently SQL Server chokes if
# we bind as a SQL_LONGVARCHAR and the target column size is 8000 or less, which it considers just SQL_VARCHAR.
# This means binding a 256-character value would cause problems if compared with a VARCHAR column under
# 8001.  We now use SQLGetTypeInfo to determine when to switch.
self.cursor.execute("create table t1(c varchar(300))")
self.cursor.execute("select * from t1 where c=?", 'a' * 300)

#
# unicode
#
@@ -1051,7 +1060,6 @@ def test_none_param(self):
self.assertEqual(row.blob, None)



def main():
from optparse import OptionParser
parser = OptionParser(usage=usage)
