Single source base for Python 2 and 3

Commit c278e74e44341a4333dbd2b471758b651abc7558 (1 parent: 8fa1cc4), committed by @mkleehammer on Nov 3, 2011
Showing with 10,675 additions and 2,716 deletions.
  1. +0 −1 .gitignore
  2. +0 −1 MANIFEST.in
  3. +5 −0 README.rst
  4. +10 −66 setup.py
  5. +8 −3 src/buffer.cpp
  6. +7 −2 src/buffer.h
  7. +215 −206 src/cnxninfo.cpp
  8. +43 −43 src/cnxninfo.h
  9. +29 −12 src/connection.cpp
  10. +3 −3 src/connection.h
  11. +92 −74 src/cursor.cpp
  12. +3 −3 src/cursor.h
  13. +32 −32 src/dbspecific.h
  14. +10 −3 src/errors.cpp
  15. +64 −60 src/errors.h
  16. +733 −667 src/getdata.cpp
  17. +15 −15 src/getdata.h
  18. +813 −725 src/params.cpp
  19. +13 −11 src/params.h
  20. +24 −55 src/pyodbc.h
  21. +31 −0 src/pyodbccompat.cpp
  22. +106 −0 src/pyodbccompat.h
  23. +110 −110 src/pyodbcdbg.cpp
  24. +135 −154 src/pyodbcmodule.cpp
  25. +2 −5 src/pyodbcmodule.h
  26. +181 −95 src/row.cpp
  27. +1 −1 src/row.h
  28. +222 −162 src/sqlwchar.cpp
  29. +62 −61 src/sqlwchar.h
  30. +79 −79 src/wrapper.h
  31. 0 {tests → tests2}/accesstests.py
  32. 0 {tests → tests2}/dbapi20.py
  33. 0 {tests → tests2}/dbapitests.py
  34. 0 {tests → tests2}/empty.accdb
  35. 0 {tests → tests2}/empty.mdb
  36. 0 {tests → tests2}/exceltests.py
  37. +14 −67 tests/sqlservertests.py → tests2/informixtests.py
  38. +690 −0 tests2/mysqltests.py
  39. 0 {tests → tests2}/pgtests.py
  40. 0 {tests → tests2}/sqlite.db
  41. 0 {tests → tests2}/sqlitetests.py
  42. +1,336 −0 tests2/sqlservertests.py
  43. 0 {tests → tests2}/test.xls
  44. +25 −0 tests2/testbase.py
  45. 0 {tests → tests2}/testutils.py
  46. +648 −0 tests3/accesstests.py
  47. +850 −0 tests3/dbapi20.py
  48. +43 −0 tests3/dbapitests.py
  49. +140 −0 tests3/exceltests.py
  50. +1,258 −0 tests3/informixtests.py
  51. 0 {tests → tests3}/mysqltests.py
  52. +424 −0 tests3/pgtests.py
  53. +753 −0 tests3/sqlitetests.py
  54. +1,304 −0 tests3/sqlservertests.py
  55. +17 −0 tests3/test.py
  56. +25 −0 tests3/testbase.py
  57. +100 −0 tests3/testutils.py
1 .gitignore
@@ -6,7 +6,6 @@ dist
*.pyc
*.pyo
tmp
-web/*.cmd
TAGS
pyodbc.egg-info
pyodbc.conf
1 MANIFEST.in
@@ -2,7 +2,6 @@ include src/*.h
include src/*.cpp
include tests/*
include README.rst
-include utils/*
# Include this file, needed for bdist_rpm
include MANIFEST.in
5 README.rst
@@ -7,6 +7,7 @@ specification.
:homepage: http://code.google.com/p/pyodbc
:source: http://github.com/mkleehammer/pyodbc
+:source: http://code.google.com/p/pyodbc/source/list
This module requires:
@@ -21,6 +22,10 @@ Source can be obtained at
http://github.com/mkleehammer/pyodbc/tree
+ or
+
+ http://code.google.com/p/pyodbc/source/list
+
To build from source, either check the source out of version control or download a source
extract and run::
76 setup.py
@@ -2,7 +2,6 @@
import sys, os, re, platform
from os.path import exists, abspath, dirname, join, isdir
-from ConfigParser import SafeConfigParser
try:
# Allow use of setuptools so eggs can be built.
@@ -15,6 +14,9 @@
OFFICIAL_BUILD = 9999
+def _print(s):
+ # Python 2/3 compatibility
+ sys.stdout.write(s + '\n')
class VersionCommand(Command):
@@ -30,7 +32,7 @@ def finalize_options(self):
def run(self):
version_str, version = get_version()
- print version_str
+ sys.stdout.write(version_str + '\n')
class TagsCommand(Command):
@@ -110,7 +112,7 @@ def get_compiler_settings(version_str):
for option in ['assert', 'trace', 'leak-check']:
try:
sys.argv.remove('--%s' % option)
- settings['define_macros'].append(('PYODBC_%s' % option.replace('-', '_'), 1))
+ settings['define_macros'].append(('PYODBC_%s' % option.replace('-', '_').upper(), 1))
except ValueError:
pass
@@ -144,67 +146,9 @@ def get_compiler_settings(version_str):
# What is the proper way to detect iODBC, MyODBC, unixODBC, etc.?
settings['libraries'].append('odbc')
- get_config(settings, version_str)
-
return settings
-def get_config(settings, version_str):
- """
- Adds configuration macros from pyodbc.conf to the compiler settings dictionary.
-
- If pyodbc.conf does not exist, it will compile and run the pyodbcconf utility.
-
- This is similar to what autoconf provides, but only uses the infrastructure provided by Python, which is important
- for building on *nix and Windows.
- """
- filename = 'pyodbc.conf'
-
- # If the file exists, make sure that the version in it is the same as the version we are compiling. Otherwise we
- # might have added configuration items that aren't there.
- if exists(filename):
- try:
- config = SafeConfigParser()
- config.read(filename)
-
- if (not config.has_option('define_macros', 'pyodbc_version') or
- config.get('define_macros', 'pyodbc_version') != version_str):
- print 'Recreating pyodbc.conf for new version'
- os.remove(filename)
-
- except:
- config = None
- # Assume the file has been corrupted. Delete and recreate
- print 'Unable to read %s. Recreating' % filename
- os.remove(filename)
-
- if not exists('pyodbc.conf'):
- # Doesn't exist, so build the pyodbcconf module and use it.
-
- oldargv = sys.argv
- sys.argv = [ oldargv[0], 'build' ]
-
- setup(name="pyodbcconf",
- ext_modules = [ Extension('pyodbcconf',
- [join('utils', 'pyodbcconf', 'pyodbcconf.cpp')],
- **settings) ])
-
- sys.argv = oldargv
-
- add_to_path()
-
- import pyodbcconf
- pyodbcconf.configure()
-
- config = SafeConfigParser()
- config.read(filename)
-
- for section in config.sections():
- for key, value in config.items(section):
- settings[section].append( (key.upper(), value) )
-
-
-
def add_to_path():
"""
Prepends the build directory to the path so pyodbcconf can be imported without installing it.
@@ -240,7 +184,7 @@ def get_version():
1. If in a git repository, use the latest tag (git describe).
2. If in an unzipped source directory (from setup.py sdist),
read the version from the PKG-INFO file.
- 3. Use 2.1.0.0 and complain a lot.
+ 3. Use 3.0.0.0 and complain a lot.
"""
# My goal is to (1) provide accurate tags for official releases but (2) not have to manage tags for every test
# release.
@@ -268,8 +212,8 @@ def get_version():
name, numbers = _get_version_git()
if not numbers:
- print 'WARNING: Unable to determine version. Using 2.1.0.0'
- name, numbers = '2.1.0-unsupported', [2,1,0,0]
+ _print('WARNING: Unable to determine version. Using 3.0.0.0')
+ name, numbers = '3.0.0-unsupported', [3,0,0,0]
return name, numbers
@@ -290,9 +234,9 @@ def _get_version_pkginfo():
def _get_version_git():
- n, result = getoutput('git describe --tags --match 2.*')
+ n, result = getoutput('git describe --tags --match 3.*')
if n:
- print 'WARNING: git describe failed with: %s %s' % (n, result)
+ _print('WARNING: git describe failed with: %s %s' % (n, result))
return None, None
match = re.match(r'(\d+).(\d+).(\d+) (?: -(\d+)-g[0-9a-z]+)?', result, re.VERBOSE)
11 src/buffer.cpp
@@ -10,15 +10,19 @@
// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#include "pyodbc.h"
+
+#if PY_MAJOR_VERSION < 3
+
+
#include "buffer.h"
#include "pyodbcmodule.h"
Py_ssize_t
PyBuffer_GetMemory(PyObject* buffer, const char** pp)
{
- PyBufferProcs* procs = buffer->ob_type->tp_as_buffer;
+ PyBufferProcs* procs = Py_TYPE(buffer)->tp_as_buffer;
- if (!procs || !PyType_HasFeature(buffer->ob_type, Py_TPFLAGS_HAVE_GETCHARBUFFER))
+ if (!procs || !PyType_HasFeature(Py_TYPE(buffer), Py_TPFLAGS_HAVE_GETCHARBUFFER))
{
// Can't access the memory directly because the buffer object doesn't support it.
return -1;
@@ -53,6 +57,7 @@ PyBuffer_Size(PyObject* self)
}
Py_ssize_t total_len = 0;
- self->ob_type->tp_as_buffer->bf_getsegcount(self, &total_len);
+ Py_TYPE(self)->tp_as_buffer->bf_getsegcount(self, &total_len);
return total_len;
}
+#endif
9 src/buffer.h
@@ -12,6 +12,8 @@
#ifndef _BUFFER_H
#define _BUFFER_H
+#if PY_MAJOR_VERSION < 3
+
// If the buffer object has a single, accessible segment, returns the length of the buffer. If 'pp' is not NULL, the
// address of the segment is also returned. If there is more than one segment or if it cannot be accessed, -1 is
// returned and 'pp' is not modified.
@@ -36,7 +38,7 @@ class BufferSegmentIterator
BufferSegmentIterator(PyObject* _pBuffer)
{
pBuffer = _pBuffer;
- PyBufferProcs* procs = pBuffer->ob_type->tp_as_buffer;
+ PyBufferProcs* procs = Py_TYPE(pBuffer)->tp_as_buffer;
iSegment = 0;
cSegments = procs->bf_getsegcount(pBuffer, 0);
}
@@ -46,10 +48,13 @@ class BufferSegmentIterator
if (iSegment >= cSegments)
return false;
- PyBufferProcs* procs = pBuffer->ob_type->tp_as_buffer;
+ PyBufferProcs* procs = Py_TYPE(pBuffer)->tp_as_buffer;
cb = procs->bf_getreadbuffer(pBuffer, iSegment++, (void**)&pb);
return true;
}
};
+#endif // PY_MAJOR_VERSION
+
+
#endif
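
The buffer.cpp/buffer.h changes show two patterns this commit applies throughout src/: direct ob_type access is replaced with the Py_TYPE() macro, and code that only makes sense under the old Python 2 buffer protocol is fenced off with PY_MAJOR_VERSION guards. A minimal sketch of that single-source pattern, using a hypothetical helper (not code from the commit):

    #include <Python.h>

    // Sketch: check the runtime type with Py_TYPE() instead of touching ob_type,
    // and compile Python-2-only branches (the old buffer protocol) out on Python 3.
    // Py_TYPE() is available on 2.6+ and 3.x. Hypothetical helper, not part of pyodbc.
    static int classify_object(PyObject* o)
    {
        if (Py_TYPE(o) == &PyUnicode_Type)      // exact-type check works on both lines
            return 1;

    #if PY_MAJOR_VERSION < 3
        if (PyBuffer_Check(o))                  // old buffer objects exist only on Python 2
            return 2;
    #endif

        return 0;
    }
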
421 src/cnxninfo.cpp
@@ -1,206 +1,215 @@
-
-// There is a bunch of information we want from connections which requires calls to SQLGetInfo when we first connect.
-// However, this isn't something we really want to do for every connection, so we cache it by the hash of the
-// connection string. When we create a new connection, we copy the values into the connection structure.
-//
-// We hash the connection string since it may contain sensitive information we wouldn't want exposed in a core dump.
-
-#include "pyodbc.h"
-#include "cnxninfo.h"
-#include "connection.h"
-#include "wrapper.h"
-
-// Maps from a Python string of the SHA1 hash to a CnxnInfo object.
-//
-static PyObject* map_hash_to_info;
-
-static PyObject* hashlib; // The hashlib module if Python 2.5+
-static PyObject* sha; // The sha module if Python 2.4
-static PyObject* update; // The string 'update', used in GetHash.
-
-void CnxnInfo_init()
-{
- // Called during startup to give us a chance to import the hash code. If we can't find it, we'll print a warning
- // to the console and not cache anything.
-
- // First try hashlib which was added in 2.5. 2.6 complains using warnings which we don't want affecting the
- // caller.
-
- map_hash_to_info = PyDict_New();
-
- update = PyString_FromString("update");
-
- hashlib = PyImport_ImportModule("hashlib");
- if (!hashlib)
- {
- sha = PyImport_ImportModule("sha");
- }
-}
-
-static PyObject* GetHash(PyObject* p)
-{
- if (hashlib)
- {
- Object hash(PyObject_CallMethod(hashlib, "new", "s", "sha1"));
- if (!hash.IsValid())
- return 0;
-
- PyObject_CallMethodObjArgs(hash, update, p, 0);
- return PyObject_CallMethod(hash, "hexdigest", 0);
- }
-
- if (sha)
- {
- Object hash(PyObject_CallMethod(sha, "new", 0));
- if (!hash.IsValid())
- return 0;
-
- PyObject_CallMethodObjArgs(hash, update, p, 0);
- return PyObject_CallMethod(hash, "hexdigest", 0);
- }
-
- return 0;
-}
-
-
-static PyObject* CnxnInfo_New(Connection* cnxn)
-{
- CnxnInfo* p = PyObject_NEW(CnxnInfo, &CnxnInfoType);
- if (!p)
- return 0;
- Object info((PyObject*)p);
-
- // set defaults
- p->odbc_major = 3;
- p->odbc_minor = 50;
- p->supports_describeparam = false;
- p->datetime_precision = 19; // default: "yyyy-mm-dd hh:mm:ss"
-
- // WARNING: The GIL lock is released for the *entire* function here. Do not touch any objects, call Python APIs,
- // etc. We are simply making ODBC calls and setting atomic values (ints & chars). Also, make sure the lock gets
- // released -- do not add an early exit.
-
- SQLRETURN ret;
- Py_BEGIN_ALLOW_THREADS
-
- char szVer[20];
- SQLSMALLINT cch = 0;
- ret = SQLGetInfo(cnxn->hdbc, SQL_DRIVER_ODBC_VER, szVer, _countof(szVer), &cch);
- if (SQL_SUCCEEDED(ret))
- {
- char* dot = strchr(szVer, '.');
- if (dot)
- {
- *dot = '\0';
- p->odbc_major=(char)atoi(szVer);
- p->odbc_minor=(char)atoi(dot + 1);
- }
- }
-
- char szYN[2];
- ret = SQLGetInfo(cnxn->hdbc, SQL_DESCRIBE_PARAMETER, szYN, _countof(szYN), &cch);
- if (SQL_SUCCEEDED(ret))
- {
- p->supports_describeparam = szYN[0] == 'Y';
- }
-
- // These defaults are tiny, but are necessary for Access.
- p->varchar_maxlength = 255;
- p->wvarchar_maxlength = 255;
- p->binary_maxlength = 510;
-
- HSTMT hstmt = 0;
- if (SQL_SUCCEEDED(SQLAllocHandle(SQL_HANDLE_STMT, cnxn->hdbc, &hstmt)))
- {
- SQLINTEGER columnsize;
- if (SQL_SUCCEEDED(SQLGetTypeInfo(hstmt, SQL_TYPE_TIMESTAMP)) && SQL_SUCCEEDED(SQLFetch(hstmt)))
- {
- if (SQL_SUCCEEDED(SQLGetData(hstmt, 3, SQL_INTEGER, &columnsize, sizeof(columnsize), 0)))
- p->datetime_precision = (int)columnsize;
-
- SQLFreeStmt(hstmt, SQL_CLOSE);
- }
-
- if (SQL_SUCCEEDED(SQLGetTypeInfo(hstmt, SQL_VARCHAR)) && SQL_SUCCEEDED(SQLFetch(hstmt)))
- {
- if (SQL_SUCCEEDED(SQLGetData(hstmt, 3, SQL_INTEGER, &columnsize, sizeof(columnsize), 0)))
- p->varchar_maxlength = (int)columnsize;
-
- SQLFreeStmt(hstmt, SQL_CLOSE);
- }
-
- if (SQL_SUCCEEDED(SQLGetTypeInfo(hstmt, SQL_WVARCHAR)) && SQL_SUCCEEDED(SQLFetch(hstmt)))
- {
- if (SQL_SUCCEEDED(SQLGetData(hstmt, 3, SQL_INTEGER, &columnsize, sizeof(columnsize), 0)))
- p->wvarchar_maxlength = (int)columnsize;
-
- SQLFreeStmt(hstmt, SQL_CLOSE);
- }
-
- if (SQL_SUCCEEDED(SQLGetTypeInfo(hstmt, SQL_BINARY)) && SQL_SUCCEEDED(SQLFetch(hstmt)))
- {
- if (SQL_SUCCEEDED(SQLGetData(hstmt, 3, SQL_INTEGER, &columnsize, sizeof(columnsize), 0)))
- p->binary_maxlength = (int)columnsize;
-
- SQLFreeStmt(hstmt, SQL_CLOSE);
- }
- }
-
- Py_END_ALLOW_THREADS
-
- // WARNING: Released the lock now.
-
- return info.Detach();
-}
-
-
-PyObject* GetConnectionInfo(PyObject* pConnectionString, Connection* cnxn)
-{
- // Looks-up or creates a CnxnInfo object for the given connection string. The connection string can be a Unicode
- // or String object.
-
- Object hash(GetHash(pConnectionString));
-
- if (hash.IsValid())
- {
- PyObject* info = PyDict_GetItem(map_hash_to_info, hash);
-
- if (info)
- {
- Py_INCREF(info);
- return info;
- }
- }
-
- PyObject* info = CnxnInfo_New(cnxn);
- if (info != 0 && hash.IsValid())
- PyDict_SetItem(map_hash_to_info, hash, info);
-
- return info;
-}
-
-
-PyTypeObject CnxnInfoType =
-{
- PyObject_HEAD_INIT(0)
- 0, // ob_size
- "pyodbc.CnxnInfo", // tp_name
- sizeof(CnxnInfo), // tp_basicsize
- 0, // tp_itemsize
- 0, // destructor tp_dealloc
- 0, // tp_print
- 0, // tp_getattr
- 0, // tp_setattr
- 0, // tp_compare
- 0, // tp_repr
- 0, // tp_as_number
- 0, // tp_as_sequence
- 0, // tp_as_mapping
- 0, // tp_hash
- 0, // tp_call
- 0, // tp_str
- 0, // tp_getattro
- 0, // tp_setattro
- 0, // tp_as_buffer
- Py_TPFLAGS_DEFAULT, // tp_flags
-};
+
+// There is a bunch of information we want from connections which requires calls to SQLGetInfo when we first connect.
+// However, this isn't something we really want to do for every connection, so we cache it by the hash of the
+// connection string. When we create a new connection, we copy the values into the connection structure.
+//
+// We hash the connection string since it may contain sensitive information we wouldn't want exposed in a core dump.
+
+#include "pyodbc.h"
+#include "cnxninfo.h"
+#include "connection.h"
+#include "wrapper.h"
+
+// Maps from a Python string of the SHA1 hash to a CnxnInfo object.
+//
+static PyObject* map_hash_to_info;
+
+static PyObject* hashlib; // The hashlib module if Python 2.5+
+static PyObject* sha; // The sha module if Python 2.4
+static PyObject* update; // The string 'update', used in GetHash.
+
+void CnxnInfo_init()
+{
+ // Called during startup to give us a chance to import the hash code. If we can't find it, we'll print a warning
+ // to the console and not cache anything.
+
+ // First try hashlib which was added in 2.5. 2.6 complains using warnings which we don't want affecting the
+ // caller.
+
+ map_hash_to_info = PyDict_New();
+
+ update = PyString_FromString("update");
+
+ hashlib = PyImport_ImportModule("hashlib");
+ if (!hashlib)
+ {
+ sha = PyImport_ImportModule("sha");
+ }
+}
+
+static PyObject* GetHash(PyObject* p)
+{
+#if PY_MAJOR_VERSION >= 3
+ Object bytes(PyUnicode_EncodeUTF8(PyUnicode_AS_UNICODE(p), PyUnicode_GET_SIZE(p), 0));
+ if (!bytes)
+ return 0;
+ p = bytes.Get();
+#endif
+
+ if (hashlib)
+ {
+ Object hash(PyObject_CallMethod(hashlib, "new", "s", "sha1"));
+ if (!hash.IsValid())
+ return 0;
+
+ PyObject_CallMethodObjArgs(hash, update, p, 0);
+ return PyObject_CallMethod(hash, "hexdigest", 0);
+ }
+
+ if (sha)
+ {
+ Object hash(PyObject_CallMethod(sha, "new", 0));
+ if (!hash.IsValid())
+ return 0;
+
+ PyObject_CallMethodObjArgs(hash, update, p, 0);
+ return PyObject_CallMethod(hash, "hexdigest", 0);
+ }
+
+ return 0;
+}
+
+
+static PyObject* CnxnInfo_New(Connection* cnxn)
+{
+#ifdef _MSC_VER
+#pragma warning(disable : 4365)
+#endif
+ CnxnInfo* p = PyObject_NEW(CnxnInfo, &CnxnInfoType);
+ if (!p)
+ return 0;
+ Object info((PyObject*)p);
+
+ // set defaults
+ p->odbc_major = 3;
+ p->odbc_minor = 50;
+ p->supports_describeparam = false;
+ p->datetime_precision = 19; // default: "yyyy-mm-dd hh:mm:ss"
+
+ // WARNING: The GIL lock is released for the *entire* function here. Do not touch any objects, call Python APIs,
+ // etc. We are simply making ODBC calls and setting atomic values (ints & chars). Also, make sure the lock gets
+ // released -- do not add an early exit.
+
+ SQLRETURN ret;
+ Py_BEGIN_ALLOW_THREADS
+
+ char szVer[20];
+ SQLSMALLINT cch = 0;
+ ret = SQLGetInfo(cnxn->hdbc, SQL_DRIVER_ODBC_VER, szVer, _countof(szVer), &cch);
+ if (SQL_SUCCEEDED(ret))
+ {
+ char* dot = strchr(szVer, '.');
+ if (dot)
+ {
+ *dot = '\0';
+ p->odbc_major=(char)atoi(szVer);
+ p->odbc_minor=(char)atoi(dot + 1);
+ }
+ }
+
+ char szYN[2];
+ ret = SQLGetInfo(cnxn->hdbc, SQL_DESCRIBE_PARAMETER, szYN, _countof(szYN), &cch);
+ if (SQL_SUCCEEDED(ret))
+ {
+ p->supports_describeparam = szYN[0] == 'Y';
+ }
+
+ // These defaults are tiny, but are necessary for Access.
+ p->varchar_maxlength = 255;
+ p->wvarchar_maxlength = 255;
+ p->binary_maxlength = 510;
+
+ HSTMT hstmt = 0;
+ if (SQL_SUCCEEDED(SQLAllocHandle(SQL_HANDLE_STMT, cnxn->hdbc, &hstmt)))
+ {
+ SQLINTEGER columnsize;
+ if (SQL_SUCCEEDED(SQLGetTypeInfo(hstmt, SQL_TYPE_TIMESTAMP)) && SQL_SUCCEEDED(SQLFetch(hstmt)))
+ {
+ if (SQL_SUCCEEDED(SQLGetData(hstmt, 3, SQL_INTEGER, &columnsize, sizeof(columnsize), 0)))
+ p->datetime_precision = (int)columnsize;
+
+ SQLFreeStmt(hstmt, SQL_CLOSE);
+ }
+
+ if (SQL_SUCCEEDED(SQLGetTypeInfo(hstmt, SQL_VARCHAR)) && SQL_SUCCEEDED(SQLFetch(hstmt)))
+ {
+ if (SQL_SUCCEEDED(SQLGetData(hstmt, 3, SQL_INTEGER, &columnsize, sizeof(columnsize), 0)))
+ p->varchar_maxlength = (int)columnsize;
+
+ SQLFreeStmt(hstmt, SQL_CLOSE);
+ }
+
+ if (SQL_SUCCEEDED(SQLGetTypeInfo(hstmt, SQL_WVARCHAR)) && SQL_SUCCEEDED(SQLFetch(hstmt)))
+ {
+ if (SQL_SUCCEEDED(SQLGetData(hstmt, 3, SQL_INTEGER, &columnsize, sizeof(columnsize), 0)))
+ p->wvarchar_maxlength = (int)columnsize;
+
+ SQLFreeStmt(hstmt, SQL_CLOSE);
+ }
+
+ if (SQL_SUCCEEDED(SQLGetTypeInfo(hstmt, SQL_BINARY)) && SQL_SUCCEEDED(SQLFetch(hstmt)))
+ {
+ if (SQL_SUCCEEDED(SQLGetData(hstmt, 3, SQL_INTEGER, &columnsize, sizeof(columnsize), 0)))
+ p->binary_maxlength = (int)columnsize;
+
+ SQLFreeStmt(hstmt, SQL_CLOSE);
+ }
+ }
+
+ Py_END_ALLOW_THREADS
+
+ // WARNING: Released the lock now.
+
+ return info.Detach();
+}
+
+
+PyObject* GetConnectionInfo(PyObject* pConnectionString, Connection* cnxn)
+{
+ // Looks-up or creates a CnxnInfo object for the given connection string. The connection string can be a Unicode
+ // or String object.
+
+ Object hash(GetHash(pConnectionString));
+
+ if (hash.IsValid())
+ {
+ PyObject* info = PyDict_GetItem(map_hash_to_info, hash);
+
+ if (info)
+ {
+ Py_INCREF(info);
+ return info;
+ }
+ }
+
+ PyObject* info = CnxnInfo_New(cnxn);
+ if (info != 0 && hash.IsValid())
+ PyDict_SetItem(map_hash_to_info, hash, info);
+
+ return info;
+}
+
+
+PyTypeObject CnxnInfoType =
+{
+ PyVarObject_HEAD_INIT(0, 0)
+ "pyodbc.CnxnInfo", // tp_name
+ sizeof(CnxnInfo), // tp_basicsize
+ 0, // tp_itemsize
+ 0, // destructor tp_dealloc
+ 0, // tp_print
+ 0, // tp_getattr
+ 0, // tp_setattr
+ 0, // tp_compare
+ 0, // tp_repr
+ 0, // tp_as_number
+ 0, // tp_as_sequence
+ 0, // tp_as_mapping
+ 0, // tp_hash
+ 0, // tp_call
+ 0, // tp_str
+ 0, // tp_getattro
+ 0, // tp_setattro
+ 0, // tp_as_buffer
+ Py_TPFLAGS_DEFAULT, // tp_flags
+};
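
The only functional change in GetHash is the new Python 3 branch: hashlib's update() requires bytes there, so the Unicode connection string is encoded to UTF-8 before hashing. A rough sketch of that step in isolation (the commit uses PyUnicode_EncodeUTF8; this sketch uses the simpler PyUnicode_AsUTF8String, and the helper name is made up):

    #include <Python.h>

    // Sketch: produce a hashable object from the connection string. On Python 3 the
    // unicode string is encoded to UTF-8 bytes; on Python 2 a str can be fed to
    // hash.update() directly. Returns a new reference, or 0 on error.
    static PyObject* hash_input(PyObject* connect_string)
    {
    #if PY_MAJOR_VERSION >= 3
        return PyUnicode_AsUTF8String(connect_string);
    #else
        Py_INCREF(connect_string);
        return connect_string;
    #endif
    }
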
86 src/cnxninfo.h
@@ -1,43 +1,43 @@
-
-// Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
-// documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
-// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
-// permit persons to whom the Software is furnished to do so.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
-// WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
-// OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
-// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
-#ifndef CNXNINFO_H
-#define CNXNINFO_H
-
-struct Connection;
-extern PyTypeObject CnxnInfoType;
-
-struct CnxnInfo
-{
- PyObject_HEAD
-
- // The description of these fields is in the connection structure.
-
- char odbc_major;
- char odbc_minor;
-
- bool supports_describeparam;
- int datetime_precision;
-
- // These are from SQLGetTypeInfo.column_size, so the char ones are in characters, not bytes.
- int varchar_maxlength;
- int wvarchar_maxlength;
- int binary_maxlength;
-};
-
-void CnxnInfo_init();
-
-// Looks-up or creates a CnxnInfo object for the given connection string. The connection string can be a Unicode or
-// String object.
-
-PyObject* GetConnectionInfo(PyObject* pConnectionString, Connection* cnxn);
-
-#endif // CNXNINFO_H
+
+// Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
+// documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
+// permit persons to whom the Software is furnished to do so.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
+// WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
+// OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+#ifndef CNXNINFO_H
+#define CNXNINFO_H
+
+struct Connection;
+extern PyTypeObject CnxnInfoType;
+
+struct CnxnInfo
+{
+ PyObject_HEAD
+
+ // The description of these fields is in the connection structure.
+
+ char odbc_major;
+ char odbc_minor;
+
+ bool supports_describeparam;
+ int datetime_precision;
+
+ // These are from SQLGetTypeInfo.column_size, so the char ones are in characters, not bytes.
+ int varchar_maxlength;
+ int wvarchar_maxlength;
+ int binary_maxlength;
+};
+
+void CnxnInfo_init();
+
+// Looks-up or creates a CnxnInfo object for the given connection string. The connection string can be a Unicode or
+// String object.
+
+PyObject* GetConnectionInfo(PyObject* pConnectionString, Connection* cnxn);
+
+#endif // CNXNINFO_H
41 src/connection.cpp
@@ -90,14 +90,17 @@ static bool Connect(PyObject* pConnectString, HDBC hdbc, bool fAnsi, long timeou
// The Unicode function failed. If the error is that the driver doesn't have a Unicode version (IM001), continue
// to the ANSI version.
-
- PyObject* error = GetErrorFromHandle("SQLDriverConnectW", hdbc, SQL_NULL_HANDLE);
- if (!HasSqlState(error, "IM001"))
- {
- RaiseErrorFromException(error);
- return false;
- }
- Py_XDECREF(error);
+ //
+ // I've commented this out since a number of common drivers are returning different errors. The MySQL 5
+ // driver, for example, returns IM002 "Data source name not found...".
+ //
+ // PyObject* error = GetErrorFromHandle("SQLDriverConnectW", hdbc, SQL_NULL_HANDLE);
+ // if (!HasSqlState(error, "IM001"))
+ // {
+ // RaiseErrorFromException(error);
+ // return false;
+ // }
+ // Py_XDECREF(error);
}
SQLCHAR szConnect[cchMax];
@@ -117,8 +120,13 @@ static bool Connect(PyObject* pConnectString, HDBC hdbc, bool fAnsi, long timeou
}
else
{
+#if PY_MAJOR_VERSION < 3
const char* p = PyString_AS_STRING(pConnectString);
memcpy(szConnect, p, (size_t)(PyString_GET_SIZE(pConnectString) + 1));
+#else
+ PyErr_SetString(PyExc_TypeError, "Connection strings must be Unicode");
+ return false;
+#endif
}
Py_BEGIN_ALLOW_THREADS
@@ -171,7 +179,13 @@ PyObject* Connection_New(PyObject* pConnectString, bool fAutoCommit, bool fAnsi,
// Set all variables to something valid, so we don't crash in dealloc if this function fails.
+#ifdef _MSC_VER
+#pragma warning(disable : 4365)
+#endif
Connection* cnxn = PyObject_NEW(Connection, &ConnectionType);
+#ifdef _MSC_VER
+#pragma warning(default : 4365)
+#endif
if (cnxn == 0)
{
@@ -573,10 +587,14 @@ Connection_getinfo(PyObject* self, PyObject* args)
case GI_UINTEGER:
{
SQLUINTEGER n = *(SQLUINTEGER*)szBuffer; // Does this work on PPC or do we need a union?
+#if PY_MAJOR_VERSION >= 3
+ result = PyLong_FromLong((long)n);
+#else
if (n <= (SQLUINTEGER)PyInt_GetMax())
result = PyInt_FromLong((long)n);
else
result = PyLong_FromUnsignedLong(n);
+#endif
break;
}
@@ -679,7 +697,7 @@ Connection_setautocommit(PyObject* self, PyObject* value, void* closure)
return -1;
}
- SQLUINTEGER nAutoCommit = PyObject_IsTrue(value) ? SQL_AUTOCOMMIT_ON : SQL_AUTOCOMMIT_OFF;
+ uintptr_t nAutoCommit = PyObject_IsTrue(value) ? SQL_AUTOCOMMIT_ON : SQL_AUTOCOMMIT_OFF;
SQLRETURN ret;
Py_BEGIN_ALLOW_THREADS
ret = SQLSetConnectAttr(cnxn->hdbc, SQL_ATTR_AUTOCOMMIT, (SQLPOINTER)nAutoCommit, SQL_IS_UINTEGER);
@@ -747,7 +765,7 @@ Connection_settimeout(PyObject* self, PyObject* value, void* closure)
PyErr_SetString(PyExc_TypeError, "Cannot delete the timeout attribute.");
return -1;
}
- int timeout = PyInt_AsLong(value);
+ intptr_t timeout = PyInt_AsLong(value);
if (timeout == -1 && PyErr_Occurred())
return -1;
if (timeout < 0)
@@ -907,8 +925,7 @@ static PyGetSetDef Connection_getseters[] = {
PyTypeObject ConnectionType =
{
- PyObject_HEAD_INIT(0)
- 0, // ob_size
+ PyVarObject_HEAD_INIT(0, 0)
"pyodbc.Connection", // tp_name
sizeof(Connection), // tp_basicsize
0, // tp_itemsize
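
The GI_UINTEGER case in Connection_getinfo illustrates the integer split: Python 2 distinguishes PyInt and PyLong, while Python 3 has a single int type. A condensed sketch of that branch (the commit's Python 3 path casts to long; this sketch uses PyLong_FromUnsignedLong to keep large unsigned values positive):

    #include <Python.h>
    #include <sqltypes.h>

    // Sketch: return an unsigned ODBC value as a Python integer from single-source code.
    static PyObject* uinteger_to_python(SQLUINTEGER n)
    {
    #if PY_MAJOR_VERSION >= 3
        return PyLong_FromUnsignedLong((unsigned long)n);
    #else
        if (n <= (SQLUINTEGER)PyInt_GetMax())
            return PyInt_FromLong((long)n);
        return PyLong_FromUnsignedLong((unsigned long)n);
    #endif
    }
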
6 src/connection.h
@@ -24,7 +24,7 @@ struct Connection
HDBC hdbc;
// Will be SQL_AUTOCOMMIT_ON or SQL_AUTOCOMMIT_OFF.
- SQLUINTEGER nAutoCommit;
+ uintptr_t nAutoCommit;
// The ODBC version the driver supports, from SQLGetInfo(DRIVER_ODBC_VER). This is set after connecting.
char odbc_major;
@@ -44,7 +44,7 @@ struct Connection
bool unicode_results;
// The connection timeout in seconds.
- int timeout;
+ intptr_t timeout;
// These are copied from cnxn info for performance and convenience.
@@ -65,7 +65,7 @@ struct Connection
};
#define Connection_Check(op) PyObject_TypeCheck(op, &ConnectionType)
-#define Connection_CheckExact(op) ((op)->ob_type == &ConnectionType)
+#define Connection_CheckExact(op) (Py_TYPE(op) == &ConnectionType)
/*
* Used by the module's connect function to create new connection objects. If unable to connect to the database, an
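
The nAutoCommit and timeout members change to uintptr_t because SQLSetConnectAttr carries integer attribute values inside a pointer-sized SQLPOINTER argument; holding them in a pointer-sized integer keeps the cast clean on 64-bit builds. A small illustrative helper (not the commit's code):

    #include <stdint.h>
    #include <sql.h>
    #include <sqlext.h>

    // Sketch: an integer attribute value is smuggled through SQLPOINTER, so it is
    // stored in uintptr_t rather than SQLUINTEGER/int.
    static SQLRETURN set_autocommit(SQLHDBC hdbc, bool on)
    {
        uintptr_t value = on ? SQL_AUTOCOMMIT_ON : SQL_AUTOCOMMIT_OFF;
        return SQLSetConnectAttr(hdbc, SQL_ATTR_AUTOCOMMIT, (SQLPOINTER)value, SQL_IS_UINTEGER);
    }
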
166 src/cursor.cpp
@@ -24,6 +24,7 @@
#include "getdata.h"
#include "dbspecific.h"
#include "sqlwchar.h"
+#include <datetime.h>
enum
{
@@ -44,7 +45,7 @@ extern PyTypeObject CursorType;
inline bool
Cursor_Check(PyObject* o)
{
- return o != 0 && o->ob_type == &CursorType;
+ return o != 0 && Py_TYPE(o) == &CursorType;
}
@@ -127,8 +128,7 @@ inline bool IsNumericType(SQLSMALLINT sqltype)
}
-PyObject*
-PythonTypeFromSqlType(Cursor* cur, const SQLCHAR* name, SQLSMALLINT type, bool unicode_results)
+PyObject* PythonTypeFromSqlType(Cursor* cur, const SQLCHAR* name, SQLSMALLINT type, bool unicode_results)
{
// Returns a type object ('int', 'str', etc.) for the given ODBC C type. This is used to populate
// Cursor.description with the type of Python object that will be returned for each column.
@@ -201,7 +201,11 @@ PythonTypeFromSqlType(Cursor* cur, const SQLCHAR* name, SQLSMALLINT type, bool u
case SQL_BINARY:
case SQL_VARBINARY:
case SQL_LONGVARBINARY:
+#if PY_MAJOR_VERSION >= 3
+ pytype = (PyObject*)&PyBytes_Type;
+#else
pytype = (PyObject*)&PyBuffer_Type;
+#endif
break;
@@ -360,36 +364,21 @@ create_name_map(Cursor* cur, SQLSMALLINT field_count, bool lower)
return success;
}
-
-enum free_results_flags
+enum free_results_type
{
- FREE_STATEMENT = 0x01,
- KEEP_STATEMENT = 0x02,
- FREE_PREPARED = 0x04,
- KEEP_PREPARED = 0x08,
-
- STATEMENT_MASK = 0x03,
- PREPARED_MASK = 0x0C
+ FREE_STATEMENT,
+ KEEP_STATEMENT
};
static bool
-free_results(Cursor* self, int flags)
+free_results(Cursor* self, free_results_type free_statement)
{
// Internal function called any time we need to free the memory associated with query results. It is safe to call
// this even when a query has not been executed.
// If we ran out of memory, it is possible that we have a cursor but colinfos is zero. However, we should be
// deleting this object, so the cursor will be freed when the HSTMT is destroyed. */
- I((flags & STATEMENT_MASK) != 0);
- I((flags & PREPARED_MASK) != 0);
-
- if ((flags & PREPARED_MASK) == FREE_PREPARED)
- {
- Py_XDECREF(self->pPreparedSQL);
- self->pPreparedSQL = 0;
- }
-
if (self->colinfos)
{
pyodbc_free(self->colinfos);
@@ -398,21 +387,18 @@ free_results(Cursor* self, int flags)
if (StatementIsValid(self))
{
- if ((flags & STATEMENT_MASK) == FREE_STATEMENT)
+ if (free_statement == FREE_STATEMENT)
{
- SQLRETURN ret;
Py_BEGIN_ALLOW_THREADS
- ret = SQLFreeStmt(self->hstmt, SQL_CLOSE);
+ SQLFreeStmt(self->hstmt, SQL_CLOSE);
Py_END_ALLOW_THREADS;
}
else
{
- SQLRETURN ret;
Py_BEGIN_ALLOW_THREADS
- ret = SQLFreeStmt(self->hstmt, SQL_UNBIND);
- ret = SQLFreeStmt(self->hstmt, SQL_RESET_PARAMS);
+ SQLFreeStmt(self->hstmt, SQL_UNBIND);
+ SQLFreeStmt(self->hstmt, SQL_RESET_PARAMS);
Py_END_ALLOW_THREADS;
-
}
if (self->cnxn->hdbc == SQL_NULL_HANDLE)
@@ -449,7 +435,7 @@ closeimpl(Cursor* cur)
//
// This method releases the GIL lock while closing, so verify the HDBC still exists if you use it.
- free_results(cur, FREE_STATEMENT | FREE_PREPARED);
+ free_results(cur, FREE_STATEMENT);
FreeParameterInfo(cur);
FreeParameterData(cur);
@@ -655,7 +641,7 @@ execute(Cursor* cur, PyObject* pSql, PyObject* params, bool skip_first)
SQLRETURN ret = 0;
- free_results(cur, FREE_STATEMENT | KEEP_PREPARED);
+ free_results(cur, FREE_STATEMENT);
const char* szLastFunction = "";
@@ -682,13 +668,15 @@ execute(Cursor* cur, PyObject* pSql, PyObject* params, bool skip_first)
cur->pPreparedSQL = 0;
szLastFunction = "SQLExecDirect";
+#if PY_MAJOR_VERSION < 3
if (PyString_Check(pSql))
{
Py_BEGIN_ALLOW_THREADS
ret = SQLExecDirect(cur->hstmt, (SQLCHAR*)PyString_AS_STRING(pSql), SQL_NTS);
Py_END_ALLOW_THREADS
}
else
+#endif
{
SQLWChar query(pSql);
if (!query)
@@ -734,46 +722,47 @@ execute(Cursor* cur, PyObject* pSql, PyObject* params, bool skip_first)
if (ret == SQL_NEED_DATA)
{
szLastFunction = "SQLPutData";
- if (PyBuffer_Check(pParam))
+ if (PyUnicode_Check(pParam))
{
- // Buffers can have multiple segments, so we might need multiple writes. Looping through buffers isn't
- // difficult, but we've wrapped it up in an iterator object to keep this loop simple.
+ SQLWChar wchar(pParam); // Will convert to SQLWCHAR if necessary.
- BufferSegmentIterator it(pParam);
- byte* pb;
- SQLLEN cb;
- while (it.Next(pb, cb))
+ Py_ssize_t offset = 0; // in characters
+ Py_ssize_t length = wchar.size(); // in characters
+
+ while (offset < length)
{
+ SQLLEN remaining = min(cur->cnxn->varchar_maxlength, length - offset);
Py_BEGIN_ALLOW_THREADS
- ret = SQLPutData(cur->hstmt, pb, cb);
+ ret = SQLPutData(cur->hstmt, (SQLPOINTER)wchar[offset], (SQLLEN)(remaining * sizeof(SQLWCHAR)));
Py_END_ALLOW_THREADS
if (!SQL_SUCCEEDED(ret))
return RaiseErrorFromHandle("SQLPutData", cur->cnxn->hdbc, cur->hstmt);
+ offset += remaining;
}
}
- else if (PyUnicode_Check(pParam))
+ else if (PyBytes_Check(pParam))
{
- SQLWChar wchar(pParam); // Will convert to SQLWCHAR if necessary.
-
- Py_ssize_t offset = 0; // in characters
- Py_ssize_t length = wchar.size(); // in characters
-
- while (offset < length)
+ const char* p = PyBytes_AS_STRING(pParam);
+ SQLLEN offset = 0;
+ SQLLEN cb = (SQLLEN)PyBytes_GET_SIZE(pParam);
+ while (offset < cb)
{
- SQLLEN remaining = min(cur->cnxn->varchar_maxlength, length - offset);
+ SQLLEN remaining = min(cur->cnxn->varchar_maxlength, cb - offset);
+ TRACE("SQLPutData [%d] (%d) %s\n", offset, remaining, &p[offset]);
Py_BEGIN_ALLOW_THREADS
- ret = SQLPutData(cur->hstmt, (SQLPOINTER)wchar[offset], (SQLLEN)(remaining * sizeof(SQLWCHAR)));
+ ret = SQLPutData(cur->hstmt, (SQLPOINTER)&p[offset], remaining);
Py_END_ALLOW_THREADS
if (!SQL_SUCCEEDED(ret))
return RaiseErrorFromHandle("SQLPutData", cur->cnxn->hdbc, cur->hstmt);
offset += remaining;
}
}
- else if (PyString_Check(pParam))
+#if PY_VERSION_HEX >= 0x02060000
+ else if (PyByteArray_Check(pParam))
{
- const char* p = PyString_AS_STRING(pParam);
+ const char* p = PyByteArray_AS_STRING(pParam);
SQLLEN offset = 0;
- SQLLEN cb = (SQLLEN)PyString_GET_SIZE(pParam);
+ SQLLEN cb = (SQLLEN)PyByteArray_GET_SIZE(pParam);
while (offset < cb)
{
SQLLEN remaining = min(cur->cnxn->varchar_maxlength, cb - offset);
@@ -786,7 +775,26 @@ execute(Cursor* cur, PyObject* pSql, PyObject* params, bool skip_first)
offset += remaining;
}
}
+#endif
+#if PY_MAJOR_VERSION < 3
+ else if (PyBuffer_Check(pParam))
+ {
+ // Buffers can have multiple segments, so we might need multiple writes. Looping through buffers isn't
+ // difficult, but we've wrapped it up in an iterator object to keep this loop simple.
+ BufferSegmentIterator it(pParam);
+ byte* pb;
+ SQLLEN cb;
+ while (it.Next(pb, cb))
+ {
+ Py_BEGIN_ALLOW_THREADS
+ ret = SQLPutData(cur->hstmt, pb, cb);
+ Py_END_ALLOW_THREADS
+ if (!SQL_SUCCEEDED(ret))
+ return RaiseErrorFromHandle("SQLPutData", cur->cnxn->hdbc, cur->hstmt);
+ }
+ }
+#endif
ret = SQL_NEED_DATA;
}
}
@@ -853,10 +861,13 @@ execute(Cursor* cur, PyObject* pSql, PyObject* params, bool skip_first)
return (PyObject*)cur;
}
-inline bool
-IsSequence(PyObject* p)
+inline bool IsSequence(PyObject* p)
{
- return PySequence_Check(p) && !PyString_Check(p) && !PyBuffer_Check(p) && !PyUnicode_Check(p);
+ // Used to determine if the first parameter of execute is a collection of SQL parameters or is a SQL parameter
+ // itself. If the first parameter is a list, tuple, or Row object, then we consider it a collection. Anything
+ // else, including other sequences (e.g. bytearray), are considered SQL parameters.
+
+ return PyList_Check(p) || PyTuple_Check(p) || Row_Check(p);
}
static char execute_doc[] =
@@ -873,8 +884,7 @@ static char execute_doc[] =
"\n"
" cursor.execute(sql, param1, param2)\n";
-PyObject*
-Cursor_execute(PyObject* self, PyObject* args)
+PyObject* Cursor_execute(PyObject* self, PyObject* args)
{
Py_ssize_t cParams = PyTuple_Size(args) - 1;
@@ -1177,7 +1187,7 @@ Cursor_tables(PyObject* self, PyObject* args, PyObject* kwargs)
Cursor* cur = Cursor_Validate(self, CURSOR_REQUIRE_OPEN);
- if (!free_results(cur, FREE_STATEMENT | FREE_PREPARED))
+ if (!free_results(cur, FREE_STATEMENT))
return 0;
SQLRETURN ret = 0;
@@ -1247,7 +1257,7 @@ Cursor_columns(PyObject* self, PyObject* args, PyObject* kwargs)
Cursor* cur = Cursor_Validate(self, CURSOR_REQUIRE_OPEN);
- if (!free_results(cur, FREE_STATEMENT | FREE_PREPARED))
+ if (!free_results(cur, FREE_STATEMENT))
return 0;
SQLRETURN ret = 0;
@@ -1320,7 +1330,7 @@ Cursor_statistics(PyObject* self, PyObject* args, PyObject* kwargs)
Cursor* cur = Cursor_Validate(self, CURSOR_REQUIRE_OPEN);
- if (!free_results(cur, FREE_STATEMENT | FREE_PREPARED))
+ if (!free_results(cur, FREE_STATEMENT))
return 0;
SQLUSMALLINT nUnique = (SQLUSMALLINT)(PyObject_IsTrue(pUnique) ? SQL_INDEX_UNIQUE : SQL_INDEX_ALL);
@@ -1398,7 +1408,7 @@ _specialColumns(PyObject* self, PyObject* args, PyObject* kwargs, SQLUSMALLINT n
Cursor* cur = Cursor_Validate(self, CURSOR_REQUIRE_OPEN);
- if (!free_results(cur, FREE_STATEMENT | FREE_PREPARED))
+ if (!free_results(cur, FREE_STATEMENT))
return 0;
SQLRETURN ret = 0;
@@ -1470,7 +1480,7 @@ Cursor_primaryKeys(PyObject* self, PyObject* args, PyObject* kwargs)
Cursor* cur = Cursor_Validate(self, CURSOR_REQUIRE_OPEN);
- if (!free_results(cur, FREE_STATEMENT | FREE_PREPARED))
+ if (!free_results(cur, FREE_STATEMENT))
return 0;
SQLRETURN ret = 0;
@@ -1542,7 +1552,7 @@ Cursor_foreignKeys(PyObject* self, PyObject* args, PyObject* kwargs)
Cursor* cur = Cursor_Validate(self, CURSOR_REQUIRE_OPEN);
- if (!free_results(cur, FREE_STATEMENT | FREE_PREPARED))
+ if (!free_results(cur, FREE_STATEMENT))
return 0;
SQLRETURN ret = 0;
@@ -1611,7 +1621,7 @@ Cursor_getTypeInfo(PyObject* self, PyObject* args, PyObject* kwargs)
Cursor* cur = Cursor_Validate(self, CURSOR_REQUIRE_OPEN);
- if (!free_results(cur, FREE_STATEMENT | FREE_PREPARED))
+ if (!free_results(cur, FREE_STATEMENT))
return 0;
SQLRETURN ret = 0;
@@ -1659,7 +1669,7 @@ Cursor_nextset(PyObject* self, PyObject* args)
if (ret == SQL_NO_DATA)
{
- free_results(cur, FREE_STATEMENT | KEEP_PREPARED);
+ free_results(cur, FREE_STATEMENT);
Py_RETURN_FALSE;
}
@@ -1672,10 +1682,10 @@ Cursor_nextset(PyObject* self, PyObject* args)
// Note: The SQL Server driver sometimes returns HY007 here if multiple statements (separated by ;) were
// submitted. This is not documented, but I've seen it with multiple successful inserts.
- free_results(cur, FREE_STATEMENT | KEEP_PREPARED);
+ free_results(cur, FREE_STATEMENT);
return RaiseErrorFromHandle("SQLNumResultCols", cur->cnxn->hdbc, cur->hstmt);
}
- free_results(cur, KEEP_STATEMENT | KEEP_PREPARED);
+ free_results(cur, KEEP_STATEMENT);
if (cCols != 0)
{
@@ -1739,7 +1749,7 @@ Cursor_procedureColumns(PyObject* self, PyObject* args, PyObject* kwargs)
Cursor* cur = Cursor_Validate(self, CURSOR_REQUIRE_OPEN);
- if (!free_results(cur, FREE_STATEMENT | FREE_PREPARED))
+ if (!free_results(cur, FREE_STATEMENT))
return 0;
SQLRETURN ret = 0;
@@ -1799,7 +1809,7 @@ Cursor_procedures(PyObject* self, PyObject* args, PyObject* kwargs)
Cursor* cur = Cursor_Validate(self, CURSOR_REQUIRE_OPEN);
- if (!free_results(cur, FREE_STATEMENT | FREE_PREPARED))
+ if (!free_results(cur, FREE_STATEMENT))
return 0;
SQLRETURN ret = 0;
@@ -1950,7 +1960,7 @@ static PyObject* Cursor_setnoscan(PyObject* self, PyObject* value, void *closure
return 0;
}
- SQLUINTEGER noscan = PyObject_IsTrue(value) ? SQL_NOSCAN_ON : SQL_NOSCAN_OFF;
+ uintptr_t noscan = PyObject_IsTrue(value) ? SQL_NOSCAN_ON : SQL_NOSCAN_OFF;
SQLRETURN ret;
Py_BEGIN_ALLOW_THREADS
ret = SQLSetStmtAttr(cursor->hstmt, SQL_ATTR_NOSCAN, (SQLPOINTER)noscan, 0);
@@ -2058,8 +2068,7 @@ static char cursor_doc[] =
PyTypeObject CursorType =
{
- PyObject_HEAD_INIT(0)
- 0, // ob_size
+ PyVarObject_HEAD_INIT(0, 0)
"pyodbc.Cursor", // tp_name
sizeof(Cursor), // tp_basicsize
0, // tp_itemsize
@@ -2078,7 +2087,11 @@ PyTypeObject CursorType =
0, // tp_getattro
0, // tp_setattro
0, // tp_as_buffer
- Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_ITER, // tp_flags
+#if defined(Py_TPFLAGS_HAVE_ITER)
+ Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_ITER,
+#else
+ Py_TPFLAGS_DEFAULT,
+#endif
cursor_doc, // tp_doc
0, // tp_traverse
0, // tp_clear
@@ -2111,7 +2124,13 @@ Cursor_New(Connection* cnxn)
{
// Exported to allow the connection class to create cursors.
+#ifdef _MSC_VER
+#pragma warning(disable : 4365)
+#endif
Cursor* cur = PyObject_NEW(Cursor, &CursorType);
+#ifdef _MSC_VER
+#pragma warning(default : 4365)
+#endif
if (cur)
{
@@ -2162,8 +2181,7 @@ Cursor_New(Connection* cnxn)
return cur;
}
-void
-Cursor_init()
+void Cursor_init()
{
PyDateTime_IMPORT;
}
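
The reworked data-at-execution branch in execute() writes bytes and bytearray parameters with the same chunked SQLPutData loop, bounded by the driver's reported varchar_maxlength. A condensed, standalone form of that loop (hypothetical helper; error handling reduced to a bool, where the commit raises from the statement handle):

    #include <sql.h>
    #include <sqlext.h>

    // Sketch: send a byte buffer to the driver in chunks no larger than max_chunk.
    static bool put_data_in_chunks(SQLHSTMT hstmt, const char* p, SQLLEN cb, SQLLEN max_chunk)
    {
        SQLLEN offset = 0;
        while (offset < cb)
        {
            SQLLEN remaining = (cb - offset < max_chunk) ? (cb - offset) : max_chunk;
            SQLRETURN ret = SQLPutData(hstmt, (SQLPOINTER)&p[offset], remaining);
            if (!SQL_SUCCEEDED(ret))
                return false;   // caller should raise from the statement handle
            offset += remaining;
        }
        return true;
    }
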
6 src/cursor.h
@@ -52,9 +52,9 @@ struct ParamInfo
// If true, the memory in ParameterValuePtr was allocated via malloc and must be freed.
bool allocated;
- // The python object containing the parameter value. A reference to this
- // object should be held until we have finished using memory owned by it.
- PyObject *pyParameterValue;
+ // The python object containing the parameter value. A reference to this object should be held until we have
+ // finished using memory owned by it.
+ PyObject* pParam;
// Optional data. If used, ParameterValuePtr will point into this.
union
64 src/dbspecific.h
@@ -1,32 +1,32 @@
-
-#ifndef DBSPECIFIC_H
-#define DBSPECIFIC_H
-
-// Items specific to databases.
-//
-// Obviously we'd like to minimize this, but if they are needed this file isolates them. I'd like for there to be a
-// single build of pyodbc on each platform and not have a bunch of defines for supporting different databases.
-
-
-// ---------------------------------------------------------------------------------------------------------------------
-// SQL Server
-
-
-// SQL Server 2005 xml type
-
-#define SQL_SS_XML -152
-
-
-// SQL Server 2008 time type
-
-#define SQL_SS_TIME2 -154
-
-struct SQL_SS_TIME2_STRUCT
-{
- SQLUSMALLINT hour;
- SQLUSMALLINT minute;
- SQLUSMALLINT second;
- SQLUINTEGER fraction;
-};
-
-#endif // DBSPECIFIC_H
+
+#ifndef DBSPECIFIC_H
+#define DBSPECIFIC_H
+
+// Items specific to databases.
+//
+// Obviously we'd like to minimize this, but if they are needed this file isolates them. I'd like for there to be a
+// single build of pyodbc on each platform and not have a bunch of defines for supporting different databases.
+
+
+// ---------------------------------------------------------------------------------------------------------------------
+// SQL Server
+
+
+// SQL Server 2005 xml type
+
+#define SQL_SS_XML -152
+
+
+// SQL Server 2008 time type
+
+#define SQL_SS_TIME2 -154
+
+struct SQL_SS_TIME2_STRUCT
+{
+ SQLUSMALLINT hour;
+ SQLUSMALLINT minute;
+ SQLUSMALLINT second;
+ SQLUINTEGER fraction;
+};
+
+#endif // DBSPECIFIC_H
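
dbspecific.h itself only changes line endings here, but for context: the SQL_SS_TIME2_STRUCT it defines stores the fractional second in nanoseconds (the same convention as TIMESTAMP_STRUCT). Purely illustrative sketch of turning such a value into a datetime.time via the C API; the code that actually consumes this struct lives in getdata.cpp and is not shown in this excerpt. Assumes dbspecific.h is included and PyDateTime_IMPORT has run (see Cursor_init/GetData_init).

    #include <Python.h>
    #include <datetime.h>

    // Sketch only: nanoseconds are truncated to the microseconds datetime.time supports.
    static PyObject* time2_to_python(const SQL_SS_TIME2_STRUCT& t)
    {
        return PyTime_FromTime(t.hour, t.minute, t.second, (int)(t.fraction / 1000));
    }
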
13 src/errors.cpp
@@ -86,6 +86,12 @@ RaiseErrorV(const char* sqlstate, PyObject* exc_class, const char* format, ...)
return 0;
}
+#if PY_MAJOR_VERSION < 3
+#define PyString_CompareWithASCIIString(lhs, rhs) _strcmpi(PyString_AS_STRING(lhs), rhs)
+#else
+#define PyString_CompareWithASCIIString PyUnicode_CompareWithASCIIString
+#endif
+
bool HasSqlState(PyObject* ex, const char* szSqlState)
{
// Returns true if `ex` is an exception and has the given SQLSTATE. It is safe to pass 0 for ex.
@@ -100,9 +106,10 @@ bool HasSqlState(PyObject* ex, const char* szSqlState)
PyObject* s = PySequence_GetItem(args, 1);
if (s != 0 && PyString_Check(s))
{
- const char* sz = PyString_AsString(s);
- if (sz && _strcmpi(sz, szSqlState) == 0)
- has = true;
+ // const char* sz = PyString_AsString(s);
+ // if (sz && _strcmpi(sz, szSqlState) == 0)
+ // has = true;
+ has = (PyString_CompareWithASCIIString(s, szSqlState) == 0);
}
Py_XDECREF(s);
Py_DECREF(args);
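
The new PyString_CompareWithASCIIString macro lets HasSqlState compare the exception's SQLSTATE against a C literal from single-source code: on Python 2 it expands to the existing _strcmpi on the PyString contents, on Python 3 to PyUnicode_CompareWithASCIIString. Spelled out as a standalone helper (hypothetical name, _strcmpi as already used in this file):

    #include <Python.h>

    // Sketch: compare a Python string object against a C SQLSTATE literal.
    static bool state_equals(PyObject* s, const char* sqlstate)
    {
    #if PY_MAJOR_VERSION >= 3
        return PyUnicode_Check(s) && PyUnicode_CompareWithASCIIString(s, sqlstate) == 0;
    #else
        return PyString_Check(s) && _strcmpi(PyString_AS_STRING(s), sqlstate) == 0;
    #endif
    }
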
124 src/errors.h
@@ -1,60 +1,64 @@
-
-#ifndef _ERRORS_H_
-#define _ERRORS_H_
-
-// Sets an exception based on the ODBC SQLSTATE and error message and returns zero. If either handle is not available,
-// pass SQL_NULL_HANDLE.
-//
-// szFunction
-// The name of the function that failed. Python generates a useful stack trace, but we often don't know where in the
-// C++ code we failed.
-//
-PyObject* RaiseErrorFromHandle(const char* szFunction, HDBC hdbc, HSTMT hstmt);
-
-// Sets an exception using a printf-like error message.
-//
-// szSqlState
-// The optional SQLSTATE reported by ODBC. If not provided (sqlstate is NULL or sqlstate[0] is NULL), "HY000"
-// (General Error) is used. Note that HY000 causes Error to be used if exc_class is not provided.
-//
-// exc_class
-// The optional exception class (DatabaseError, etc.) to construct. If NULL, the appropriate class will be
-// determined from the SQLSTATE.
-//
-PyObject* RaiseErrorV(const char* sqlstate, PyObject* exc_class, const char* format, ...);
-
-
-// Constructs an exception and returns it.
-//
-// This function is like RaiseErrorFromHandle, but gives you the ability to examine the error first (in particular,
-// used to examine the SQLSTATE using HasSqlState). If you want to use the error, call PyErr_SetObject(ex->ob_type,
-// ex). Otherwise, dispose of the error using Py_DECREF(ex).
-//
-// szFunction
-// The name of the function that failed. Python generates a useful stack trace, but we often don't know where in the
-// C++ code we failed.
-//
-PyObject* GetErrorFromHandle(const char* szFunction, HDBC hdbc, HSTMT hstmt);
-
-
-// Returns true if `ex` is a database exception with SQLSTATE `szSqlState`. Returns false otherwise.
-//
-// It is safe to call with ex set to zero. The SQLSTATE comparison is case-insensitive.
-//
-bool HasSqlState(PyObject* ex, const char* szSqlState);
-
-
-// Returns true if the HSTMT has a diagnostic record with the given SQLSTATE. This is used after SQLGetData call that
-// returned SQL_SUCCESS_WITH_INFO to see if it also has SQLSTATE 01004, indicating there is more data.
-//
-bool HasSqlState(HSTMT hstmt, const char* szSqlState);
-
-inline PyObject* RaiseErrorFromException(PyObject* pError)
-{
- // PyExceptionInstance_Class doesn't exist in 2.4
- PyObject* cls = (PyObject*)((PyInstance_Check(pError) ? (PyObject*)((PyInstanceObject*)pError)->in_class : (PyObject*)(pError->ob_type)));
- PyErr_SetObject(cls, pError);
- return 0;
-}
-
-#endif // _ERRORS_H_
+
+#ifndef _ERRORS_H_
+#define _ERRORS_H_
+
+// Sets an exception based on the ODBC SQLSTATE and error message and returns zero. If either handle is not available,
+// pass SQL_NULL_HANDLE.
+//
+// szFunction
+// The name of the function that failed. Python generates a useful stack trace, but we often don't know where in the
+// C++ code we failed.
+//
+PyObject* RaiseErrorFromHandle(const char* szFunction, HDBC hdbc, HSTMT hstmt);
+
+// Sets an exception using a printf-like error message.
+//
+// szSqlState
+// The optional SQLSTATE reported by ODBC. If not provided (sqlstate is NULL or sqlstate[0] is NULL), "HY000"
+// (General Error) is used. Note that HY000 causes Error to be used if exc_class is not provided.
+//
+// exc_class
+// The optional exception class (DatabaseError, etc.) to construct. If NULL, the appropriate class will be
+// determined from the SQLSTATE.
+//
+PyObject* RaiseErrorV(const char* sqlstate, PyObject* exc_class, const char* format, ...);
+
+
+// Constructs an exception and returns it.
+//
+// This function is like RaiseErrorFromHandle, but gives you the ability to examine the error first (in particular,
+// used to examine the SQLSTATE using HasSqlState). If you want to use the error, call PyErr_SetObject(ex->ob_type,
+// ex). Otherwise, dispose of the error using Py_DECREF(ex).
+//
+// szFunction
+// The name of the function that failed. Python generates a useful stack trace, but we often don't know where in the
+// C++ code we failed.
+//
+PyObject* GetErrorFromHandle(const char* szFunction, HDBC hdbc, HSTMT hstmt);
+
+
+// Returns true if `ex` is a database exception with SQLSTATE `szSqlState`. Returns false otherwise.
+//
+// It is safe to call with ex set to zero. The SQLSTATE comparison is case-insensitive.
+//
+bool HasSqlState(PyObject* ex, const char* szSqlState);
+
+
+// Returns true if the HSTMT has a diagnostic record with the given SQLSTATE. This is used after SQLGetData call that
+// returned SQL_SUCCESS_WITH_INFO to see if it also has SQLSTATE 01004, indicating there is more data.
+//
+bool HasSqlState(HSTMT hstmt, const char* szSqlState);
+
+inline PyObject* RaiseErrorFromException(PyObject* pError)
+{
+ // PyExceptionInstance_Class doesn't exist in 2.4
+#if PY_MAJOR_VERSION >= 3
+ PyErr_SetObject((PyObject*)Py_TYPE(pError), pError);
+#else
+ PyObject* cls = (PyObject*)((PyInstance_Check(pError) ? (PyObject*)((PyInstanceObject*)pError)->in_class : (PyObject*)(Py_TYPE(pError))));
+ PyErr_SetObject(cls, pError);
+#endif
+ return 0;
+}
+
+#endif // _ERRORS_H_
1,400 src/getdata.cpp
@@ -1,667 +1,733 @@
-
-// The functions for reading a single value from the database using SQLGetData. There is a different function for
-// every data type.
-
-#include "pyodbc.h"
-#include "pyodbcmodule.h"
-#include "cursor.h"
-#include "connection.h"
-#include "errors.h"
-#include "dbspecific.h"
-#include "sqlwchar.h"
-
-void GetData_init()
-{
- PyDateTime_IMPORT;
-}
-
-class DataBuffer
-{
- // Manages memory that GetDataString uses to read data in chunks. We use the same function (GetDataString) to read
- // variable length data for 3 different types of data: binary, ANSI, and Unicode. This class abstracts out the
- // memory management details to keep the function simple.
- //
- // There are 3 potential data buffer types we deal with in GetDataString:
- //
- // 1) Binary, which is a simple array of 8-bit bytes.
- // 2) ANSI text, which is an array of chars with a NULL terminator.
- // 3) Unicode text, which is an array of SQLWCHARs with a NULL terminator.
- //
- // When dealing with Unicode, there are two widths we have to be aware of: (1) SQLWCHAR and (2) Py_UNICODE. If
- // these are the same we can use a PyUnicode object so we don't have to allocate our own buffer and then the
- // Unicode object. If they are not the same (e.g. OS/X where wchar_t-->4 Py_UNICODE-->2) then we need to maintain
- // our own buffer and pass it to the PyUnicode object later. Many Linux distros are now using UCS4, so Py_UNICODE
- // will be larger than SQLWCHAR.
- //
- // To reduce heap fragmentation, we perform the initial read into an array on the stack since we don't know the
- // length of the data. If the data doesn't fit, this class then allocates new memory. If the first read gives us
- // the length, then we create a Python object of the right size and read into its memory.
-
-private:
- SQLSMALLINT dataType;
-
- char* buffer;
- Py_ssize_t bufferSize; // How big is the buffer.
- int bytesUsed; // How many elements have been read into the buffer?
-
- PyObject* bufferOwner; // If possible, we bind into a PyString or PyUnicode object.
- int element_size; // How wide is each character: ASCII/ANSI -> 1, Unicode -> 2 or 4, binary -> 1
-
- bool usingStack; // Is buffer pointing to the initial stack buffer?
-
-public:
- int null_size; // How much room, in bytes, to add for null terminator: binary -> 0, other -> same as a element_size
-
- DataBuffer(SQLSMALLINT dataType, char* stackBuffer, SQLLEN stackBufferSize)
- {
- // dataType
- // The type of data we will be reading: SQL_C_CHAR, SQL_C_WCHAR, or SQL_C_BINARY.
-
- this->dataType = dataType;
-
- element_size = (int)((dataType == SQL_C_WCHAR) ? sizeof(SQLWCHAR) : sizeof(char));
- null_size = (dataType == SQL_C_BINARY) ? 0 : element_size;
-
- buffer = stackBuffer;
- bufferSize = stackBufferSize;
- usingStack = true;
- bufferOwner = 0;
- bytesUsed = 0;
- }
-
- ~DataBuffer()
- {
- if (!usingStack)
- {
- if (bufferOwner)
- {
- Py_DECREF(bufferOwner);
- }
- else
- {
- pyodbc_free(buffer);
- }
- }
- }
-
- char* GetBuffer()
- {
- if (!buffer)
- return 0;
-
- return buffer + bytesUsed;
- }
-
- SQLLEN GetRemaining()
- {
- // Returns the amount of data remaining in the buffer, ready to be passed to SQLGetData.
- return bufferSize - bytesUsed;
- }
-
- void AddUsed(SQLLEN cbRead)
- {
- I(cbRead <= GetRemaining());
- bytesUsed += (int)cbRead;
- }
-
- bool AllocateMore(SQLLEN cbAdd)
- {
- // cbAdd
- // The number of bytes (cb --> count of bytes) to add.
-
- if (cbAdd == 0)
- return true;
-
- SQLLEN newSize = bufferSize + cbAdd;
-
- if (usingStack)
- {
- // This is the first call and `buffer` points to stack memory. Allocate a new object and copy the stack
- // data into it.
-
- char* stackBuffer = buffer;
-
- if (dataType == SQL_C_CHAR || dataType == SQL_C_BINARY)
- {
- bufferOwner = PyString_FromStringAndSize(0, newSize);
- buffer = bufferOwner ? PyString_AS_STRING(bufferOwner) : 0;
- }
- else if (sizeof(SQLWCHAR) == Py_UNICODE_SIZE)
- {
- // Allocate directly into a Unicode object.
- bufferOwner = PyUnicode_FromUnicode(0, newSize / element_size);
- buffer = bufferOwner ? (char*)PyUnicode_AsUnicode(bufferOwner) : 0;
- }
- else
- {
- // We're Unicode, but SQLWCHAR and Py_UNICODE don't match, so maintain our own SQLWCHAR buffer.
- buffer = (char*)pyodbc_malloc((size_t)newSize);
- }
-
- if (buffer == 0)
- return false;
-
- usingStack = false;
-
- memcpy(buffer, stackBuffer, (size_t)bufferSize);
- bufferSize = newSize;
- return true;
- }
-
- if (bufferOwner && PyString_CheckExact(bufferOwner))
- {
- if (_PyString_Resize(&bufferOwner, newSize) == -1)
- return false;
- buffer = PyString_AS_STRING(bufferOwner);
- }
- else if (bufferOwner && PyUnicode_CheckExact(bufferOwner))
- {
- if (PyUnicode_Resize(&bufferOwner, newSize / element_size) == -1)
- return false;
- buffer = (char*)PyUnicode_AsUnicode(bufferOwner);
- }
- else
- {
- char* tmp = (char*)realloc(buffer, (size_t)newSize);
- if (tmp == 0)
- return false;
- buffer = tmp;
- }
-
- bufferSize = newSize;
-
- return true;
- }
-
- PyObject* DetachValue()
- {
- // At this point, Trim should have been called by PostRead.
-
- if (bytesUsed == SQL_NULL_DATA || buffer == 0)
- Py_RETURN_NONE;
-
- if (usingStack)
- {
- if (dataType == SQL_C_CHAR || dataType == SQL_C_BINARY)
- return PyString_FromStringAndSize(buffer, bytesUsed);
-
- if (sizeof(SQLWCHAR) == Py_UNICODE_SIZE)
- return PyUnicode_FromUnicode((const Py_UNICODE*)buffer, bytesUsed / element_size);
-
- return PyUnicode_FromSQLWCHAR((const SQLWCHAR*)buffer, bytesUsed / element_size);
- }
-
- if (bufferOwner && PyString_CheckExact(bufferOwner))
- {
- if (_PyString_Resize(&bufferOwner, bytesUsed) == -1)
- return 0;
- PyObject* tmp = bufferOwner;
- bufferOwner = 0;
- buffer = 0;
- return tmp;
- }
-
- if (bufferOwner && PyUnicode_CheckExact(bufferOwner))
- {
- if (PyUnicode_Resize(&bufferOwner, bytesUsed / element_size) == -1)
- return 0;
- PyObject* tmp = bufferOwner;
- bufferOwner = 0;
- buffer = 0;
- return tmp;
- }
-
- // We have allocated our own SQLWCHAR buffer and must now copy it to a Unicode object.
- PyObject* result = PyUnicode_FromSQLWCHAR((const SQLWCHAR*)buffer, bytesUsed / element_size);
- if (result == 0)
- return false;
- pyodbc_free(buffer);
- buffer = 0;
- return result;
- }
-};
-
-static PyObject*
-GetDataString(Cursor* cur, Py_ssize_t iCol)
-{
- // Returns a String or Unicode object for character and binary data.
-
- // NULL terminator notes:
- //
- // * pinfo->column_size, from SQLDescribeCol, does not include a NULL terminator. For example, column_size for a
- // char(10) column would be 10. (Also, when dealing with SQLWCHAR, it is the number of *characters*, not bytes.)
- //
- // * When passing a length to PyString_FromStringAndSize and similar Unicode functions, do not add the NULL
- // terminator -- it will be added automatically. See objects/stringobject.c
- //
- // * SQLGetData does not return the NULL terminator in the length indicator. (Therefore, you can pass this value
- // directly to the Python string functions.)
- //
- // * SQLGetData will write a NULL terminator in the output buffer, so you must leave room for it. You must also
- // include the NULL terminator in the buffer length passed to SQLGetData.
- //
- // ODBC generalization:
- // 1) Include NULL terminators in input buffer lengths.
- // 2) NULL terminators are not used in data lengths.
-
- ColumnInfo* pinfo = &cur->colinfos[iCol];
-
- // Some Unix ODBC drivers do not return the correct length.
- if (pinfo->sql_type == SQL_GUID)
- pinfo->column_size = 36;
-
- SQLSMALLINT nTargetType;
-
- switch (pinfo->sql_type)
- {
- case SQL_CHAR:
- case SQL_VARCHAR:
- case SQL_LONGVARCHAR:
- case SQL_GUID:
- case SQL_SS_XML:
- if (cur->cnxn->unicode_results)
- nTargetType = SQL_C_WCHAR;
- else
- nTargetType = SQL_C_CHAR;
- break;
-
- case SQL_WCHAR:
- case SQL_WVARCHAR:
- case SQL_WLONGVARCHAR:
- nTargetType = SQL_C_WCHAR;
- break;
-
- default:
- nTargetType = SQL_C_BINARY;
- break;
- }
-
- char tempBuffer[1024];
- DataBuffer buffer(nTargetType, tempBuffer, sizeof(tempBuffer));
-
- for (int iDbg = 0; iDbg < 10; iDbg++) // failsafe
- {
- SQLRETURN ret;
- SQLLEN cbData = 0;
-
- Py_BEGIN_ALLOW_THREADS
- ret = SQLGetData(cur->hstmt, (SQLUSMALLINT)(iCol+1), nTargetType, buffer.GetBuffer(), buffer.GetRemaining(), &cbData);
- Py_END_ALLOW_THREADS;
-
- if (cbData == SQL_NULL_DATA)
- Py_RETURN_NONE;
-
- if (!SQL_SUCCEEDED(ret) && ret != SQL_NO_DATA)
- return RaiseErrorFromHandle("SQLGetData", cur->cnxn->hdbc, cur->hstmt);
-
- // The SQLGetData behavior is incredibly quirky. It doesn't tell us the total, the total we've read, or even
- // the amount just read. It returns the amount just read, plus any remaining. Unfortunately, the only way to
- // pick them apart is to subtract out the amount of buffer we supplied.
-
- SQLLEN cbBuffer = buffer.GetRemaining(); // how much we gave SQLGetData
-
- if (ret == SQL_SUCCESS_WITH_INFO)
- {
- // There is more data than fits in the buffer. The amount of data equals the amount of data in the buffer
- // minus a NULL terminator.
-
- SQLLEN cbRead;
- SQLLEN cbMore;
-
- if (cbData == SQL_NO_TOTAL)
- {
- // We don't know how much more, so just guess.
- cbRead = cbBuffer - buffer.null_size;
- cbMore = 2048;
- }
- else if (cbData >= cbBuffer)
- {
-                // There is more data.  We supplied a cbBuffer-byte buffer but cbData bytes were available.  This call
-                // filled the buffer (minus the NULL terminator), so count that as read and allocate enough for the rest.
-
- cbRead = cbBuffer - buffer.null_size;
- cbMore = cbData - cbRead;
- }
- else
- {
-                // Unexpected: SQL_SUCCESS_WITH_INFO even though cbData fit in the buffer (SQL_SUCCESS was expected).
-                // Handle it like the intermediate reads above and let the next SQLGetData call report SQL_NO_DATA.
- cbRead = cbData - buffer.null_size;
- cbMore = 0;
- }
-
- buffer.AddUsed(cbRead);
- if (!buffer.AllocateMore(cbMore))
- return PyErr_NoMemory();
- }
- else if (ret == SQL_SUCCESS)
- {
-            // On the final read, cbData is the number of bytes actually written; there is no NULL terminator to
-            // subtract, unlike the intermediate reads above.
- buffer.AddUsed(cbData);
- }
-
- if (ret == SQL_SUCCESS || ret == SQL_NO_DATA)
- return buffer.DetachValue();
- }
-
-    // The failsafe loop above gave up before the read completed.  Raise here so callers never see a 0 return with no
-    // exception set.
-    return RaiseErrorV("HY000", OperationalError, "SQLGetData did not finish reading column %d", (int)(iCol+1));
-}
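The loop above, together with the NULL-terminator rules noted at the top of the function, is easier to see in isolation. The following is a minimal standalone sketch (not pyodbc code) of the same SQLGetData read pattern, accumulating a character column into a std::string instead of the DataBuffer helper; the ReadCharColumn name, the hstmt/iCol parameters, and the reduced error handling are illustrative assumptions only.

// Sketch only: accumulate a SQL_C_CHAR column with repeated SQLGetData calls.
// Assumes hstmt is positioned on a fetched row and iCol is the 0-based column index.
#include <sql.h>
#include <sqlext.h>
#include <string>

static bool ReadCharColumn(SQLHSTMT hstmt, SQLUSMALLINT iCol, std::string& result)
{
    char chunk[1024];
    result.clear();

    for (;;)
    {
        SQLLEN cbIndicator = 0;
        SQLRETURN ret = SQLGetData(hstmt, (SQLUSMALLINT)(iCol + 1), SQL_C_CHAR,
                                   chunk, sizeof(chunk), &cbIndicator);

        if (ret == SQL_NO_DATA)            // all data was returned by earlier calls
            return true;
        if (!SQL_SUCCEEDED(ret))           // driver error
            return false;
        if (cbIndicator == SQL_NULL_DATA)  // NULL column value
            return true;

        if (ret == SQL_SUCCESS_WITH_INFO)
        {
            // Buffer filled: sizeof(chunk)-1 data bytes plus a NULL terminator were written.
            // cbIndicator is the remaining length including this chunk, or SQL_NO_TOTAL.
            // (Assumes the warning is 01004 truncation, the common case for character data.)
            result.append(chunk, sizeof(chunk) - 1);
            continue;
        }

        // SQL_SUCCESS: cbIndicator is exactly the number of data bytes written; no terminator included.
        result.append(chunk, (size_t)cbIndicator);
        return true;
    }
}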
-
-
-static PyObject*
-GetDataUser(Cursor* cur, Py_ssize_t iCol, int conv)
-{
- // conv
- // The index into the connection's user-defined conversions `conv_types`.
-
- PyObject* value = GetDataString(cur, iCol);
- if (value == 0)
- return 0;
-
- PyObject* result = PyObject_CallFunction(cur->cnxn->conv_funcs[conv], "(O)", value);
- Py_DECREF(value);
- return result;
-}
-
-
-static PyObject*
-GetDataBuffer(Cursor* cur, Py_ssize_t iCol)
-{
- PyObject* str = GetDataString(cur, iCol);
-
- if (str == Py_None)
- return str;
-
- PyObject* buffer = 0;
-
- if (str)
- {
- buffer = PyBuffer_FromObject(str, 0, PyString_GET_SIZE(str));
- Py_DECREF(str); // If no buffer, release it. If buffer, the buffer owns it.
- }
-
- return buffer;
-}
-
-static PyObject*
-GetDataDecimal(Cursor* cur, Py_ssize_t iCol)
-{
-    // SQL_NUMERIC_STRUCT is not usable here (SQL Server ignores scale on input parameters and output columns), so we
-    // fetch the value as text and rely on Decimal's string parsing.  Unfortunately the Decimal constructor is not
-    // locale aware, so we have to modify the string ourselves.
- //
- // Oracle inserts group separators (commas in US, periods in some countries), so leave room for that too.
-
- ColumnInfo* pinfo = &cur->colinfos[iCol];
-
- SQLLEN cbNeeded = (SQLLEN)(pinfo->column_size + 3 + // sign, decimal, NULL
- (pinfo->column_size / 3) + 2); // grouping. I believe this covers all cases.
-
- SQLLEN cbFetched = 0;
- char* sz = (char*)_alloca((size_t)cbNeeded);
-
- if (sz == 0)
- return PyErr_NoMemory();
-
- SQLRETURN ret;
- Py_BEGIN_ALLOW_THREADS
- ret = SQLGetData(cur->hstmt, (SQLUSMALLINT)(iCol+1), SQL_C_CHAR, sz, cbNeeded, &cbFetched);
- Py_END_ALLOW_THREADS
- if (!SQL_SUCCEEDED(ret))
- return RaiseErrorFromHandle("SQLGetData", cur->cnxn->hdbc, cur->hstmt);
-
- if (cbFetched == SQL_NULL_DATA)
- Py_RETURN_NONE;
-
- // The decimal class requires the decimal to be a period and does not allow thousands separators. Clean it up.
- //
- // Unfortunately this code only handles single-character values, which might be good enough for decimals and
- // separators, but is certainly not good enough for currency symbols.
- //
- // Note: cbFetched does not include the NULL terminator.
-
-    for (int i = (int)(cbFetched - 1); i >= 0; i--)
- {
- if (sz[i] == chGroupSeparator || sz[i] == '$' || sz[i] == chCurrencySymbol)
- {
- memmove(&sz[i], &sz[i] + 1, (size_t)(cbFetched - i));
- cbFetched--;
- }
- else if (sz[i] == chDecimal)
- {
- sz[i] = '.';
- }
- }
-
- return PyObject_CallFunction(decimal_type, "s", sz);
-}
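The locale cleanup is the subtle part of GetDataDecimal: the driver formats the number with the locale's decimal point and may include grouping or currency characters, while Python's decimal constructor only accepts '.' and digits. Below is a minimal standalone sketch of the same normalization (not pyodbc code; the NormalizeDecimalText name and its parameters are illustrative, standing in for the module-level chDecimal, chGroupSeparator and chCurrencySymbol values used above).

// Sketch only: strip grouping/currency characters and map the locale decimal point to '.'
// so the text can be handed to decimal.Decimal.
#include <string>

static std::string NormalizeDecimalText(const std::string& raw, char chDecimalPoint,
                                        char chGroup, char chCurrency)
{
    std::string out;
    out.reserve(raw.size());
    for (std::string::size_type i = 0; i < raw.size(); i++)
    {
        char ch = raw[i];
        if (ch == chGroup || ch == chCurrency || ch == '$')
            continue;                           // drop separators and currency symbols
        out += (ch == chDecimalPoint) ? '.' : ch;
    }
    return out;
}

// Example: NormalizeDecimalText("1.234.567,89", ',', '.', 0) returns "1234567.89".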
-
-static PyObject*
-GetDataBit(Cursor* cur, Py_ssize_t iCol)
-{
- SQLCHAR ch;
- SQLLEN cbFetched;
- SQLRETURN ret;
-
- Py_BEGIN_ALLOW_THREADS
- ret = SQLGetData(cur->hstmt, (SQLUSMALLINT)(iCol+1), SQL_C_BIT, &ch, sizeof(ch), &cbFetched);
- Py_END_ALLOW_THREADS
-
- if (!SQL_SUCCEEDED(ret))
- return RaiseErrorFromHandle("SQLGetData", cur->cnxn->hdbc, cur->hstmt);
-
- if (cbFetched == SQL_NULL_DATA)
- Py_RETURN_NONE;
-
- if (ch == SQL_TRUE)
- Py_RETURN_TRUE;
-
- Py_RETURN_FALSE;
-}
-
-static PyObject*
-GetDataLong(Cursor* cur, Py_ssize_t iCol)
-{
- ColumnInfo* pinfo = &cur->colinfos[iCol];
-
- long value = 0;
- SQLLEN cbFetched = 0;
- SQLRETURN ret;
-
- SQLSMALLINT nCType = pinfo->is_unsigned ? SQL_C_ULONG : SQL_C_LONG;
-
- Py_BEGIN_ALLOW_THREADS
- ret = SQLGetData(cur->hstmt, (SQLUSMALLINT)(iCol+1), nCType, &value, sizeof(value), &cbFetched);
- Py_END_ALLOW_THREADS
- if (!SQL_SUCCEEDED(ret))
- return RaiseErrorFromHandle("SQLGetData", cur->cnxn->hdbc, cur->hstmt);
-
- if (cbFetched == SQL_NULL_DATA)
- Py_RETURN_NONE;
-
- if (pinfo->is_unsigned)
- return PyInt_FromLong(*(SQLINTEGER*)&value);
-
- return PyInt_FromLong(value);
-}
-
-static PyObject* GetDataLongLong(Cursor* cur, Py_ssize_t iCol)
-{
- ColumnInfo* pinfo = &cur->colinfos[iCol];
-
- SQLSMALLINT nCType = pinfo->is_unsigned ? SQL_C_UBIGINT : SQL_C_SBIGINT;
- SQLBIGINT value;
- SQLLEN cbFetched;
- SQLRETURN ret;
-
- Py_BEGIN_ALLOW_THREADS
- ret = SQLGetData(cur->hstmt, (SQLUSMALLINT)(iCol+1), nCType, &value, sizeof(value), &cbFetched);
- Py_END_ALLOW_THREADS
-
- if (!SQL_SUCCEEDED(ret))
- return RaiseErrorFromHandle("SQLGetData", cur->cnxn->hdbc, cur->hstmt);
-
- if (cbFetched == SQL_NULL_DATA)
- Py_RETURN_NONE;
-
- if (pinfo->is_unsigned)
- return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG)(SQLUBIGINT)value);
-
- return PyLong_FromLongLong((PY_LONG_LONG)value);
-}
-
-static PyObject*
-GetDataDouble(Cursor* cur, Py_ssize_t iCol)
-{
- double value;
- SQLLEN cbFetched = 0;
- SQLRETURN ret;
-
- Py_BEGIN_ALLOW_THREADS
- ret = SQLGetData(cur->hstmt, (SQLUSMALLINT)(iCol+1), SQL_C_DOUBLE, &value, sizeof(value), &cbFetched);
- Py_END_ALLOW_THREADS
- if (!SQL_SUCCEEDED(ret))
- return RaiseErrorFromHandle("SQLGetData", cur->cnxn->hdbc, cur->hstmt);
-
- if (cbFetched == SQL_NULL_DATA)
- Py_RETURN_NONE;
-
- return PyFloat_FromDouble(value);
-}
-
-static PyObject*
-GetSqlServerTime(Cursor* cur, Py_ssize_t iCol)
-{
- SQL_SS_TIME2_STRUCT value;
-
- SQLLEN cbFetched = 0;
- SQLRETURN ret;
-
- Py_BEGIN_ALLOW_THREADS
- ret = SQLGetData(cur->hstmt, (SQLUSMALLINT)(iCol+1), SQL_C_BINARY, &value, sizeof(value), &cbFetched);
- Py_END_ALLOW_THREADS
- if (!SQL_SUCCEEDED(ret))
- return RaiseErrorFromHandle("SQLGetData", cur->cnxn->hdbc, cur->hstmt);
-
- if (cbFetched == SQL_NULL_DATA)
- Py_RETURN_NONE;
-
- int micros = (int)(value.fraction / 1000); // nanos --> micros
- return PyTime_FromTime(value.hour, value.minute, value.second, micros);
-}
-
-static PyObject*
-GetDataTimestamp(Cursor* cur, Py_ssize_t iCol)
-{
- TIMESTAMP_STRUCT value;
-
- SQLLEN cbFetched = 0;
- SQLRETURN ret;
-
- Py_BEGIN_ALLOW_THREADS
- ret = SQLGetData(cur->hstmt, (SQLUSMALLINT)(iCol+1), SQL_C_TYPE_TIMESTAMP, &value, sizeof(value), &cbFetched);
- Py_END_ALLOW_THREADS
- if (!SQL_SUCCEEDED(ret))
- return RaiseErrorFromHandle("SQLGetData", cur->cnxn->hdbc, cur->hstmt);
-
- if (cbFetched == SQL_NULL_DATA)
- Py_RETURN_NONE;
-
- switch (cur->colinfos[iCol].sql_type)
- {
- case SQL_TYPE_TIME:
- {
- int micros = (int)(value.fraction / 1000); // nanos --> micros
- return PyTime_FromTime(value.hour, value.minute, value.second, micros);
- }
-
- case SQL_TYPE_DATE:
- return PyDate_FromDate(value.year, value.month, value.day);
- }
-
- int micros = (int)(value.fraction / 1000); // nanos --> micros
- return PyDateTime_FromDateAndTime(value.year, value.month, value.day, value.hour, value.minute, value.second, micros);
-}
-
-int GetUserConvIndex(Cursor* cur, SQLSMALLINT sql_type)
-{
- // If this sql type has a user-defined conversion, the index into the connection's `conv_funcs` array is returned.
- // Otherwise -1 is returned.
-
- for (int i = 0; i < cur->cnxn->conv_count; i++)
- if (cur->cnxn->conv_types[i] == sql_type)
- return i;
- return -1;
-}
-
-
-PyObject*
-GetData(Cursor* cur, Py_ssize_t iCol)
-{
- // Returns an object representing the value in the row/field. If 0 is returned, an exception has already been set.
- //
- // The data is assumed to be the default C type for the column's SQL type.
-
- ColumnInfo* pinfo = &cur->colinfos[iCol];
-
- // First see if there is a user-defined conversion.
-
- int conv_index = GetUserConvIndex(cur, pinfo->sql_type);
- if (conv_index != -1)
- return GetDataUser(cur, iCol, conv_index);
-
- switch (pinfo->sql_type)
- {
- case SQL_WCHAR:
- case SQL_WVARCHAR:
- case SQL_WLONGVARCHAR:
- case SQL_CHAR:
- case SQL_VARCHAR:
- case SQL_LONGVARCHAR:
- case SQL_GUID:
- case SQL_SS_XML:
- return GetDataString(cur, iCol);
-
- case SQL_BINARY:
- case SQL_VARBINARY:
- case SQL_LONGVARBINARY:
- return GetDataBuffer(cur, iCol);
-
- case SQL_DECIMAL:
- case SQL_NUMERIC:
- {
- if (decimal_type == 0)
- break;
-
- return GetDataDecimal(cur, iCol);
- }
-
- case SQL_BIT:
- return GetDataBit(cur, iCol);
-
- case SQL_TINYINT:
- case SQL_SMALLINT:
- case SQL_INTEGER:
- return GetDataLong(cur, iCol);
-
- case SQL_BIGINT:
- return GetDataLongLong(cur, iCol);
-
- case SQL_REAL:
- case SQL_FLOAT:
- case SQL_DOUBLE:
- return GetDataDouble(cur, iCol);
-
- case SQL_TYPE_DATE:
- case SQL_TYPE_TIME:
- case SQL_TYPE_TIMESTAMP:
- return GetDataTimestamp(cur, iCol);
-
- case SQL_SS_TIME2:
- return GetSqlServerTime(cur, iCol);
- }
-
-    return RaiseErrorV("HY106", ProgrammingError, "ODBC SQL type %d is not yet supported.  column-index=%zd",
-                       (int)pinfo->sql_type, iCol);
-}
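The fixed-size getters above (bit, long, long long, double, time and timestamp) all share the same shape: a single SQLGetData call into a stack variable, with the length indicator distinguishing NULL from a real value. A minimal generic version of that pattern follows, as a sketch only (GetFixedColumn and its parameters are illustrative, not part of pyodbc).

// Sketch only: the single-call SQLGetData pattern used for fixed-size C types.
// Returns false on a driver error; *isNull reports whether the column was NULL.
#include <sql.h>
#include <sqlext.h>

static bool GetFixedColumn(SQLHSTMT hstmt, SQLUSMALLINT iCol, SQLSMALLINT cType,
                           void* value, SQLLEN cbValue, bool* isNull)
{
    SQLLEN cbFetched = 0;
    SQLRETURN ret = SQLGetData(hstmt, (SQLUSMALLINT)(iCol + 1), cType, value, cbValue, &cbFetched);
    if (!SQL_SUCCEEDED(ret))
        return false;
    *isNull = (cbFetched == SQL_NULL_DATA);
    return true;
}

// Usage:
//   SQLBIGINT n;
//   bool isNull;
//   if (GetFixedColumn(hstmt, iCol, SQL_C_SBIGINT, &n, sizeof(n), &isNull) && !isNull)
//       /* convert n to a Python int */ ;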
+
+// The functions for reading a single value from the database using SQLGetData. There is a different function for
+// every data type.
+
+#include "pyodbc.h"
+#include "pyodbcmodule.h"
+#include "cursor.h"
+#include "connection.h"
+#include "errors.h"
+#include "dbspecific.h"
+#include "sqlwchar.h"
+#include "wrapper.h"
+#include <datetime.h>
+
+void GetData_init()
+{
+ PyDateTime_IMPORT;