pyodbc-5.1.0/LICENSE.txt

Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
associated documentation files (the "Software"), to deal in the Software without restriction,
including without limitation the rights to use, copy, modify, merge, publish, distribute,
sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is
furnished to do so.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT
OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

pyodbc-5.1.0/MANIFEST.in

include src/*.h
include src/*.cpp
include tests/*
include README.*
include LICENSE.txt
# Include this file, needed for bdist_rpm
include MANIFEST.in
global-exclude *.py[cod]

pyodbc-5.1.0/PKG-INFO

Metadata-Version: 2.1
Name: pyodbc
Version: 5.1.0
Summary: DB API Module for ODBC
Home-page: https://github.com/mkleehammer/pyodbc
Maintainer: Michael Kleehammer
Maintainer-email: michael@kleehammer.com
License: MIT
Platform: UNKNOWN
Classifier: Development Status :: 5 - Production/Stable
Classifier: Intended Audience :: Developers
Classifier: Intended Audience :: System Administrators
Classifier: License :: OSI Approved :: MIT License
Classifier: Operating System :: Microsoft :: Windows
Classifier: Operating System :: POSIX
Classifier: Programming Language :: Python
Classifier: Programming Language :: Python :: 3
Classifier: Topic :: Database
Requires-Python: >=3.8
License-File: LICENSE.txt

pyodbc is an open source Python module that makes accessing ODBC databases simple. It implements
the [DB API 2.0](https://www.python.org/dev/peps/pep-0249) specification but is packed with even
more Pythonic convenience.

pyodbc-5.1.0/README.md

# pyodbc

[![Windows build](https://ci.appveyor.com/api/projects/status/github/mkleehammer/pyodbc?branch=master&svg=true&passingText=Windows%20build&failingText=Windows%20build)](https://ci.appveyor.com/project/mkleehammer/pyodbc)
[![Ubuntu build](https://github.com/mkleehammer/pyodbc/actions/workflows/ubuntu_build.yml/badge.svg)](https://github.com/mkleehammer/pyodbc/actions/workflows/ubuntu_build.yml)
[![PyPI](https://img.shields.io/pypi/v/pyodbc?color=brightgreen)](https://pypi.org/project/pyodbc/)

pyodbc is an open source Python module that makes accessing ODBC databases simple. It implements
the [DB API 2.0](https://www.python.org/dev/peps/pep-0249) specification but is packed with even
more Pythonic convenience.

The easiest way to install pyodbc is to use pip:

    python -m pip install pyodbc

On Macs, you should probably install unixODBC first if you don't already have an ODBC driver
manager installed. For example, using the [homebrew](https://brew.sh/) package manager:

    brew install unixodbc
    python -m pip install pyodbc

Similarly, on Unix you should make sure you have an ODBC driver manager installed before
installing pyodbc. See the [docs](https://github.com/mkleehammer/pyodbc/wiki/Install) for more
information about how to do this on different Unix flavors. (On Windows, the ODBC driver manager
is built-in.)

Precompiled binary wheels are provided for multiple Python versions on most Windows, macOS, and
Linux platforms. On other platforms pyodbc will be built from the source code. Note, pyodbc
contains C++ extensions so you will need a suitable C++ compiler when building from source. See
the [docs](https://github.com/mkleehammer/pyodbc/wiki/Install) for details.

[Documentation](https://github.com/mkleehammer/pyodbc/wiki)

[Release Notes](https://github.com/mkleehammer/pyodbc/releases)
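Once a driver manager and driver are in place, usage follows the DB API pattern. A minimal
sketch; the DSN, credentials, and table below are placeholders, not anything this project
defines:

    import pyodbc

    # Connection strings are driver-specific; this one is illustrative only.
    cnxn = pyodbc.connect('DSN=mydsn;UID=user;PWD=secret')
    cursor = cnxn.cursor()
    cursor.execute('SELECT user_id, user_name FROM users')
    for row in cursor.fetchall():
        print(row.user_id, row.user_name)   # columns are also available by name
    cnxn.close()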
pyodbc-5.1.0/pyproject.toml

[project]
name = "pyodbc"
version = "5.1.0"
requires-python = ">=3.8"
# This is used by the GitHub action that builds release artifacts using cibuildwheel.
# cibuildwheel reads this directly:
#
#   https://cibuildwheel.readthedocs.io/en/stable/options/#requires-python
description = "DB API module for ODBC"
readme = "README.md"
license = {text = "MIT License"}
authors = [{name = "Michael Kleehammer", email="michael@kleehammer.com"}]
maintainers = [{name = "Michael Kleehammer", email="michael@kleehammer.com"}]
# There are a lot of contributors and I'd like to include everyone that puts in a lot of
# effort, but is this for the more technical meaning of who makes builds?  Would adding
# contributors cause confusion?
classifiers = ['Development Status :: 5 - Production/Stable',
               'Intended Audience :: Developers',
               'Intended Audience :: System Administrators',
               'License :: OSI Approved :: MIT License',
               'Operating System :: Microsoft :: Windows',
               'Operating System :: POSIX',
               'Programming Language :: Python',
               'Programming Language :: Python :: 3',
               'Topic :: Database',
               ]

[project.urls]
Homepage = "https://github.com/mkleehammer/pyodbc"

[build-system]
requires = ["setuptools"]
build-backend = "setuptools.build_meta"

pyodbc-5.1.0/setup.cfg

[egg_info]
tag_build = 
tag_date = 0
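The `[build-system]` table above is what a PEP 517 frontend consumes. A hedged sketch of building
from a source checkout with the PyPA `build` tool (the tooling choice is an assumption, not
something this repo mandates):

    python -m pip install build
    python -m build    # produces an sdist and a wheel via setuptools.build_meta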
pyodbc-5.1.0/setup.py

#!/usr/bin/env python

import sys, os, shlex, re
from os.path import exists, join, isdir, relpath, expanduser
from pathlib import Path
from inspect import cleandoc

from setuptools import setup
from setuptools.extension import Extension


def _getversion():
    # CAREFUL: We need the version in this file so we can set it in a C macro to set
    # pyodbc.__version__, plus the setup function might require it.  We also need it in the
    # toml file or cibuildwheel will fail.  Instead of requiring a toml parser for older
    # versions of Python, we'll parse it out with a regexp, which is very simple.
    path = Path(__file__).parent / 'pyproject.toml'
    assert path.exists(), f'Cannot find {path}'
    text = path.read_text(encoding='utf8')
    m = re.search(
        r"""
        ^ \s* version \s*=\s* "([^"]+)"
        """,
        text,
        flags=re.VERBOSE | re.MULTILINE | re.IGNORECASE)
    if not m:
        sys.exit(f'Did not find version in {path}')
    return m.group(1)

VERSION = _getversion()


def main():
    settings = get_compiler_settings()

    files = [relpath(join('src', f)) for f in os.listdir('src') if f.endswith('.cpp')]

    if exists('MANIFEST'):
        os.remove('MANIFEST')

    setup(
        name="pyodbc",
        version=VERSION,
        description="DB API Module for ODBC",
        long_description=cleandoc("""
            pyodbc is an open source Python module that makes accessing ODBC databases simple.
            It implements the [DB API 2.0](https://www.python.org/dev/peps/pep-0249)
            specification but is packed with even more Pythonic convenience."""),
        maintainer="Michael Kleehammer",
        maintainer_email="michael@kleehammer.com",
        url='https://github.com/mkleehammer/pyodbc',
        ext_modules=[Extension('pyodbc', sorted(files), **settings)],
        include_package_data=False,
        packages=[''],
        package_dir={'': 'src'},
        # places pyodbc.pyi alongside pyodbc.{platform}.{pyd|so} in site-packages
        package_data={'': ['pyodbc.pyi']},
        license='MIT',
        python_requires='>=3.8',
        classifiers=['Development Status :: 5 - Production/Stable',
                     'Intended Audience :: Developers',
                     'Intended Audience :: System Administrators',
                     'License :: OSI Approved :: MIT License',
                     'Operating System :: Microsoft :: Windows',
                     'Operating System :: POSIX',
                     'Programming Language :: Python',
                     'Programming Language :: Python :: 3',
                     'Topic :: Database',
                     ],
        options={
            'bdist_wininst': {'user_access_control': 'auto'}
        })
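# For comparison: on Python 3.11+ the same value could be read with the stdlib
# tomllib parser instead of the regexp in _getversion.  A hedged sketch -- not
# used here because this file must keep supporting Python 3.8:
#
#     import tomllib
#     from pathlib import Path
#     pyproject = tomllib.loads(Path('pyproject.toml').read_text(encoding='utf8'))
#     version = pyproject['project']['version']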
def get_compiler_settings():
    settings = {
        'extra_compile_args': [],
        'extra_link_args': [],
        'libraries': [],
        'include_dirs': [],
        'define_macros': [('PYODBC_VERSION', VERSION)]
    }

    if os.name == 'nt':
        settings['extra_compile_args'].extend([
            '/Wall',
            '/wd4514',   # unreferenced inline function removed
            '/wd4820',   # padding after struct member
            '/wd4668',   # is not defined as a preprocessor macro
            '/wd4711',   # function selected for automatic inline expansion
            '/wd4100',   # unreferenced formal parameter
            '/wd4127',   # "conditional expression is constant" testing compilation constants
            '/wd4191',   # casts to PyCFunction which doesn't have the keywords parameter
        ])

        if '--windbg' in sys.argv:
            # Used only temporarily to add some debugging flags to get better stack traces in
            # the debugger.  This is not related to building debug versions of Python which use
            # "--debug".
            sys.argv.remove('--windbg')
            settings['extra_compile_args'].extend('/Od /Ge /GS /GZ /RTC1 /Wp64 /Yd'.split())

        # Visual Studio 2019 defaults to using __CxxFrameHandler4 which is in
        # VCRUNTIME140_1.DLL which Python 3.7 and earlier are not linked to.  This requirement
        # means pyodbc will not load unless the user has installed a UCRT update.  Turn this
        # off to match the Python 3.7 settings.
        #
        # Unfortunately these are *hidden* settings.  I guess we should be glad they actually
        # made the settings.
        # https://lectem.github.io/msvc/reverse-engineering/build/2019/01/21/MSVC-hidden-flags.html
        settings['extra_compile_args'].append('/d2FH4-')
        settings['extra_link_args'].append('/d2:-FH4-')

        settings['libraries'].append('odbc32')
        settings['libraries'].append('advapi32')

    elif os.environ.get("OS", '').lower().startswith('windows'):
        # Windows Cygwin (posix on windows)
        # OS name not windows, but still on Windows
        settings['libraries'].append('odbc32')

    elif sys.platform == 'darwin':
        # Python functions take a lot of 'char *' that really should be const.  gcc complains
        # about this *a lot*.
        settings['extra_compile_args'].extend([
            '-Wno-write-strings',
            '-Wno-deprecated-declarations'
        ])

        # Homebrew installs odbc_config
        pipe = os.popen('odbc_config --cflags --libs 2>/dev/null')
        cflags, ldflags = pipe.readlines()
        exit_status = pipe.close()

        if exit_status is None:
            settings['extra_compile_args'].extend(shlex.split(cflags))
            settings['extra_link_args'].extend(shlex.split(ldflags))
        else:
            settings['libraries'].append('odbc')
            # Add directories for MacPorts and Homebrew.
            dirs = [
                '/usr/local/include',
                '/opt/local/include',
                '/opt/homebrew/include',
                expanduser('~/homebrew/include'),
            ]
            settings['include_dirs'].extend(dir for dir in dirs if isdir(dir))
            # unixODBC make/install places libodbc.dylib in /usr/local/lib/ by default
            # (also OS/X since El Capitan prevents /usr/lib from being accessed)
            settings['library_dirs'] = ['/usr/local/lib', '/opt/homebrew/lib']

    else:
        # Other posix-like: Linux, Solaris, etc.

        # Python functions take a lot of 'char *' that really should be const.  gcc complains
        # about this *a lot*.
        settings['extra_compile_args'].append('-Wno-write-strings')

        fd = os.popen('odbc_config --cflags 2>/dev/null')
        cflags = fd.read().strip()
        fd.close()
        if cflags:
            settings['extra_compile_args'].extend(cflags.split())

        fd = os.popen('odbc_config --libs 2>/dev/null')
        ldflags = fd.read().strip()
        fd.close()
        if ldflags:
            settings['extra_link_args'].extend(ldflags.split())

        # What is the proper way to detect iODBC, MyODBC, unixODBC, etc.?
        settings['libraries'].append('odbc')

    return settings


if __name__ == '__main__':
    main()

pyodbc-5.1.0/src/cnxninfo.cpp

// There is a bunch of information we want from connections which requires calls to SQLGetInfo when we first connect.
// However, this isn't something we really want to do for every connection, so we cache it by the hash of the
// connection string.  When we create a new connection, we copy the values into the connection structure.
//
// We hash the connection string since it may contain sensitive information we wouldn't want exposed in a core dump.

#include "pyodbc.h"
#include "wrapper.h"
#include "textenc.h"
#include "cnxninfo.h"
#include "connection.h"

// Maps from a Python string of the SHA1 hash to a CnxnInfo object.
//
static PyObject* map_hash_to_info;

static PyObject* hashlib;    // The hashlib module
static PyObject* update;     // The string 'update', used in GetHash.
bool CnxnInfo_init()
{
    // Called during startup to give us a chance to import the hash code.  If we can't find it, we'll print a warning
    // to the console and not cache anything.

    map_hash_to_info = PyDict_New();
    update = PyUnicode_FromString("update");

    if (!map_hash_to_info || !update)
        return false;

    hashlib = PyImport_ImportModule("hashlib");
    if (!hashlib)
        return false;

    return true;
}

static PyObject* GetHash(PyObject* p)
{
    Object bytes(PyUnicode_AsUTF8String(p));
    if (!bytes)
        return 0;
    p = bytes.Get();

    Object hash(PyObject_CallMethod(hashlib, "new", "s", "sha1"));
    if (!hash.IsValid())
        return 0;

    Object result(PyObject_CallMethodObjArgs(hash, update, p, 0));
    if (!result.IsValid())
        return 0;

    return PyObject_CallMethod(hash, "hexdigest", 0);
}

inline void GetColumnSize(Connection* cnxn, SQLSMALLINT sqltype, int* psize)
{
    // For some reason I can't seem to reuse the HSTMT multiple times in a row here.  Until I
    // figure it out I'll simply allocate a new one each time.
    HSTMT hstmt;
    if (!SQL_SUCCEEDED(SQLAllocHandle(SQL_HANDLE_STMT, cnxn->hdbc, &hstmt)))
        return;

    SQLINTEGER columnsize;
    if (SQL_SUCCEEDED(SQLGetTypeInfo(hstmt, sqltype)) &&
        SQL_SUCCEEDED(SQLFetch(hstmt)) &&
        SQL_SUCCEEDED(SQLGetData(hstmt, 3, SQL_INTEGER, &columnsize, sizeof(columnsize), 0)))
    {
        // I believe some drivers are returning negative numbers for "unlimited" text fields,
        // such as FileMaker.  Ignore anything that seems too small.
        if (columnsize >= 1)
            *psize = (int)columnsize;
    }

    SQLFreeStmt(hstmt, SQL_CLOSE);
    SQLFreeHandle(SQL_HANDLE_STMT, hstmt);
}

static PyObject* CnxnInfo_New(Connection* cnxn)
{
#ifdef _MSC_VER
#pragma warning(disable : 4365)
#endif
    CnxnInfo* p = PyObject_NEW(CnxnInfo, &CnxnInfoType);
    if (!p)
        return 0;
    Object info((PyObject*)p);

    // set defaults
    p->odbc_major = 0;
    p->odbc_minor = 0;
    p->supports_describeparam = false;
    p->datetime_precision = 19;     // default: "yyyy-mm-dd hh:mm:ss"
    p->need_long_data_len = false;
    p->varchar_maxlength = 1 * 1024 * 1024 * 1024;
    p->wvarchar_maxlength = 1 * 1024 * 1024 * 1024;
    p->binary_maxlength = 1 * 1024 * 1024 * 1024;

    // WARNING: The GIL lock is released for the *entire* function here.  Do not
    // touch any objects, call Python APIs, etc.  We are simply making ODBC
    // calls and setting atomic values (ints & chars).  Also, make sure the lock
    // gets reacquired -- do not add an early exit.
    SQLRETURN ret;
    Py_BEGIN_ALLOW_THREADS

    char szVer[20];
    SQLSMALLINT cch = 0;
    ret = SQLGetInfo(cnxn->hdbc, SQL_DRIVER_ODBC_VER, szVer, _countof(szVer), &cch);
    if (SQL_SUCCEEDED(ret))
    {
        char* dot = strchr(szVer, '.');
        if (dot)
        {
            *dot = '\0';
            p->odbc_major = (char)atoi(szVer);
            p->odbc_minor = (char)atoi(dot + 1);
        }
    }

    char szYN[2];
    if (SQL_SUCCEEDED(SQLGetInfo(cnxn->hdbc, SQL_DESCRIBE_PARAMETER, szYN, _countof(szYN), &cch)))
        p->supports_describeparam = szYN[0] == 'Y';

    if (SQL_SUCCEEDED(SQLGetInfo(cnxn->hdbc, SQL_NEED_LONG_DATA_LEN, szYN, _countof(szYN), &cch)))
        p->need_long_data_len = (szYN[0] == 'Y');

    GetColumnSize(cnxn, SQL_VARCHAR, &p->varchar_maxlength);
    GetColumnSize(cnxn, SQL_WVARCHAR, &p->wvarchar_maxlength);
    GetColumnSize(cnxn, SQL_VARBINARY, &p->binary_maxlength);
    GetColumnSize(cnxn, SQL_TYPE_TIMESTAMP, &p->datetime_precision);

    Py_END_ALLOW_THREADS

    return info.Detach();
}
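// The cache key produced by GetHash above is the hex SHA-1 of the UTF-8
// connection string.  A rough Python-side equivalent of the caching scheme
// (`cache` and `query_driver_info` are illustrative names only):
//
//     import hashlib
//     key = hashlib.new('sha1', connection_string.encode('utf-8')).hexdigest()
//     info = cache.get(key)
//     if info is None:
//         cache[key] = info = query_driver_info()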
PyObject* GetConnectionInfo(PyObject* pConnectionString, Connection* cnxn)
{
    // Looks up or creates a CnxnInfo object for the given connection string.  The connection string can be a Unicode
    // or String object.

    Object hash(GetHash(pConnectionString));

    if (hash.IsValid())
    {
        PyObject* info = PyDict_GetItem(map_hash_to_info, hash);
        if (info)
        {
            Py_INCREF(info);
            return info;
        }
    }

    PyObject* info = CnxnInfo_New(cnxn);
    if (info != 0 && hash.IsValid())
        PyDict_SetItem(map_hash_to_info, hash, info);

    return info;
}

PyTypeObject CnxnInfoType =
{
    PyVarObject_HEAD_INIT(0, 0)
    "pyodbc.CnxnInfo",          // tp_name
    sizeof(CnxnInfo),           // tp_basicsize
    0,                          // tp_itemsize
    0,                          // destructor tp_dealloc
    0,                          // tp_print
    0,                          // tp_getattr
    0,                          // tp_setattr
    0,                          // tp_compare
    0,                          // tp_repr
    0,                          // tp_as_number
    0,                          // tp_as_sequence
    0,                          // tp_as_mapping
    0,                          // tp_hash
    0,                          // tp_call
    0,                          // tp_str
    0,                          // tp_getattro
    0,                          // tp_setattro
    0,                          // tp_as_buffer
    Py_TPFLAGS_DEFAULT,         // tp_flags
};

pyodbc-5.1.0/src/cnxninfo.h

// Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
// documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
// permit persons to whom the Software is furnished to do so.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
// WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
// OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

#ifndef CNXNINFO_H
#define CNXNINFO_H

struct Connection;
extern PyTypeObject CnxnInfoType;

struct CnxnInfo
{
    PyObject_HEAD

    // The description of these fields is in the connection structure.
    char odbc_major;
    char odbc_minor;

    bool supports_describeparam;
    int datetime_precision;

    // Do we need to use SQL_LEN_DATA_AT_EXEC?  Some drivers (e.g. FreeTDS 0.91) have problems with long values, so
    // we'll use SQL_DATA_AT_EXEC when possible.  If this is true, however, we'll need to pass the length.
    bool need_long_data_len;

    // These are from SQLGetTypeInfo.column_size, so the char ones are in characters, not bytes.
    int varchar_maxlength;
    int wvarchar_maxlength;
    int binary_maxlength;
};

bool CnxnInfo_init();

// Looks up or creates a CnxnInfo object for the given connection string.  The connection string can be a Unicode or
// String object.
PyObject* GetConnectionInfo(PyObject* pConnectionString, Connection* cnxn);

#endif // CNXNINFO_H

pyodbc-5.1.0/src/connection.cpp

// Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
// documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
// permit persons to whom the Software is furnished to do so.
// // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE // WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS // OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR // OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #include "pyodbc.h" #include "wrapper.h" #include "textenc.h" #include "connection.h" #include "cursor.h" #include "pyodbcmodule.h" #include "errors.h" #include "cnxninfo.h" static char connection_doc[] = "Connection objects manage connections to the database.\n" "\n" "Each manages a single ODBC HDBC."; static Connection* Connection_Validate(PyObject* self) { Connection* cnxn; if (self == 0 || !Connection_Check(self)) { PyErr_SetString(PyExc_TypeError, "Connection object required"); return 0; } cnxn = (Connection*)self; if (cnxn->hdbc == SQL_NULL_HANDLE) { PyErr_SetString(ProgrammingError, "Attempt to use a closed connection."); return 0; } return cnxn; } static char* StrDup(const char* text) { // Like StrDup but uses PyMem_Malloc for the memory. This is only used for internal // encodings which are known to be ASCII. size_t cb = strlen(text) + 1; char* pb = (char*)PyMem_Malloc(cb); if (!pb) { PyErr_NoMemory(); return 0; } memcpy(pb, text, cb); return pb; } static bool Connect(PyObject* pConnectString, HDBC hdbc, long timeout, Object& encoding) { assert(PyUnicode_Check(pConnectString)); SQLRETURN ret; if (timeout > 0) { Py_BEGIN_ALLOW_THREADS ret = SQLSetConnectAttrW(hdbc, SQL_ATTR_LOGIN_TIMEOUT, (SQLPOINTER)(uintptr_t)timeout, SQL_IS_UINTEGER); Py_END_ALLOW_THREADS if (!SQL_SUCCEEDED(ret)) RaiseErrorFromHandle(0, "SQLSetConnectAttr(SQL_ATTR_LOGIN_TIMEOUT)", hdbc, SQL_NULL_HANDLE); } const char* szEncoding = 0; Object encBytes; if (encoding) { if (PyUnicode_Check(encoding)) { szEncoding = PyUnicode_AsUTF8(encoding); } } SQLWChar cstring(pConnectString, szEncoding ? szEncoding : ENCSTR_UTF16NE); if (!cstring.isValid()) return false; Py_BEGIN_ALLOW_THREADS ret = SQLDriverConnectW(hdbc, 0, cstring, SQL_NTS, 0, 0, 0, SQL_DRIVER_NOPROMPT); Py_END_ALLOW_THREADS if (SQL_SUCCEEDED(ret)) return true; RaiseErrorFromHandle(0, "SQLDriverConnect", hdbc, SQL_NULL_HANDLE); return false; } static bool ApplyPreconnAttrs(HDBC hdbc, SQLINTEGER ikey, PyObject *value, char *strencoding) { SQLRETURN ret; SQLPOINTER ivalue = 0; SQLINTEGER vallen = 0; SQLWChar sqlchar; if (PyLong_Check(value)) { if (_PyLong_Sign(value) >= 0) { ivalue = (SQLPOINTER)PyLong_AsUnsignedLong(value); vallen = SQL_IS_UINTEGER; } else { ivalue = (SQLPOINTER)PyLong_AsLong(value); vallen = SQL_IS_INTEGER; } } else if (PyByteArray_Check(value)) { ivalue = (SQLPOINTER)PyByteArray_AsString(value); vallen = SQL_IS_POINTER; } else if (PyBytes_Check(value)) { ivalue = PyBytes_AsString(value); vallen = SQL_IS_POINTER; } else if (PyUnicode_Check(value)) { sqlchar.set(value, strencoding ? strencoding : "utf-16le"); ivalue = sqlchar.get(); vallen = SQL_NTS; } else if (PySequence_Check(value)) { // To allow for possibility of setting multiple attributes more than once. 
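// From the Python side these values arrive via the attrs_before dict passed to
// pyodbc.connect.  A sketch (the attribute id 1256 and token_bytes are
// illustrative placeholders, not constants defined in pyodbc):
//
//     pyodbc.connect(cs, attrs_before={1256: token_bytes})
//
// A tuple or list value for a key lands in this sequence branch and applies
// the attribute once per element.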
Py_ssize_t len = PySequence_Size(value); for (Py_ssize_t i = 0; i < len; i++) { Object v(PySequence_GetItem(value, i)); if (!ApplyPreconnAttrs(hdbc, ikey, v.Get(), strencoding)) return false; } return true; } else { RaiseErrorV(0, PyExc_TypeError, "Unsupported attrs_before type: %s", Py_TYPE(value)->tp_name); return false; } Py_BEGIN_ALLOW_THREADS ret = SQLSetConnectAttrW(hdbc, ikey, ivalue, vallen); Py_END_ALLOW_THREADS if (!SQL_SUCCEEDED(ret)) { RaiseErrorFromHandle(0, "SQLSetConnectAttr", hdbc, SQL_NULL_HANDLE); Py_BEGIN_ALLOW_THREADS SQLFreeHandle(SQL_HANDLE_DBC, hdbc); Py_END_ALLOW_THREADS return false; } return true; } PyObject* Connection_New(PyObject* pConnectString, bool fAutoCommit, long timeout, bool fReadOnly, PyObject* attrs_before, Object& encoding) { // // Allocate HDBC and connect // Object attrs_before_o(attrs_before); HDBC hdbc = SQL_NULL_HANDLE; SQLRETURN ret; Py_BEGIN_ALLOW_THREADS ret = SQLAllocHandle(SQL_HANDLE_DBC, henv, &hdbc); Py_END_ALLOW_THREADS if (!SQL_SUCCEEDED(ret)) return RaiseErrorFromHandle(0, "SQLAllocHandle", SQL_NULL_HANDLE, SQL_NULL_HANDLE); // // Attributes that must be set before connecting. // if (attrs_before) { Py_ssize_t pos = 0; PyObject* key = 0; PyObject* value = 0; Object encodingholder; char *strencoding = encoding.Get() ? (PyUnicode_Check(encoding) ? PyBytes_AsString(encodingholder = PyCodec_Encode(encoding, "utf-8", "strict")) : PyBytes_Check(encoding) ? PyBytes_AsString(encoding) : 0) : 0; while (PyDict_Next(attrs_before, &pos, &key, &value)) { SQLINTEGER ikey = 0; if (PyLong_Check(key)) ikey = (int)PyLong_AsLong(key); if (!ApplyPreconnAttrs(hdbc, ikey, value, strencoding)) { return 0; } } } if (!Connect(pConnectString, hdbc, timeout, encoding)) { // Connect has already set an exception. Py_BEGIN_ALLOW_THREADS SQLFreeHandle(SQL_HANDLE_DBC, hdbc); Py_END_ALLOW_THREADS return 0; } // // Connected, so allocate the Connection object. // // Set all variables to something valid, so we don't crash in dealloc if this function fails. #ifdef _MSC_VER #pragma warning(disable : 4365) #endif Connection* cnxn = PyObject_NEW(Connection, &ConnectionType); #ifdef _MSC_VER #pragma warning(default : 4365) #endif if (cnxn == 0) { Py_BEGIN_ALLOW_THREADS SQLFreeHandle(SQL_HANDLE_DBC, hdbc); Py_END_ALLOW_THREADS return 0; } cnxn->hdbc = hdbc; cnxn->nAutoCommit = fAutoCommit ? SQL_AUTOCOMMIT_ON : SQL_AUTOCOMMIT_OFF; cnxn->searchescape = 0; cnxn->maxwrite = 0; cnxn->timeout = 0; cnxn->map_sqltype_to_converter = 0; cnxn->attrs_before = attrs_before_o.Detach(); // This is an inefficient default, but should work all the time. When we are offered // single-byte text we don't actually know what the encoding is. For example, with SQL // Server the encoding is based on the database's collation. We ask the driver / DB to // convert to SQL_C_WCHAR and use the ODBC default of UTF-16LE. cnxn->sqlchar_enc.optenc = OPTENC_UTF16NE; cnxn->sqlchar_enc.name = StrDup(ENCSTR_UTF16NE); cnxn->sqlchar_enc.ctype = SQL_C_WCHAR; cnxn->sqlwchar_enc.optenc = OPTENC_UTF16NE; cnxn->sqlwchar_enc.name = StrDup(ENCSTR_UTF16NE); cnxn->sqlwchar_enc.ctype = SQL_C_WCHAR; cnxn->metadata_enc.optenc = OPTENC_UTF16NE; cnxn->metadata_enc.name = StrDup(ENCSTR_UTF16NE); cnxn->metadata_enc.ctype = SQL_C_WCHAR; // Note: I attempted to use UTF-8 here too since it can hold any type, but SQL Server fails // with a data truncation error if we send something encoded in 2 bytes to a column with 1 // character. 
I don't know if this is a bug in SQL Server's driver or if I'm missing // something, so we'll stay with the default ODBC conversions. cnxn->unicode_enc.optenc = OPTENC_UTF16NE; cnxn->unicode_enc.name = StrDup(ENCSTR_UTF16NE); cnxn->unicode_enc.ctype = SQL_C_WCHAR; if (!cnxn->sqlchar_enc.name || !cnxn->sqlwchar_enc.name || !cnxn->metadata_enc.name || !cnxn->unicode_enc.name) { PyErr_NoMemory(); Py_DECREF(cnxn); return 0; } // // Initialize autocommit mode. // // The DB API says we have to default to manual-commit, but ODBC defaults to auto-commit. We also provide a // keyword parameter that allows the user to override the DB API and force us to start in auto-commit (in which // case we don't have to do anything). if (fAutoCommit == false) { Py_BEGIN_ALLOW_THREADS ret = SQLSetConnectAttr(cnxn->hdbc, SQL_ATTR_AUTOCOMMIT, (SQLPOINTER)cnxn->nAutoCommit, SQL_IS_UINTEGER); Py_END_ALLOW_THREADS if (!SQL_SUCCEEDED(ret)) { RaiseErrorFromHandle(cnxn, "SQLSetConnectAttr(SQL_ATTR_AUTOCOMMIT)", cnxn->hdbc, SQL_NULL_HANDLE); Py_DECREF(cnxn); return 0; } } if (fReadOnly) { Py_BEGIN_ALLOW_THREADS ret = SQLSetConnectAttr(cnxn->hdbc, SQL_ATTR_ACCESS_MODE, (SQLPOINTER)SQL_MODE_READ_ONLY, 0); Py_END_ALLOW_THREADS if (!SQL_SUCCEEDED(ret)) { RaiseErrorFromHandle(cnxn, "SQLSetConnectAttr(SQL_ATTR_ACCESS_MODE)", cnxn->hdbc, SQL_NULL_HANDLE); Py_DECREF(cnxn); return 0; } } // // Gather connection-level information we'll need later. // Object info(GetConnectionInfo(pConnectString, cnxn)); if (!info.IsValid()) { Py_DECREF(cnxn); return 0; } CnxnInfo* p = (CnxnInfo*)info.Get(); cnxn->odbc_major = p->odbc_major; cnxn->odbc_minor = p->odbc_minor; cnxn->supports_describeparam = p->supports_describeparam; cnxn->datetime_precision = p->datetime_precision; cnxn->need_long_data_len = p->need_long_data_len; cnxn->varchar_maxlength = p->varchar_maxlength; cnxn->wvarchar_maxlength = p->wvarchar_maxlength; cnxn->binary_maxlength = p->binary_maxlength; return reinterpret_cast(cnxn); } static char set_attr_doc[] = "set_attr(attr_id, value) -> None\n\n" "Calls SQLSetConnectAttr with the given values.\n\n" "attr_id\n" " The attribute id (integer) to set. These are ODBC or driver constants.\n\n" "value\n" " An integer value.\n\n" "At this time, only integer values are supported and are always passed as SQLUINTEGER."; static PyObject* Connection_set_attr(PyObject* self, PyObject* args) { int id; int value; if (!PyArg_ParseTuple(args, "ii", &id, &value)) return 0; Connection* cnxn = (Connection*)self; SQLRETURN ret; Py_BEGIN_ALLOW_THREADS ret = SQLSetConnectAttr(cnxn->hdbc, id, (SQLPOINTER)(intptr_t)value, SQL_IS_INTEGER); Py_END_ALLOW_THREADS if (!SQL_SUCCEEDED(ret)) return RaiseErrorFromHandle(cnxn, "SQLSetConnectAttr", cnxn->hdbc, SQL_NULL_HANDLE); Py_RETURN_NONE; } static char conv_clear_doc[] = "clear_output_converters() --> None\n\n" "Remove all output converter functions."; static PyObject* Connection_conv_clear(PyObject* self, PyObject* args) { UNUSED(args); Connection* cnxn = (Connection*)self; Py_XDECREF(cnxn->map_sqltype_to_converter); cnxn->map_sqltype_to_converter = 0; Py_RETURN_NONE; } static int Connection_clear(PyObject* self) { // Internal method for closing the connection. (Not called close so it isn't confused with the external close // method.) 
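// Python-level paths that reach this cleanup (a sketch based on close_doc and
// Connection_dealloc below):
//
//     cnxn.close()   # explicit; uncommitted work is rolled back first
//     del cnxn       # dealloc runs the same internal clear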
Connection* cnxn = (Connection*)self; if (cnxn->hdbc != SQL_NULL_HANDLE) { TRACE("cnxn.clear cnxn=%p hdbc=%d\n", cnxn, cnxn->hdbc); HDBC hdbc = cnxn->hdbc; cnxn->hdbc = SQL_NULL_HANDLE; Py_BEGIN_ALLOW_THREADS if (cnxn->nAutoCommit == SQL_AUTOCOMMIT_OFF) SQLEndTran(SQL_HANDLE_DBC, hdbc, SQL_ROLLBACK); SQLDisconnect(hdbc); SQLFreeHandle(SQL_HANDLE_DBC, hdbc); Py_END_ALLOW_THREADS } Py_XDECREF(cnxn->searchescape); cnxn->searchescape = 0; PyMem_Free((void*)cnxn->sqlchar_enc.name); cnxn->sqlchar_enc.name = 0; PyMem_Free((void*)cnxn->sqlwchar_enc.name); cnxn->sqlwchar_enc.name = 0; PyMem_Free((void*)cnxn->metadata_enc.name); cnxn->metadata_enc.name = 0; PyMem_Free((void*)cnxn->unicode_enc.name); cnxn->unicode_enc.name = 0; Py_XDECREF(cnxn->attrs_before); cnxn->attrs_before = 0; Py_XDECREF(cnxn->map_sqltype_to_converter); cnxn->map_sqltype_to_converter = 0; return 0; } static void Connection_dealloc(PyObject* self) { Connection_clear(self); PyObject_Del(self); } static char close_doc[] = "Close the connection now (rather than whenever __del__ is called).\n" "\n" "The connection will be unusable from this point forward and a ProgrammingError\n" "will be raised if any operation is attempted with the connection. The same\n" "applies to all cursor objects trying to use the connection.\n" "\n" "Note that closing a connection without committing the changes first will cause\n" "an implicit rollback to be performed."; static PyObject* Connection_close(PyObject* self, PyObject* args) { UNUSED(args); Connection* cnxn = Connection_Validate(self); if (!cnxn) return 0; Connection_clear(self); Py_RETURN_NONE; } static PyObject* Connection_cursor(PyObject* self, PyObject* args) { UNUSED(args); Connection* cnxn = Connection_Validate(self); if (!cnxn) return 0; return (PyObject*)Cursor_New(cnxn); } static PyObject* Connection_execute(PyObject* self, PyObject* args) { PyObject* result = 0; Cursor* cursor; Connection* cnxn = Connection_Validate(self); if (!cnxn) return 0; cursor = Cursor_New(cnxn); if (!cursor) return 0; result = Cursor_execute((PyObject*)cursor, args); Py_DECREF((PyObject*)cursor); return result; } enum { GI_YESNO, GI_STRING, GI_UINTEGER, GI_USMALLINT, }; struct GetInfoType { SQLUSMALLINT infotype; int datatype; // GI_XXX }; static const GetInfoType aInfoTypes[] = { // SQL_CONVERT_X { SQL_CONVERT_FUNCTIONS, GI_UINTEGER }, { SQL_CONVERT_BIGINT, GI_UINTEGER }, { SQL_CONVERT_BINARY, GI_UINTEGER }, { SQL_CONVERT_BIT, GI_UINTEGER }, { SQL_CONVERT_CHAR, GI_UINTEGER }, { SQL_CONVERT_DATE, GI_UINTEGER }, { SQL_CONVERT_DECIMAL, GI_UINTEGER }, { SQL_CONVERT_DOUBLE, GI_UINTEGER }, { SQL_CONVERT_FLOAT, GI_UINTEGER }, { SQL_CONVERT_INTEGER, GI_UINTEGER }, { SQL_CONVERT_LONGVARCHAR, GI_UINTEGER }, { SQL_CONVERT_NUMERIC, GI_UINTEGER }, { SQL_CONVERT_REAL, GI_UINTEGER }, { SQL_CONVERT_SMALLINT, GI_UINTEGER }, { SQL_CONVERT_TIME, GI_UINTEGER }, { SQL_CONVERT_TIMESTAMP, GI_UINTEGER }, { SQL_CONVERT_TINYINT, GI_UINTEGER }, { SQL_CONVERT_VARBINARY, GI_UINTEGER }, { SQL_CONVERT_VARCHAR, GI_UINTEGER }, { SQL_CONVERT_LONGVARBINARY, GI_UINTEGER }, { SQL_CONVERT_WCHAR, GI_UINTEGER }, { SQL_CONVERT_INTERVAL_DAY_TIME, GI_UINTEGER }, { SQL_CONVERT_INTERVAL_YEAR_MONTH, GI_UINTEGER }, { SQL_CONVERT_WLONGVARCHAR, GI_UINTEGER }, { SQL_CONVERT_WVARCHAR, GI_UINTEGER }, { SQL_CONVERT_GUID, GI_UINTEGER }, { SQL_ACCESSIBLE_PROCEDURES, GI_YESNO }, { SQL_ACCESSIBLE_TABLES, GI_YESNO }, { SQL_ACTIVE_ENVIRONMENTS, GI_USMALLINT }, { SQL_AGGREGATE_FUNCTIONS, GI_UINTEGER }, { SQL_ALTER_DOMAIN, GI_UINTEGER }, { SQL_ALTER_TABLE, GI_UINTEGER }, 
{ SQL_ASYNC_MODE, GI_UINTEGER }, { SQL_BATCH_ROW_COUNT, GI_UINTEGER }, { SQL_BATCH_SUPPORT, GI_UINTEGER }, { SQL_BOOKMARK_PERSISTENCE, GI_UINTEGER }, { SQL_CATALOG_LOCATION, GI_USMALLINT }, { SQL_CATALOG_NAME, GI_YESNO }, { SQL_CATALOG_NAME_SEPARATOR, GI_STRING }, { SQL_CATALOG_TERM, GI_STRING }, { SQL_CATALOG_USAGE, GI_UINTEGER }, { SQL_COLLATION_SEQ, GI_STRING }, { SQL_COLUMN_ALIAS, GI_YESNO }, { SQL_CONCAT_NULL_BEHAVIOR, GI_USMALLINT }, { SQL_CORRELATION_NAME, GI_USMALLINT }, { SQL_CREATE_ASSERTION, GI_UINTEGER }, { SQL_CREATE_CHARACTER_SET, GI_UINTEGER }, { SQL_CREATE_COLLATION, GI_UINTEGER }, { SQL_CREATE_DOMAIN, GI_UINTEGER }, { SQL_CREATE_SCHEMA, GI_UINTEGER }, { SQL_CREATE_TABLE, GI_UINTEGER }, { SQL_CREATE_TRANSLATION, GI_UINTEGER }, { SQL_CREATE_VIEW, GI_UINTEGER }, { SQL_CURSOR_COMMIT_BEHAVIOR, GI_USMALLINT }, { SQL_CURSOR_ROLLBACK_BEHAVIOR, GI_USMALLINT }, { SQL_DATABASE_NAME, GI_STRING }, { SQL_DATA_SOURCE_NAME, GI_STRING }, { SQL_DATA_SOURCE_READ_ONLY, GI_YESNO }, { SQL_DATETIME_LITERALS, GI_UINTEGER }, { SQL_DBMS_NAME, GI_STRING }, { SQL_DBMS_VER, GI_STRING }, { SQL_DDL_INDEX, GI_UINTEGER }, { SQL_DEFAULT_TXN_ISOLATION, GI_UINTEGER }, { SQL_DESCRIBE_PARAMETER, GI_YESNO }, { SQL_DM_VER, GI_STRING }, { SQL_DRIVER_NAME, GI_STRING }, { SQL_DRIVER_ODBC_VER, GI_STRING }, { SQL_DRIVER_VER, GI_STRING }, { SQL_DROP_ASSERTION, GI_UINTEGER }, { SQL_DROP_CHARACTER_SET, GI_UINTEGER }, { SQL_DROP_COLLATION, GI_UINTEGER }, { SQL_DROP_DOMAIN, GI_UINTEGER }, { SQL_DROP_SCHEMA, GI_UINTEGER }, { SQL_DROP_TABLE, GI_UINTEGER }, { SQL_DROP_TRANSLATION, GI_UINTEGER }, { SQL_DROP_VIEW, GI_UINTEGER }, { SQL_DYNAMIC_CURSOR_ATTRIBUTES1, GI_UINTEGER }, { SQL_DYNAMIC_CURSOR_ATTRIBUTES2, GI_UINTEGER }, { SQL_EXPRESSIONS_IN_ORDERBY, GI_YESNO }, { SQL_FILE_USAGE, GI_USMALLINT }, { SQL_FORWARD_ONLY_CURSOR_ATTRIBUTES1, GI_UINTEGER }, { SQL_FORWARD_ONLY_CURSOR_ATTRIBUTES2, GI_UINTEGER }, { SQL_GETDATA_EXTENSIONS, GI_UINTEGER }, { SQL_GROUP_BY, GI_USMALLINT }, { SQL_IDENTIFIER_CASE, GI_USMALLINT }, { SQL_IDENTIFIER_QUOTE_CHAR, GI_STRING }, { SQL_INDEX_KEYWORDS, GI_UINTEGER }, { SQL_INFO_SCHEMA_VIEWS, GI_UINTEGER }, { SQL_INSERT_STATEMENT, GI_UINTEGER }, { SQL_INTEGRITY, GI_YESNO }, { SQL_KEYSET_CURSOR_ATTRIBUTES1, GI_UINTEGER }, { SQL_KEYSET_CURSOR_ATTRIBUTES2, GI_UINTEGER }, { SQL_KEYWORDS, GI_STRING }, { SQL_LIKE_ESCAPE_CLAUSE, GI_YESNO }, { SQL_MAX_ASYNC_CONCURRENT_STATEMENTS, GI_UINTEGER }, { SQL_MAX_BINARY_LITERAL_LEN, GI_UINTEGER }, { SQL_MAX_CATALOG_NAME_LEN, GI_USMALLINT }, { SQL_MAX_CHAR_LITERAL_LEN, GI_UINTEGER }, { SQL_MAX_COLUMNS_IN_GROUP_BY, GI_USMALLINT }, { SQL_MAX_COLUMNS_IN_INDEX, GI_USMALLINT }, { SQL_MAX_COLUMNS_IN_ORDER_BY, GI_USMALLINT }, { SQL_MAX_COLUMNS_IN_SELECT, GI_USMALLINT }, { SQL_MAX_COLUMNS_IN_TABLE, GI_USMALLINT }, { SQL_MAX_COLUMN_NAME_LEN, GI_USMALLINT }, { SQL_MAX_CONCURRENT_ACTIVITIES, GI_USMALLINT }, { SQL_MAX_CURSOR_NAME_LEN, GI_USMALLINT }, { SQL_MAX_DRIVER_CONNECTIONS, GI_USMALLINT }, { SQL_MAX_IDENTIFIER_LEN, GI_USMALLINT }, { SQL_MAX_INDEX_SIZE, GI_UINTEGER }, { SQL_MAX_PROCEDURE_NAME_LEN, GI_USMALLINT }, { SQL_MAX_ROW_SIZE, GI_UINTEGER }, { SQL_MAX_ROW_SIZE_INCLUDES_LONG, GI_YESNO }, { SQL_MAX_SCHEMA_NAME_LEN, GI_USMALLINT }, { SQL_MAX_STATEMENT_LEN, GI_UINTEGER }, { SQL_MAX_TABLES_IN_SELECT, GI_USMALLINT }, { SQL_MAX_TABLE_NAME_LEN, GI_USMALLINT }, { SQL_MAX_USER_NAME_LEN, GI_USMALLINT }, { SQL_MULTIPLE_ACTIVE_TXN, GI_YESNO }, { SQL_MULT_RESULT_SETS, GI_YESNO }, { SQL_NEED_LONG_DATA_LEN, GI_YESNO }, { SQL_NON_NULLABLE_COLUMNS, GI_USMALLINT }, { SQL_NULL_COLLATION, 
GI_USMALLINT }, { SQL_NUMERIC_FUNCTIONS, GI_UINTEGER }, { SQL_ODBC_INTERFACE_CONFORMANCE, GI_UINTEGER }, { SQL_ODBC_VER, GI_STRING }, { SQL_OJ_CAPABILITIES, GI_UINTEGER }, { SQL_ORDER_BY_COLUMNS_IN_SELECT, GI_YESNO }, { SQL_PARAM_ARRAY_ROW_COUNTS, GI_UINTEGER }, { SQL_PARAM_ARRAY_SELECTS, GI_UINTEGER }, { SQL_PROCEDURES, GI_YESNO }, { SQL_PROCEDURE_TERM, GI_STRING }, { SQL_QUOTED_IDENTIFIER_CASE, GI_USMALLINT }, { SQL_ROW_UPDATES, GI_YESNO }, { SQL_SCHEMA_TERM, GI_STRING }, { SQL_SCHEMA_USAGE, GI_UINTEGER }, { SQL_SCROLL_OPTIONS, GI_UINTEGER }, { SQL_SEARCH_PATTERN_ESCAPE, GI_STRING }, { SQL_SERVER_NAME, GI_STRING }, { SQL_SPECIAL_CHARACTERS, GI_STRING }, { SQL_SQL92_DATETIME_FUNCTIONS, GI_UINTEGER }, { SQL_SQL92_FOREIGN_KEY_DELETE_RULE, GI_UINTEGER }, { SQL_SQL92_FOREIGN_KEY_UPDATE_RULE, GI_UINTEGER }, { SQL_SQL92_GRANT, GI_UINTEGER }, { SQL_SQL92_NUMERIC_VALUE_FUNCTIONS, GI_UINTEGER }, { SQL_SQL92_PREDICATES, GI_UINTEGER }, { SQL_SQL92_RELATIONAL_JOIN_OPERATORS, GI_UINTEGER }, { SQL_SQL92_REVOKE, GI_UINTEGER }, { SQL_SQL92_ROW_VALUE_CONSTRUCTOR, GI_UINTEGER }, { SQL_SQL92_STRING_FUNCTIONS, GI_UINTEGER }, { SQL_SQL92_VALUE_EXPRESSIONS, GI_UINTEGER }, { SQL_SQL_CONFORMANCE, GI_UINTEGER }, { SQL_STANDARD_CLI_CONFORMANCE, GI_UINTEGER }, { SQL_STATIC_CURSOR_ATTRIBUTES1, GI_UINTEGER }, { SQL_STATIC_CURSOR_ATTRIBUTES2, GI_UINTEGER }, { SQL_STRING_FUNCTIONS, GI_UINTEGER }, { SQL_SUBQUERIES, GI_UINTEGER }, { SQL_SYSTEM_FUNCTIONS, GI_UINTEGER }, { SQL_TABLE_TERM, GI_STRING }, { SQL_TIMEDATE_ADD_INTERVALS, GI_UINTEGER }, { SQL_TIMEDATE_DIFF_INTERVALS, GI_UINTEGER }, { SQL_TIMEDATE_FUNCTIONS, GI_UINTEGER }, { SQL_TXN_CAPABLE, GI_USMALLINT }, { SQL_TXN_ISOLATION_OPTION, GI_UINTEGER }, { SQL_UNION, GI_UINTEGER }, { SQL_USER_NAME, GI_STRING }, { SQL_XOPEN_CLI_YEAR, GI_STRING }, }; static PyObject* Connection_getinfo(PyObject* self, PyObject* args) { Connection* cnxn = Connection_Validate(self); if (!cnxn) return 0; unsigned long infotype; if (!PyArg_ParseTuple(args, "k", &infotype)) return 0; unsigned int i = 0; for (; i < _countof(aInfoTypes); i++) { if (aInfoTypes[i].infotype == infotype) break; } if (i == _countof(aInfoTypes)) return RaiseErrorV(0, ProgrammingError, "Unsupported getinfo value: %d", infotype); char szBuffer[0x1000]; SQLSMALLINT cch = 0; SQLRETURN ret; Py_BEGIN_ALLOW_THREADS ret = SQLGetInfo(cnxn->hdbc, (SQLUSMALLINT)infotype, szBuffer, sizeof(szBuffer), &cch); Py_END_ALLOW_THREADS if (!SQL_SUCCEEDED(ret)) { RaiseErrorFromHandle(cnxn, "SQLGetInfo", cnxn->hdbc, SQL_NULL_HANDLE); return 0; } PyObject* result = 0; switch (aInfoTypes[i].datatype) { case GI_YESNO: result = (szBuffer[0] == 'Y') ? Py_True : Py_False; Py_INCREF(result); break; case GI_STRING: result = PyUnicode_FromStringAndSize(szBuffer, (Py_ssize_t)cch); break; case GI_UINTEGER: { SQLUINTEGER n = *(SQLUINTEGER*)szBuffer; // Does this work on PPC or do we need a union? result = PyLong_FromLong((long)n); break; } case GI_USMALLINT: result = PyLong_FromLong(*(SQLUSMALLINT*)szBuffer); break; } return result; } PyObject* Connection_endtrans(Connection* cnxn, SQLSMALLINT type) { // If called from Cursor.commit, it is possible that `cnxn` is deleted by another thread when we release them // below. (The cursor has had its reference incremented by the method it is calling, but nothing has incremented // the connections count. We could, but we really only need the HDBC.) 
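// Python equivalents that route through SQLEndTran here (sketch; cs is an
// illustrative connection string):
//
//     cnxn.commit()     # Connection_commit   -> SQLEndTran(SQL_COMMIT)
//     cnxn.rollback()   # Connection_rollback -> SQLEndTran(SQL_ROLLBACK)
//
//     # Connection_exit (the __exit__ slot) uses the same call, so with
//     # autocommit off:
//     with pyodbc.connect(cs) as cnxn:
//         cnxn.execute("INSERT ...")
//     # commits on clean exit, rolls back if an exception escaped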
    HDBC hdbc = cnxn->hdbc;

    SQLRETURN ret;
    Py_BEGIN_ALLOW_THREADS
    ret = SQLEndTran(SQL_HANDLE_DBC, hdbc, type);
    Py_END_ALLOW_THREADS

    if (!SQL_SUCCEEDED(ret))
    {
        RaiseErrorFromHandle(cnxn, "SQLEndTran", hdbc, SQL_NULL_HANDLE);
        return 0;
    }

    Py_RETURN_NONE;
}

static PyObject* Connection_commit(PyObject* self, PyObject* args)
{
    UNUSED(args);

    Connection* cnxn = Connection_Validate(self);
    if (!cnxn)
        return 0;

    TRACE("commit: cnxn=%p hdbc=%d\n", cnxn, cnxn->hdbc);

    return Connection_endtrans(cnxn, SQL_COMMIT);
}

static PyObject* Connection_rollback(PyObject* self, PyObject* args)
{
    UNUSED(args);

    Connection* cnxn = Connection_Validate(self);
    if (!cnxn)
        return 0;

    TRACE("rollback: cnxn=%p hdbc=%d\n", cnxn, cnxn->hdbc);

    return Connection_endtrans(cnxn, SQL_ROLLBACK);
}

static char cursor_doc[] =
    "Return a new Cursor object using the connection.";

static char execute_doc[] =
    "execute(sql, [params]) --> Cursor\n"
    "\n"
    "Create a new Cursor object, call its execute method, and return it.  See\n"
    "Cursor.execute for more details.\n"
    "\n"
    "This is a convenience method that is not part of the DB API.  Since a new\n"
    "Cursor is allocated by each call, this should not be used if more than one SQL\n"
    "statement needs to be executed.";

static char commit_doc[] =
    "Commit any pending transaction to the database.";

static char rollback_doc[] =
    "Causes the database to roll back to the start of any pending transaction.";

static char getinfo_doc[] =
    "getinfo(type) --> str | int | bool\n"
    "\n"
    "Calls SQLGetInfo, passing `type`, and returns the result formatted as a Python object.";

PyObject* Connection_getautocommit(PyObject* self, void* closure)
{
    UNUSED(closure);

    Connection* cnxn = Connection_Validate(self);
    if (!cnxn)
        return 0;

    PyObject* result = (cnxn->nAutoCommit == SQL_AUTOCOMMIT_ON) ? Py_True : Py_False;
    Py_INCREF(result);
    return result;
}

static int Connection_setautocommit(PyObject* self, PyObject* value, void* closure)
{
    UNUSED(closure);

    Connection* cnxn = Connection_Validate(self);
    if (!cnxn)
        return -1;

    if (value == 0)
    {
        PyErr_SetString(PyExc_TypeError, "Cannot delete the autocommit attribute.");
        return -1;
    }

    uintptr_t nAutoCommit = PyObject_IsTrue(value) ?
SQL_AUTOCOMMIT_ON : SQL_AUTOCOMMIT_OFF; SQLRETURN ret; Py_BEGIN_ALLOW_THREADS ret = SQLSetConnectAttr(cnxn->hdbc, SQL_ATTR_AUTOCOMMIT, (SQLPOINTER)nAutoCommit, SQL_IS_UINTEGER); Py_END_ALLOW_THREADS if (!SQL_SUCCEEDED(ret)) { RaiseErrorFromHandle(cnxn, "SQLSetConnectAttr", cnxn->hdbc, SQL_NULL_HANDLE); return -1; } cnxn->nAutoCommit = nAutoCommit; return 0; } static PyObject* Connection_getclosed(PyObject* self, void* closure) { UNUSED(closure); Connection* cnxn; if (self == 0 || !Connection_Check(self)) { PyErr_SetString(PyExc_TypeError, "Connection object required"); return 0; } cnxn = (Connection*)self; if (cnxn->hdbc == SQL_NULL_HANDLE) { Py_RETURN_TRUE; } Py_RETURN_FALSE; } static PyObject* Connection_getsearchescape(PyObject* self, void* closure) { UNUSED(closure); Connection* cnxn = (Connection*)self; if (!cnxn->searchescape) { char sz[8] = { 0 }; SQLSMALLINT cch = 0; SQLRETURN ret; Py_BEGIN_ALLOW_THREADS ret = SQLGetInfo(cnxn->hdbc, SQL_SEARCH_PATTERN_ESCAPE, &sz, _countof(sz), &cch); Py_END_ALLOW_THREADS if (!SQL_SUCCEEDED(ret)) return RaiseErrorFromHandle(cnxn, "SQLGetInfo", cnxn->hdbc, SQL_NULL_HANDLE); cnxn->searchescape = PyUnicode_FromStringAndSize(sz, (Py_ssize_t)cch); } Py_INCREF(cnxn->searchescape); return cnxn->searchescape; } static PyObject* Connection_getmaxwrite(PyObject* self, void* closure) { UNUSED(closure); Connection* cnxn = Connection_Validate(self); if (!cnxn) return 0; return PyLong_FromSsize_t(cnxn->maxwrite); } static int Connection_setmaxwrite(PyObject* self, PyObject* value, void* closure) { UNUSED(closure); Connection* cnxn = Connection_Validate(self); if (!cnxn) return -1; if (value == 0) { PyErr_SetString(PyExc_TypeError, "Cannot delete the maxwrite attribute."); return -1; } long maxwrite = PyLong_AsLong(value); if (PyErr_Occurred()) return -1; Py_ssize_t minval = 255; if (maxwrite != 0 && maxwrite < minval) { PyErr_Format(PyExc_ValueError, "Cannot set maxwrite less than %d unless setting to 0.", (int)minval); return -1; } cnxn->maxwrite = maxwrite; return 0; } static PyObject* Connection_gettimeout(PyObject* self, void* closure) { UNUSED(closure); Connection* cnxn = Connection_Validate(self); if (!cnxn) return 0; return PyLong_FromLong(cnxn->timeout); } static int Connection_settimeout(PyObject* self, PyObject* value, void* closure) { UNUSED(closure); Connection* cnxn = Connection_Validate(self); if (!cnxn) return -1; if (value == 0) { PyErr_SetString(PyExc_TypeError, "Cannot delete the timeout attribute."); return -1; } long timeout = PyLong_AsLong(value); if (timeout == -1 && PyErr_Occurred()) return -1; if (timeout < 0) { PyErr_SetString(PyExc_ValueError, "Cannot set a negative timeout."); return -1; } SQLRETURN ret; Py_BEGIN_ALLOW_THREADS ret = SQLSetConnectAttr(cnxn->hdbc, SQL_ATTR_CONNECTION_TIMEOUT, (SQLPOINTER)(uintptr_t)timeout, SQL_IS_UINTEGER); Py_END_ALLOW_THREADS if (!SQL_SUCCEEDED(ret)) { RaiseErrorFromHandle(cnxn, "SQLSetConnectAttr", cnxn->hdbc, SQL_NULL_HANDLE); return -1; } cnxn->timeout = timeout; return 0; } static bool _remove_converter(PyObject* self, SQLSMALLINT sqltype) { Connection* cnxn = (Connection*)self; if (!cnxn->map_sqltype_to_converter) { // There are no converters, so nothing to remove. 
return true; } Object n(PyLong_FromLong(sqltype)); if (!n.IsValid()) return false; if (!PyDict_Contains(cnxn->map_sqltype_to_converter, n.Get())) return true; return PyDict_DelItem(cnxn->map_sqltype_to_converter, n.Get()) == 0; } static bool _add_converter(PyObject* self, SQLSMALLINT sqltype, PyObject* func) { Connection* cnxn = (Connection*)self; if (!cnxn->map_sqltype_to_converter) { cnxn->map_sqltype_to_converter = PyDict_New(); if (!cnxn->map_sqltype_to_converter) return false; } Object n(PyLong_FromLong(sqltype)); if (!n.IsValid()) return false; return PyDict_SetItem(cnxn->map_sqltype_to_converter, n.Get(), func) != -1; } static char conv_add_doc[] = "add_output_converter(sqltype, func) --> None\n" "\n" "Register an output converter function that will be called whenever a value with\n" "the given SQL type is read from the database.\n" "\n" "sqltype\n" " The integer SQL type value to convert, which can be one of the defined\n" " standard constants (e.g. pyodbc.SQL_VARCHAR) or a database-specific value\n" " (e.g. -151 for the SQL Server 2008 geometry data type).\n" "\n" "func\n" " The converter function which will be called with a single parameter, the\n" " value, and should return the converted value. If the value is NULL, the\n" " parameter will be None. Otherwise it will be a " "bytes object.\n" "\n" "If func is None, any existing converter is removed." ; static PyObject* Connection_conv_add(PyObject* self, PyObject* args) { int sqltype; PyObject* func; if (!PyArg_ParseTuple(args, "iO", &sqltype, &func)) return 0; if (func != Py_None) { if (!_add_converter(self, (SQLSMALLINT)sqltype, func)) return 0; } else { if (!_remove_converter(self, (SQLSMALLINT)sqltype)) return 0; } Py_RETURN_NONE; } static char conv_remove_doc[] = "remove_output_converter(sqltype) --> None\n" "\n" "Remove an output converter function that was registered with\n" "add_output_converter. It is safe to call if no converter is\n" "registered for the type.\n" "\n" "sqltype\n" " The integer SQL type value being converted, which can be one of the defined\n" " standard constants (e.g. pyodbc.SQL_VARCHAR) or a database-specific value\n" " (e.g. -151 for the SQL Server 2008 geometry data type).\n" ; static PyObject* Connection_conv_remove(PyObject* self, PyObject* args) { int sqltype; if (!PyArg_ParseTuple(args, "i", &sqltype)) return 0; if (!_remove_converter(self, (SQLSMALLINT)sqltype)) return 0; Py_RETURN_NONE; } static char conv_get_doc[] = "get_output_converter(sqltype) --> \n" "\n" "Get the output converter function that was registered with\n" "add_output_converter. It is safe to call if no converter is\n" "registered for the type (returns None).\n" "\n" "sqltype\n" " The integer SQL type value being converted, which can be one of the defined\n" " standard constants (e.g. pyodbc.SQL_VARCHAR) or a database-specific value\n" " (e.g. -151 for the SQL Server 2008 geometry data type).\n" ; PyObject* Connection_GetConverter(Connection* cnxn, SQLSMALLINT type) { // This is our internal function. It returns a *borrowed* reference to the converter // function (so do not deference it). // // Returns 0 if (1) there is no converter for the type or (2) an error occurred. You'll // need to call PyErr_Occurred to differentiate. 
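// Python-side usage of this converter machinery, per the docstrings above
// (sketch; -151 is the SQL Server geometry example those docstrings mention):
//
//     cnxn.add_output_converter(-151, lambda v: None if v is None else v.hex())
//     cnxn.get_output_converter(-151)     # returns the registered function
//     cnxn.remove_output_converter(-151)  # safe even if none is registered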
if (!cnxn->map_sqltype_to_converter) { Py_RETURN_NONE; } Object n(PyLong_FromLong(type)); if (!n.IsValid()) return 0; return PyDict_GetItem(cnxn->map_sqltype_to_converter, n.Get()); } static PyObject* Connection_conv_get(PyObject* self, PyObject* args) { int sqltype; if (!PyArg_ParseTuple(args, "i", &sqltype)) return 0; Connection* cnxn = (Connection*)self; PyObject* func = Connection_GetConverter(cnxn, (SQLSMALLINT)sqltype); if (func) { Py_INCREF(func); return func; } Py_RETURN_NONE; } static void NormalizeCodecName(const char* src, char* dest, size_t cbDest) { // Copies the codec name to dest, lowercasing it and replacing underscores with dashes. // (Same as _Py_normalize_encoding which is not public.) It also wraps the value with // pipes so we can search with it. // // UTF_8 --> |utf-8| // // This is an internal function - it will truncate so you should use a buffer bigger than // anything you expect to search for. char* pch = &dest[0]; char* pchLast = pch + cbDest - 2; // -2 -> leave room for pipe and null *pch++ = '|'; while (*src && pch < pchLast) { if (isupper(*src)) { *pch++ = (char)tolower(*src++); } else if (*src == '_') { *pch++ = '-'; src++; } else { *pch++ = *src++; } } *pch++ = '|'; *pch = '\0'; } static bool SetTextEncCommon(TextEnc& enc, const char* encoding, int ctype) { // Code common to setencoding and setdecoding. if (!encoding) { PyErr_Format(PyExc_ValueError, "encoding is required"); return false; } // Normalize the names so we don't have to worry about case or dashes vs underscores. // We'll lowercase everything and convert underscores to dashes. The results are then // surrounded with pipes so we can search strings. (See the `strstr` calls below.) char lower[30]; NormalizeCodecName(encoding, lower, sizeof(lower)); if (!PyCodec_KnownEncoding(encoding)) { PyErr_Format(PyExc_ValueError, "not a registered codec: '%s'", encoding); return false; } if (ctype != 0 && ctype != SQL_WCHAR && ctype != SQL_CHAR) { PyErr_Format(PyExc_ValueError, "Invalid ctype %d. Must be SQL_CHAR or SQL_WCHAR", ctype); return false; } char* cpy = StrDup(encoding); if (!cpy) { PyErr_NoMemory(); return false; } PyMem_Free((void*)enc.name); enc.name = cpy; if (strstr("|utf-8|utf8|", lower)) { enc.optenc = OPTENC_UTF8; enc.ctype = (SQLSMALLINT)(ctype ? ctype : SQL_C_CHAR); } else if (strstr("|utf-16|utf16|", lower)) { enc.optenc = OPTENC_UTF16; enc.ctype = (SQLSMALLINT)(ctype ? ctype : SQL_C_WCHAR); } else if (strstr("|utf-16-be|utf-16be|utf16be|", lower)) { enc.optenc = OPTENC_UTF16BE; enc.ctype = (SQLSMALLINT)(ctype ? ctype : SQL_C_WCHAR); } else if (strstr("|utf-16-le|utf-16le|utf16le|", lower)) { enc.optenc = OPTENC_UTF16LE; enc.ctype = (SQLSMALLINT)(ctype ? ctype : SQL_C_WCHAR); } else if (strstr("|utf-32|utf32|", lower)) { enc.optenc = OPTENC_UTF32; enc.ctype = (SQLSMALLINT)(ctype ? ctype : SQL_C_WCHAR); } else if (strstr("|utf-32-be|utf-32be|utf32be|", lower)) { enc.optenc = OPTENC_UTF32BE; enc.ctype = (SQLSMALLINT)(ctype ? ctype : SQL_C_WCHAR); } else if (strstr("|utf-32-le|utf-32le|utf32le|", lower)) { enc.optenc = OPTENC_UTF32LE; enc.ctype = (SQLSMALLINT)(ctype ? ctype : SQL_C_WCHAR); } else if (strstr("|latin-1|latin1|iso-8859-1|iso8859-1|", lower)) { enc.optenc = OPTENC_LATIN1; enc.ctype = (SQLSMALLINT)(ctype ? 
ctype : SQL_C_CHAR);
    }
    else
    {
        enc.optenc = OPTENC_NONE;
        enc.ctype = SQL_C_CHAR;
    }

    return true;
}

static PyObject* Connection_setencoding(PyObject* self, PyObject* args, PyObject* kwargs)
{
    Connection* cnxn = (Connection*)self;

    // In Python 3 we only support encodings for Unicode text.

    char* encoding = 0;
    int ctype = 0;
    static char *kwlist[] = { "encoding", "ctype", 0 };
    if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|si", kwlist, &encoding, &ctype))
        return 0;

    TextEnc& enc = cnxn->unicode_enc;

    if (!SetTextEncCommon(enc, encoding, ctype))
        return 0;

    Py_RETURN_NONE;
}

static char setdecoding_doc[] =
    "setdecoding(sqltype, encoding=None, ctype=None) --> None\n"
    "\n"
    "Configures how text of type `ctype` (SQL_CHAR or SQL_WCHAR) is decoded\n"
    "when read from the database.\n"
    "\n"
    "When reading, the database will assign one of the sqltypes to text columns.\n"
    "pyodbc uses this to look up the decoding information set by this function.\n"
    "\n"
    "sqltype: pyodbc.SQL_CHAR or pyodbc.SQL_WCHAR\n\n"
    "encoding: A registered Python encoding such as \"utf-8\".\n\n"
    "ctype: The C data type that should be requested.  Set this to SQL_CHAR for\n"
    "  single-byte encodings like UTF-8 and to SQL_WCHAR for two-byte encodings\n"
    "  like UTF-16.";

static PyObject* Connection_setdecoding(PyObject* self, PyObject* args, PyObject* kwargs)
{
    Connection* cnxn = (Connection*)self;

    int sqltype;
    char* encoding = 0;
    int ctype = 0;

    static char *kwlist[] = {"sqltype", "encoding", "ctype", NULL};
    if (!PyArg_ParseTupleAndKeywords(args, kwargs, "i|si", kwlist, &sqltype, &encoding, &ctype))
        return 0;

    if (sqltype != SQL_WCHAR && sqltype != SQL_CHAR && sqltype != SQL_WMETADATA)
        return PyErr_Format(PyExc_ValueError, "Invalid sqltype %d.  Must be SQL_CHAR or SQL_WCHAR or SQL_WMETADATA", sqltype);

    TextEnc& enc = (sqltype == SQL_CHAR) ? cnxn->sqlchar_enc :
                   ((sqltype == SQL_WMETADATA) ? cnxn->metadata_enc : cnxn->sqlwchar_enc);

    if (!SetTextEncCommon(enc, encoding, ctype))
        return 0;

    Py_RETURN_NONE;
}

static char enter_doc[] = "__enter__() -> self.";

static PyObject* Connection_enter(PyObject* self, PyObject* args)
{
    UNUSED(args);
    Py_INCREF(self);
    return self;
}

static char exit_doc[] = "__exit__(*excinfo) -> None.  Commits the connection if necessary.";

static PyObject* Connection_exit(PyObject* self, PyObject* args)
{
    Connection* cnxn = (Connection*)self;

    // If an error has occurred, `args` will be a tuple of 3 values.  Otherwise it will be a tuple of 3 `None`s.
    assert(PyTuple_Check(args));

    if (cnxn->nAutoCommit == SQL_AUTOCOMMIT_OFF)
    {
        SQLSMALLINT CompletionType = (PyTuple_GetItem(args, 0) == Py_None) ? SQL_COMMIT : SQL_ROLLBACK;

        SQLRETURN ret;
        Py_BEGIN_ALLOW_THREADS
        ret = SQLEndTran(SQL_HANDLE_DBC, cnxn->hdbc, CompletionType);
        Py_END_ALLOW_THREADS

        if (!SQL_SUCCEEDED(ret))
        {
            const char* szFunc = (CompletionType == SQL_COMMIT) ? "SQLEndTran(SQL_COMMIT)" : "SQLEndTran(SQL_ROLLBACK)";
            return RaiseErrorFromHandle(cnxn, szFunc, cnxn->hdbc, SQL_NULL_HANDLE);
        }
    }

    Py_RETURN_NONE;
}

static struct PyMethodDef Connection_methods[] =
{
    { "cursor",                  Connection_cursor,      METH_NOARGS,  cursor_doc      },
    { "close",                   Connection_close,       METH_NOARGS,  close_doc       },
    { "execute",                 Connection_execute,     METH_VARARGS, execute_doc     },
    { "commit",                  Connection_commit,      METH_NOARGS,  commit_doc      },
    { "rollback",                Connection_rollback,    METH_NOARGS,  rollback_doc    },
    { "getinfo",                 Connection_getinfo,     METH_VARARGS, getinfo_doc     },
    { "add_output_converter",    Connection_conv_add,    METH_VARARGS, conv_add_doc    },
    { "remove_output_converter", Connection_conv_remove, METH_VARARGS, conv_remove_doc },
    { "get_output_converter",    Connection_conv_get,    METH_VARARGS, conv_get_doc    },
    { "clear_output_converters", Connection_conv_clear,  METH_NOARGS,  conv_clear_doc  },
    { "setdecoding",             (PyCFunction)Connection_setdecoding, METH_VARARGS|METH_KEYWORDS, setdecoding_doc },
    { "setencoding",             (PyCFunction)Connection_setencoding, METH_VARARGS|METH_KEYWORDS, 0 },
    { "set_attr",                Connection_set_attr,    METH_VARARGS, set_attr_doc    },
    { "__enter__",               Connection_enter,       METH_NOARGS,  enter_doc       },
    { "__exit__",                Connection_exit,        METH_VARARGS, exit_doc        },
    { 0, 0, 0, 0 }
};

static PyGetSetDef Connection_getseters[] = {
    { "closed", (getter)Connection_getclosed, 0,
      "Returns True if the connection is closed; False otherwise.", 0 },
    { "searchescape", (getter)Connection_getsearchescape, 0,
      "The ODBC search pattern escape character, as returned by\n"
      "SQLGetInfo(SQL_SEARCH_PATTERN_ESCAPE).  These are driver specific.", 0 },
    { "autocommit", Connection_getautocommit, Connection_setautocommit,
      "Returns True if the connection is in autocommit mode; False otherwise.", 0 },
    { "timeout", Connection_gettimeout, Connection_settimeout,
      "The timeout in seconds, zero means no timeout.", 0 },
    { "maxwrite", Connection_getmaxwrite, Connection_setmaxwrite,
      "The maximum bytes to write before using SQLPutData.", 0 },
    { 0 }
};

PyTypeObject ConnectionType =
{
    PyVarObject_HEAD_INIT(0, 0)
    "pyodbc.Connection",        // tp_name
    sizeof(Connection),         // tp_basicsize
    0,                          // tp_itemsize
    Connection_dealloc,         // destructor tp_dealloc
    0,                          // tp_print
    0,                          // tp_getattr
    0,                          // tp_setattr
    0,                          // tp_compare
    0,                          // tp_repr
    0,                          // tp_as_number
    0,                          // tp_as_sequence
    0,                          // tp_as_mapping
    0,                          // tp_hash
    0,                          // tp_call
    0,                          // tp_str
    0,                          // tp_getattro
    0,                          // tp_setattro
    0,                          // tp_as_buffer
    Py_TPFLAGS_DEFAULT,         // tp_flags
    connection_doc,             // tp_doc
    0,                          // tp_traverse
    0,                          // tp_clear
    0,                          // tp_richcompare
    0,                          // tp_weaklistoffset
    0,                          // tp_iter
    0,                          // tp_iternext
    Connection_methods,         // tp_methods
    0,                          // tp_members
    Connection_getseters,       // tp_getset
    0,                          // tp_base
    0,                          // tp_dict
    0,                          // tp_descr_get
    0,                          // tp_descr_set
    0,                          // tp_dictoffset
    0,                          // tp_init
    0,                          // tp_alloc
    0,                          // tp_new
    0,                          // tp_free
    0,                          // tp_is_gc
    0,                          // tp_bases
    0,                          // tp_mro
    0,                          // tp_cache
    0,                          // tp_subclasses
    0,                          // tp_weaklist
};

pyodbc-5.1.0/src/connection.h

// Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
// documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
// permit persons to whom the Software is furnished to do so.
// // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE // WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS // OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR // OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #ifndef CONNECTION_H #define CONNECTION_H struct Cursor; extern PyTypeObject ConnectionType; struct TextEnc; struct Connection { PyObject_HEAD // Set to SQL_NULL_HANDLE when the connection is closed. HDBC hdbc; // Will be SQL_AUTOCOMMIT_ON or SQL_AUTOCOMMIT_OFF. uintptr_t nAutoCommit; // The ODBC version the driver supports, from SQLGetInfo(DRIVER_ODBC_VER). This is set after connecting. char odbc_major; char odbc_minor; // The escape character from SQLGetInfo. This is not initialized until requested, so this may be zero! PyObject* searchescape; // Will be true if SQLDescribeParam is supported. If false, we'll have to guess but the user will not be able // to insert NULLs into binary columns. bool supports_describeparam; // The column size of datetime columns, obtained from SQLGetInfo(), used to determine the datetime precision. int datetime_precision; // The connection timeout in seconds. long timeout; // Pointer connection attributes may require that the pointed-to object be kept // valid until some unspecified time in the future, so keep them here for now. PyObject* attrs_before; TextEnc sqlchar_enc; // encoding used when reading SQL_CHAR data TextEnc sqlwchar_enc; // encoding used when reading SQL_WCHAR data TextEnc unicode_enc; // encoding used when writing unicode strings TextEnc metadata_enc; // Used when reading column names for Cursor.description. I originally thought I could use // the TextEncs above based on whether I called SQLDescribeCol vs SQLDescribeColW. // Unfortunately it looks like PostgreSQL and MySQL (and probably others) ignore the ODBC // specification regarding encoding everywhere *except* in these functions - SQLDescribeCol // seems to always return UTF-16LE by them regardless of the connection settings. long maxwrite; // Used to override varchar_maxlength, etc. Those are initialized from // SQLGetTypeInfo but some drivers (e.g. psqlodbc) return almost arbitrary // values (like 255 chars) leading to very slow insert performance (lots of // small calls to SQLPutData). If this is zero the values from // SQLGetTypeInfo are used. Otherwise this value is used. // These are copied from cnxn info for performance and convenience. int varchar_maxlength; int wvarchar_maxlength; int binary_maxlength; SQLLEN GetMaxLength(SQLSMALLINT ctype) const { assert(ctype == SQL_C_BINARY || ctype == SQL_C_WCHAR || ctype == SQL_C_CHAR); if (maxwrite != 0) return maxwrite; if (ctype == SQL_C_BINARY) return binary_maxlength; if (ctype == SQL_C_WCHAR) return wvarchar_maxlength; return varchar_maxlength; } bool need_long_data_len; PyObject* map_sqltype_to_converter; // If converters are defined, this will be a dictionary mapping from the SQLTYPE cast to an // int (because types can be negative) to the converter function. // // Unfortunately each lookup requires creating a Python object. To bypass this when output // converters are not used, we keep this pointer null until the first converter is added, // which is fast to check. 
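    // For reference, the Python-level API that populates this map is
    // Connection.add_output_converter.  A sketch (the connection string and the
    // Decimal handler are illustrative, not part of this header):
    //
    //     import decimal, pyodbc
    //     cnxn = pyodbc.connect('DSN=mydsn')          # hypothetical DSN
    //     # the converter receives the raw bytes supplied by the driver
    //     cnxn.add_output_converter(pyodbc.SQL_DECIMAL,
    //         lambda raw: decimal.Decimal(raw.decode('ascii')))
    //     cnxn.clear_output_converters()              # drops the map again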
}; #define Connection_Check(op) PyObject_TypeCheck(op, &ConnectionType) #define Connection_CheckExact(op) (Py_TYPE(op) == &ConnectionType) /* * Used by the module's connect function to create new connection objects. If unable to connect to the database, an * exception is set and zero is returned. */ PyObject* Connection_New(PyObject* pConnectString, bool fAutoCommit, long timeout, bool fReadOnly, PyObject* attrs_before, Object& encoding); /* * Used by the Cursor to implement commit and rollback. */ PyObject* Connection_endtrans(Connection* cnxn, SQLSMALLINT type); PyObject* Connection_GetConverter(Connection* cnxn, SQLSMALLINT type); #endif ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1707151232.0 pyodbc-5.1.0/src/cursor.cpp0000644000175100001770000024550714560207600015227 0ustar00runnerdocker// Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated // documentation files (the "Software"), to deal in the Software without restriction, including without limitation the // rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to // permit persons to whom the Software is furnished to do so. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE // WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS // OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR // OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. // Note: This project has gone from C++ (when it was ported from pypgdb) to C, back to C++ (where it will stay). If // you are making modifications, feel free to move variable declarations from the top of functions to where they are // actually used. #include "pyodbc.h" #include "wrapper.h" #include "textenc.h" #include "cursor.h" #include "pyodbcmodule.h" #include "connection.h" #include "row.h" #include "params.h" #include "errors.h" #include "getdata.h" #include "dbspecific.h" #include <datetime.h> enum { CURSOR_REQUIRE_CNXN = 0x00000001, CURSOR_REQUIRE_OPEN = 0x00000003, // includes _CNXN CURSOR_REQUIRE_RESULTS = 0x00000007, // includes _OPEN CURSOR_RAISE_ERROR = 0x00000010, }; inline bool StatementIsValid(Cursor* cursor) { return cursor->cnxn != 0 && ((Connection*)cursor->cnxn)->hdbc != SQL_NULL_HANDLE && cursor->hstmt != SQL_NULL_HANDLE; } extern PyTypeObject CursorType; inline bool Cursor_Check(PyObject* o) { return o != 0 && Py_TYPE(o) == &CursorType; } static Cursor* Cursor_Validate(PyObject* obj, DWORD flags) { // Validates that a PyObject is a Cursor (like Cursor_Check) and optionally some other requirements controlled by // `flags`. If valid and all requirements (from the flags) are met, the cursor is returned, cast to Cursor*. // Otherwise zero is returned. // // Designed to be used at the top of methods to convert the PyObject pointer and perform necessary checks. // // Valid flags are from the CURSOR_ enum above. Note that unless CURSOR_RAISE_ERROR is supplied, an exception // will not be set. (When deallocating, we really don't want an exception.)
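    // A typical call, as used by the cursor methods below:
    //
    //     Cursor* cursor = Cursor_Validate(self, CURSOR_REQUIRE_OPEN | CURSOR_RAISE_ERROR);
    //     if (!cursor)
    //         return 0;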
Connection* cnxn = 0; Cursor* cursor = 0; if (!Cursor_Check(obj)) { if (flags & CURSOR_RAISE_ERROR) PyErr_SetString(ProgrammingError, "Invalid cursor object."); return 0; } cursor = (Cursor*)obj; cnxn = (Connection*)cursor->cnxn; if (cnxn == 0) { if (flags & CURSOR_RAISE_ERROR) PyErr_SetString(ProgrammingError, "Attempt to use a closed cursor."); return 0; } if (IsSet(flags, CURSOR_REQUIRE_OPEN)) { if (cursor->hstmt == SQL_NULL_HANDLE) { if (flags & CURSOR_RAISE_ERROR) PyErr_SetString(ProgrammingError, "Attempt to use a closed cursor."); return 0; } if (cnxn->hdbc == SQL_NULL_HANDLE) { if (flags & CURSOR_RAISE_ERROR) PyErr_SetString(ProgrammingError, "The cursor's connection has been closed."); return 0; } } if (IsSet(flags, CURSOR_REQUIRE_RESULTS) && cursor->colinfos == 0) { if (flags & CURSOR_RAISE_ERROR) PyErr_SetString(ProgrammingError, "No results. Previous SQL was not a query."); return 0; } return cursor; } inline bool IsNumericType(SQLSMALLINT sqltype) { switch (sqltype) { case SQL_DECIMAL: case SQL_NUMERIC: case SQL_REAL: case SQL_FLOAT: case SQL_DOUBLE: case SQL_SMALLINT: case SQL_INTEGER: case SQL_TINYINT: case SQL_BIGINT: return true; } return false; } static bool create_name_map(Cursor* cur, SQLSMALLINT field_count, bool lower) { // Called after an execute to construct the map shared by rows. bool success = false; PyObject *desc = 0, *colmap = 0, *colinfo = 0, *type = 0, *index = 0, *nullable_obj=0; SQLSMALLINT nameLen = 300; uint16_t *szName = NULL; SQLRETURN ret; assert(cur->hstmt != SQL_NULL_HANDLE && cur->colinfos != 0); // These are the values we expect after free_results. If this function fails, we do not modify any members, so // they should be set to something Cursor_close can deal with. assert(cur->description == Py_None); assert(cur->map_name_to_index == 0); if (cur->cnxn->hdbc == SQL_NULL_HANDLE) { RaiseErrorV(0, ProgrammingError, "The cursor's connection was closed."); return false; } desc = PyTuple_New((Py_ssize_t)field_count); colmap = PyDict_New(); szName = (uint16_t*) PyMem_Malloc((nameLen + 1) * sizeof(uint16_t)); if (!desc || !colmap || !szName) goto done; for (int i = 0; i < field_count; i++) { SQLSMALLINT cchName; SQLSMALLINT nDataType; SQLULEN nColSize; // precision SQLSMALLINT cDecimalDigits; // scale SQLSMALLINT nullable; retry: Py_BEGIN_ALLOW_THREADS ret = SQLDescribeColW(cur->hstmt, (SQLUSMALLINT)(i + 1), (SQLWCHAR*)szName, nameLen, &cchName, &nDataType, &nColSize, &cDecimalDigits, &nullable); Py_END_ALLOW_THREADS if (cur->cnxn->hdbc == SQL_NULL_HANDLE) { // The connection was closed by another thread in the ALLOW_THREADS block above. RaiseErrorV(0, ProgrammingError, "The cursor's connection was closed."); goto done; } if (!SQL_SUCCEEDED(ret)) { RaiseErrorFromHandle(cur->cnxn, "SQLDescribeCol", cur->cnxn->hdbc, cur->hstmt); goto done; } // If needed, allocate a bigger column name message buffer and retry. if (cchName > nameLen - 1) { nameLen = cchName + 1; if (!PyMem_Realloc((BYTE**) &szName, (nameLen + 1) * sizeof(uint16_t))) { PyErr_NoMemory(); goto done; } goto retry; } const TextEnc& enc = cur->cnxn->metadata_enc; // HACK: I don't know the exact issue, but iODBC + Teradata results in either UCS4 data // or 4-byte SQLWCHAR. I'm going to use UTF-32 as an indication that's what we have. 
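    // When column names come back garbled, the Python-level workaround is to override the
    // metadata encoding explicitly, e.g. (sketch; the right encoding depends on the
    // driver/manager combination):
    //
    //     cnxn.setdecoding(pyodbc.SQL_WMETADATA, encoding='utf-32le')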
Py_ssize_t cbName = cchName; switch (enc.optenc) { case OPTENC_UTF32: case OPTENC_UTF32LE: case OPTENC_UTF32BE: cbName *= 4; break; default: if (enc.ctype == SQL_C_WCHAR) cbName *= 2; break; } TRACE("Col %d: type=%s (%d) colsize=%d\n", (i+1), SqlTypeName(nDataType), (int)nDataType, (int)nColSize); Object name(TextBufferToObject(enc, (byte*)szName, cbName)); if (!name) goto done; if (lower) { PyObject* l = PyObject_CallMethod(name, "lower", 0); if (!l) goto done; name.Attach(l); } type = PythonTypeFromSqlType(cur, nDataType); if (!type) goto done; switch (nullable) { case SQL_NO_NULLS: nullable_obj = Py_False; break; case SQL_NULLABLE: nullable_obj = Py_True; break; case SQL_NULLABLE_UNKNOWN: default: nullable_obj = Py_None; break; } // The Oracle ODBC driver has a bug (I call it) that it returns a data size of 0 when a numeric value is // retrieved from a UNION: http://support.microsoft.com/?scid=kb%3Ben-us%3B236786&x=13&y=6 // // Unfortunately, I don't have a test system for this yet, so I'm *trying* something. (Not a good sign.) If // the size is zero and it appears to be a numeric type, we'll try to come up with our own length using any // other data we can get. if (nColSize == 0 && IsNumericType(nDataType)) { // I'm not sure how if (cDecimalDigits != 0) { nColSize = (SQLUINTEGER)(cDecimalDigits + 3); } else { // I'm not sure if this is a good idea, but ... nColSize = 42; } } colinfo = Py_BuildValue("(OOOiiiO)", name.Get(), type, // type_code Py_None, // display size (int)nColSize, // internal_size (int)nColSize, // precision (int)cDecimalDigits, // scale nullable_obj); // null_ok if (!colinfo) goto done; nullable_obj = 0; index = PyLong_FromLong(i); if (!index) goto done; PyDict_SetItem(colmap, name.Get(), index); Py_DECREF(index); // SetItemString increments index = 0; PyTuple_SET_ITEM(desc, i, colinfo); colinfo = 0; // reference stolen by SET_ITEM } Py_XDECREF(cur->description); cur->description = desc; desc = 0; cur->map_name_to_index = colmap; colmap = 0; success = true; done: Py_XDECREF(nullable_obj); Py_XDECREF(desc); Py_XDECREF(colmap); Py_XDECREF(index); Py_XDECREF(colinfo); PyMem_Free(szName); return success; } enum free_results_flags { FREE_STATEMENT = 0x01, KEEP_STATEMENT = 0x02, FREE_PREPARED = 0x04, KEEP_PREPARED = 0x08, KEEP_MESSAGES = 0x10, STATEMENT_MASK = 0x03, PREPARED_MASK = 0x0C }; static bool free_results(Cursor* self, int flags) { // Internal function called any time we need to free the memory associated with query results. It is safe to call // this even when a query has not been executed. // If we ran out of memory, it is possible that we have a cursor but colinfos is zero. However, we should be // deleting this object, so the cursor will be freed when the HSTMT is destroyed. */ assert((flags & STATEMENT_MASK) != 0); assert((flags & PREPARED_MASK) != 0); if ((flags & PREPARED_MASK) == FREE_PREPARED) { Py_XDECREF(self->pPreparedSQL); self->pPreparedSQL = 0; } if (self->colinfos) { PyMem_Free(self->colinfos); self->colinfos = 0; } if (StatementIsValid(self)) { if ((flags & STATEMENT_MASK) == FREE_STATEMENT) { Py_BEGIN_ALLOW_THREADS SQLFreeStmt(self->hstmt, SQL_CLOSE); Py_END_ALLOW_THREADS; } else { Py_BEGIN_ALLOW_THREADS SQLFreeStmt(self->hstmt, SQL_UNBIND); SQLFreeStmt(self->hstmt, SQL_RESET_PARAMS); Py_END_ALLOW_THREADS; } if (self->cnxn->hdbc == SQL_NULL_HANDLE) { // The connection was closed by another thread in the ALLOW_THREADS block above. 
RaiseErrorV(0, ProgrammingError, "The cursor's connection was closed."); return false; } } if (self->description != Py_None) { Py_DECREF(self->description); self->description = Py_None; Py_INCREF(Py_None); } if (self->map_name_to_index) { Py_DECREF(self->map_name_to_index); self->map_name_to_index = 0; } if ((flags & KEEP_MESSAGES) == 0) { Py_XDECREF(self->messages); self->messages = PyList_New(0); } self->rowcount = -1; return true; } static void closeimpl(Cursor* cur) { // An internal function for the shared 'closing' code used by Cursor_close and Cursor_dealloc. // // This method releases the GIL lock while closing, so verify the HDBC still exists if you use it. free_results(cur, FREE_STATEMENT | FREE_PREPARED); FreeParameterData(cur); FreeParameterInfo(cur); if (StatementIsValid(cur)) { HSTMT hstmt = cur->hstmt; cur->hstmt = SQL_NULL_HANDLE; SQLRETURN ret; Py_BEGIN_ALLOW_THREADS ret = SQLFreeHandle(SQL_HANDLE_STMT, hstmt); Py_END_ALLOW_THREADS // If there is already an exception, don't overwrite it. if (!SQL_SUCCEEDED(ret) && !PyErr_Occurred()) RaiseErrorFromHandle(cur->cnxn, "SQLFreeHandle", cur->cnxn->hdbc, SQL_NULL_HANDLE); } Py_XDECREF(cur->pPreparedSQL); Py_XDECREF(cur->description); Py_XDECREF(cur->map_name_to_index); Py_XDECREF(cur->cnxn); Py_XDECREF(cur->messages); cur->pPreparedSQL = 0; cur->description = 0; cur->map_name_to_index = 0; cur->cnxn = 0; cur->messages = 0; } static char close_doc[] = "Close the cursor now (rather than whenever __del__ is called). The cursor will\n" "be unusable from this point forward; a ProgrammingError exception will be\n" "raised if any operation is attempted with the cursor."; static PyObject* Cursor_close(PyObject* self, PyObject* args) { UNUSED(args); Cursor* cursor = Cursor_Validate(self, CURSOR_REQUIRE_OPEN | CURSOR_RAISE_ERROR); if (!cursor) return 0; closeimpl(cursor); if (PyErr_Occurred()) return 0; Py_INCREF(Py_None); return Py_None; } static void Cursor_dealloc(Cursor* cursor) { if (Cursor_Validate((PyObject*)cursor, CURSOR_REQUIRE_CNXN)) { closeimpl(cursor); } Py_XDECREF(cursor->inputsizes); PyObject_Del(cursor); } bool InitColumnInfo(Cursor* cursor, SQLUSMALLINT iCol, ColumnInfo* pinfo) { // Initializes ColumnInfo from result set metadata. SQLRETURN ret; // REVIEW: This line fails on OS/X with the FileMaker driver : http://www.filemaker.com/support/updaters/xdbc_odbc_mac.html // // I suspect the problem is that it doesn't allow NULLs in some of the parameters, so I'm going to supply them all // to see what happens. SQLCHAR ColumnName[200]; SQLSMALLINT BufferLength = _countof(ColumnName); SQLSMALLINT NameLength = 0; SQLSMALLINT DataType = 0; SQLULEN ColumnSize = 0; SQLSMALLINT DecimalDigits = 0; SQLSMALLINT Nullable = 0; Py_BEGIN_ALLOW_THREADS ret = SQLDescribeCol(cursor->hstmt, iCol, ColumnName, BufferLength, &NameLength, &DataType, &ColumnSize, &DecimalDigits, &Nullable); Py_END_ALLOW_THREADS pinfo->sql_type = DataType; pinfo->column_size = ColumnSize; if (cursor->cnxn->hdbc == SQL_NULL_HANDLE) { // The connection was closed by another thread in the ALLOW_THREADS block above. RaiseErrorV(0, ProgrammingError, "The cursor's connection was closed."); return false; } if (!SQL_SUCCEEDED(ret)) { RaiseErrorFromHandle(cursor->cnxn, "SQLDescribeCol", cursor->cnxn->hdbc, cursor->hstmt); return false; } // If it is an integer type, determine if it is signed or unsigned. The buffer size is the same but we'll need to // know when we convert to a Python integer. 
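    // (For example, SQL Server's TINYINT is unsigned with a range of 0..255, so the
    // SQL_DESC_UNSIGNED attribute below decides how the raw bytes become a Python int.)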
switch (pinfo->sql_type) { case SQL_TINYINT: case SQL_SMALLINT: case SQL_INTEGER: case SQL_BIGINT: { SQLLEN f; Py_BEGIN_ALLOW_THREADS ret = SQLColAttribute(cursor->hstmt, iCol, SQL_DESC_UNSIGNED, 0, 0, 0, &f); Py_END_ALLOW_THREADS if (cursor->cnxn->hdbc == SQL_NULL_HANDLE) { // The connection was closed by another thread in the ALLOW_THREADS block above. RaiseErrorV(0, ProgrammingError, "The cursor's connection was closed."); return false; } if (!SQL_SUCCEEDED(ret)) { RaiseErrorFromHandle(cursor->cnxn, "SQLColAttribute", cursor->cnxn->hdbc, cursor->hstmt); return false; } pinfo->is_unsigned = (f == SQL_TRUE); break; } default: pinfo->is_unsigned = false; } return true; } static bool PrepareResults(Cursor* cur, int cCols) { // Called after a SELECT has been executed to perform pre-fetch work. // // Allocates the ColumnInfo structures describing the returned data. int i; assert(cur->colinfos == 0); cur->colinfos = (ColumnInfo*)PyMem_Malloc(sizeof(ColumnInfo) * cCols); if (cur->colinfos == 0) { PyErr_NoMemory(); return false; } for (i = 0; i < cCols; i++) { if (!InitColumnInfo(cur, (SQLUSMALLINT)(i + 1), &cur->colinfos[i])) { PyMem_Free(cur->colinfos); cur->colinfos = 0; return false; } } return true; } int GetDiagRecs(Cursor* cur) { // Retrieves all diagnostic records from the cursor and assigns them to the "messages" attribute. PyObject* msg_list; // the "messages" as a Python list of diagnostic records SQLSMALLINT iRecNumber = 1; // the index of the diagnostic records (1-based) uint16_t cSQLState[6]; // five-character SQLSTATE code (plus terminating NULL) SQLINTEGER iNativeError; SQLSMALLINT iMessageLen = 1023; uint16_t *cMessageText = (uint16_t*) PyMem_Malloc((iMessageLen + 1) * sizeof(uint16_t)); SQLSMALLINT iTextLength; SQLRETURN ret; char sqlstate_ascii[6] = ""; // ASCII version of the SQLState if (!cMessageText) { PyErr_NoMemory(); return 0; } msg_list = PyList_New(0); if (!msg_list) return 0; for (;;) { cSQLState[0] = 0; iNativeError = 0; cMessageText[0] = 0; iTextLength = 0; Py_BEGIN_ALLOW_THREADS ret = SQLGetDiagRecW( SQL_HANDLE_STMT, cur->hstmt, iRecNumber, (SQLWCHAR*)cSQLState, &iNativeError, (SQLWCHAR*)cMessageText, iMessageLen, &iTextLength ); Py_END_ALLOW_THREADS if (!SQL_SUCCEEDED(ret)) break; // If needed, allocate a bigger error message buffer and retry. if (iTextLength > iMessageLen - 1) { iMessageLen = iTextLength + 1; if (!PyMem_Realloc((BYTE**) &cMessageText, (iMessageLen + 1) * sizeof(uint16_t))) { PyMem_Free(cMessageText); PyErr_NoMemory(); return 0; } Py_BEGIN_ALLOW_THREADS ret = SQLGetDiagRecW( SQL_HANDLE_STMT, cur->hstmt, iRecNumber, (SQLWCHAR*)cSQLState, &iNativeError, (SQLWCHAR*)cMessageText, iMessageLen, &iTextLength ); Py_END_ALLOW_THREADS if (!SQL_SUCCEEDED(ret)) break; } cSQLState[5] = 0; // Not always NULL terminated (MS Access) CopySqlState(cSQLState, sqlstate_ascii); PyObject* msg_class = PyUnicode_FromFormat("[%s] (%ld)", sqlstate_ascii, (long)iNativeError); // Default to UTF-16, which may not work if the driver/manager is using some other encoding const char *unicode_enc = cur->cnxn ? cur->cnxn->metadata_enc.name : ENCSTR_UTF16NE; PyObject* msg_value = PyUnicode_Decode( (char*)cMessageText, iTextLength * sizeof(uint16_t), unicode_enc, "strict" ); if (!msg_value) { // If the char cannot be decoded, return something rather than nothing. 
Py_XDECREF(msg_value); msg_value = PyBytes_FromStringAndSize((char*)cMessageText, iTextLength * sizeof(uint16_t)); } PyObject* msg_tuple = PyTuple_New(2); // the message as a Python tuple of class and value if (msg_class && msg_value && msg_tuple) { PyTuple_SetItem(msg_tuple, 0, msg_class); // msg_tuple now owns the msg_class reference PyTuple_SetItem(msg_tuple, 1, msg_value); // msg_tuple now owns the msg_value reference PyList_Append(msg_list, msg_tuple); Py_XDECREF(msg_tuple); // whether PyList_Append succeeds or not } else { Py_XDECREF(msg_class); Py_XDECREF(msg_value); Py_XDECREF(msg_tuple); } iRecNumber++; } PyMem_Free(cMessageText); Py_XDECREF(cur->messages); cur->messages = msg_list; // cur->messages now owns the msg_list reference return 0; } static PyObject* execute(Cursor* cur, PyObject* pSql, PyObject* params, bool skip_first) { // Internal function to execute SQL, called by .execute and .executemany. // // pSql // A PyString, PyUnicode, or derived object containing the SQL. // // params // Pointer to an optional sequence of parameters, and possibly the SQL statement (see skip_first): // (SQL, param1, param2) or (param1, param2). // // skip_first // If true, the first element in `params` is ignored. (It will be the SQL statement and `params` will be the // entire tuple passed to Cursor.execute.) Otherwise all of the params are used. (This case occurs when called // from Cursor.executemany, in which case the sequences do not contain the SQL statement.) Ignored if params is // zero. if (params) { if (!PyTuple_Check(params) && !PyList_Check(params) && !Row_Check(params)) return RaiseErrorV(0, PyExc_TypeError, "Params must be in a list, tuple, or Row"); } // Normalize the parameter variables. int params_offset = skip_first ? 1 : 0; Py_ssize_t cParams = params == 0 ? 0 : PySequence_Length(params) - params_offset; SQLRETURN ret = 0; free_results(cur, FREE_STATEMENT | KEEP_PREPARED); const char* szLastFunction = ""; if (cParams > 0) { // There are parameters, so we'll need to prepare the SQL statement and bind the parameters. (We need to // prepare the statement because we can't bind a NULL (None) object without knowing the target datatype. There // is no one data type that always maps to the others (no, not even varchar)). if (!PrepareAndBind(cur, pSql, params, skip_first)) return 0; szLastFunction = "SQLExecute"; Py_BEGIN_ALLOW_THREADS ret = SQLExecute(cur->hstmt); Py_END_ALLOW_THREADS } else { // REVIEW: Why don't we always prepare? It is highly unlikely that a user would need to execute the same SQL // repeatedly if it did not have parameters, so we are not losing performance, but it would simplify the code. Py_XDECREF(cur->pPreparedSQL); cur->pPreparedSQL = 0; szLastFunction = "SQLExecDirect"; const TextEnc* penc = 0; penc = &cur->cnxn->unicode_enc; Object query(penc->Encode(pSql)); if (!query) return 0; bool isWide = (penc->ctype == SQL_C_WCHAR); const char* pch = PyBytes_AS_STRING(query.Get()); SQLINTEGER cch = (SQLINTEGER)(PyBytes_GET_SIZE(query.Get()) / (isWide ? sizeof(uint16_t) : 1)); Py_BEGIN_ALLOW_THREADS if (isWide) ret = SQLExecDirectW(cur->hstmt, (SQLWCHAR*)pch, cch); else ret = SQLExecDirect(cur->hstmt, (SQLCHAR*)pch, cch); Py_END_ALLOW_THREADS } if (cur->cnxn->hdbc == SQL_NULL_HANDLE) { // The connection was closed by another thread in the ALLOW_THREADS block above. 
FreeParameterData(cur); return RaiseErrorV(0, ProgrammingError, "The cursor's connection was closed."); } if (!SQL_SUCCEEDED(ret) && ret != SQL_NEED_DATA && ret != SQL_NO_DATA) { // We could try dropping through the while and if below, but if there is an error, we need to raise it before // FreeParameterData calls more ODBC functions. RaiseErrorFromHandle(cur->cnxn, "SQLExecDirectW", cur->cnxn->hdbc, cur->hstmt); FreeParameterData(cur); return 0; } if (ret == SQL_SUCCESS_WITH_INFO) { GetDiagRecs(cur); } while (ret == SQL_NEED_DATA) { // One or more parameters were too long to bind normally so we set the // length to SQL_LEN_DATA_AT_EXEC. ODBC will return SQL_NEED_DATA for // each of the parameters we did this for. // // For each one we set a pointer to the ParamInfo as the "parameter // data" we can access with SQLParamData. We've stashed everything we // need in there. szLastFunction = "SQLParamData"; ParamInfo* pInfo; Py_BEGIN_ALLOW_THREADS ret = SQLParamData(cur->hstmt, (SQLPOINTER*)&pInfo); Py_END_ALLOW_THREADS if (ret != SQL_NEED_DATA && ret != SQL_NO_DATA && !SQL_SUCCEEDED(ret)) return RaiseErrorFromHandle(cur->cnxn, "SQLParamData", cur->cnxn->hdbc, cur->hstmt); TRACE("SQLParamData() --> %d\n", ret); if (ret == SQL_NEED_DATA) { szLastFunction = "SQLPutData"; if (pInfo->pObject && (PyBytes_Check(pInfo->pObject) || PyByteArray_Check(pInfo->pObject) )) { char *(*pGetPtr)(PyObject*); Py_ssize_t (*pGetLen)(PyObject*); if (PyByteArray_Check(pInfo->pObject)) { pGetPtr = PyByteArray_AsString; pGetLen = PyByteArray_Size; } else { pGetPtr = PyBytes_AsString; pGetLen = PyBytes_Size; } const char* p = pGetPtr(pInfo->pObject); SQLLEN cb = (SQLLEN)pGetLen(pInfo->pObject); SQLLEN offset = 0; do { SQLLEN remaining = pInfo->maxlength ? min(pInfo->maxlength, cb - offset) : cb; TRACE("SQLPutData [%d] (%d) %.10s\n", offset, remaining, &p[offset]); Py_BEGIN_ALLOW_THREADS ret = SQLPutData(cur->hstmt, (SQLPOINTER)&p[offset], remaining); Py_END_ALLOW_THREADS if (!SQL_SUCCEEDED(ret)) return RaiseErrorFromHandle(cur->cnxn, "SQLPutData", cur->cnxn->hdbc, cur->hstmt); offset += remaining; } while (offset < cb); } else if (pInfo->ParameterType == SQL_SS_TABLE) { // TVP // Need to convert its columns into the bound row buffers int hasTvpRows = 0; if (pInfo->curTvpRow < PySequence_Length(pInfo->pObject)) { PyObject *tvpRow = PySequence_GetItem(pInfo->pObject, pInfo->curTvpRow); Py_XDECREF(tvpRow); for (Py_ssize_t i = 0; i < PySequence_Size(tvpRow); i++) { struct ParamInfo newParam; struct ParamInfo *prevParam = pInfo->nested + i; PyObject *cell = PySequence_GetItem(tvpRow, i); Py_XDECREF(cell); memset(&newParam, 0, sizeof(newParam)); if (!GetParameterInfo(cur, i, cell, newParam, true)) { // Error converting object FreeParameterData(cur); return NULL; } if((newParam.ValueType != SQL_C_DEFAULT && prevParam->ValueType != SQL_C_DEFAULT) && (newParam.ValueType != prevParam->ValueType || newParam.ParameterType != prevParam->ParameterType)) { FreeParameterData(cur); return RaiseErrorV(0, ProgrammingError, "Type mismatch between TVP row values"); } if (prevParam->allocated) PyMem_Free(prevParam->ParameterValuePtr); Py_XDECREF(prevParam->pObject); newParam.BufferLength = newParam.StrLen_or_Ind; newParam.StrLen_or_Ind = SQL_DATA_AT_EXEC; Py_INCREF(cell); newParam.pObject = cell; *prevParam = newParam; if(prevParam->ParameterValuePtr == &newParam.Data) { prevParam->ParameterValuePtr = &prevParam->Data; } } pInfo->curTvpRow++; hasTvpRows = 1; } Py_BEGIN_ALLOW_THREADS ret = SQLPutData(cur->hstmt, hasTvpRows ? 
(SQLPOINTER)1 : 0, hasTvpRows); Py_END_ALLOW_THREADS if (!SQL_SUCCEEDED(ret)) return RaiseErrorFromHandle(cur->cnxn, "SQLPutData", cur->cnxn->hdbc, cur->hstmt); } else { // TVP column sent as DAE Py_BEGIN_ALLOW_THREADS ret = SQLPutData(cur->hstmt, pInfo->ParameterValuePtr, pInfo->BufferLength); Py_END_ALLOW_THREADS if (!SQL_SUCCEEDED(ret)) return RaiseErrorFromHandle(cur->cnxn, "SQLPutData", cur->cnxn->hdbc, cur->hstmt); } ret = SQL_NEED_DATA; } } FreeParameterData(cur); if (ret == SQL_NO_DATA) { // Example: A delete statement that did not delete anything. cur->rowcount = 0; Py_INCREF(cur); return (PyObject*)cur; } if (!SQL_SUCCEEDED(ret)) return RaiseErrorFromHandle(cur->cnxn, szLastFunction, cur->cnxn->hdbc, cur->hstmt); SQLLEN cRows = -1; Py_BEGIN_ALLOW_THREADS ret = SQLRowCount(cur->hstmt, &cRows); Py_END_ALLOW_THREADS if (!SQL_SUCCEEDED(ret)) return RaiseErrorFromHandle(cur->cnxn, "SQLRowCount", cur->cnxn->hdbc, cur->hstmt); cur->rowcount = (int)cRows; TRACE("SQLRowCount: %d\n", cRows); SQLSMALLINT cCols = 0; Py_BEGIN_ALLOW_THREADS ret = SQLNumResultCols(cur->hstmt, &cCols); Py_END_ALLOW_THREADS if (!SQL_SUCCEEDED(ret)) { // Note: The SQL Server driver sometimes returns HY007 here if multiple statements (separated by ;) were // submitted. This is not documented, but I've seen it with multiple successful inserts. return RaiseErrorFromHandle(cur->cnxn, "SQLNumResultCols", cur->cnxn->hdbc, cur->hstmt); } TRACE("SQLNumResultCols: %d\n", cCols); if (cur->cnxn->hdbc == SQL_NULL_HANDLE) { // The connection was closed by another thread in the ALLOW_THREADS block above. return RaiseErrorV(0, ProgrammingError, "The cursor's connection was closed."); } if (!SQL_SUCCEEDED(ret)) return RaiseErrorFromHandle(cur->cnxn, "SQLRowCount", cur->cnxn->hdbc, cur->hstmt); if (cCols != 0) { // A result set was created. if (!PrepareResults(cur, cCols)) return 0; if (!create_name_map(cur, cCols, lowercase())) return 0; } Py_INCREF(cur); return (PyObject*)cur; } inline bool IsSequence(PyObject* p) { // Used to determine if the first parameter of execute is a collection of SQL parameters or is a SQL parameter // itself. If the first parameter is a list, tuple, or Row object, then we consider it a collection. Anything // else, including other sequences (e.g. bytearray), is considered a SQL parameter. return PyList_Check(p) || PyTuple_Check(p) || Row_Check(p); } static char execute_doc[] = "C.execute(sql, [params]) --> Cursor\n" "\n" "Prepare and execute a database query or command.\n" "\n" "Parameters may be provided as a sequence (as specified by the DB API) or\n" "simply passed in one after another (non-standard):\n" "\n" " cursor.execute(sql, (param1, param2))\n" "\n" " or\n" "\n" " cursor.execute(sql, param1, param2)\n"; PyObject* Cursor_execute(PyObject* self, PyObject* args) { Py_ssize_t cParams = PyTuple_Size(args) - 1; Cursor* cursor = Cursor_Validate(self, CURSOR_REQUIRE_OPEN | CURSOR_RAISE_ERROR); if (!cursor) return 0; if (cParams < 0) { PyErr_SetString(PyExc_TypeError, "execute() takes at least 1 argument (0 given)"); return 0; } PyObject* pSql = PyTuple_GET_ITEM(args, 0); if (!PyUnicode_Check(pSql)) { PyErr_SetString(PyExc_TypeError, "The first argument to execute must be a string or unicode query."); return 0; } // Figure out if there were parameters and how they were passed. Our optional parameter passing complicates this slightly.
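    // Both call styles from execute_doc end up here, e.g. (Python; table and
    // parameters are illustrative):
    //
    //     cursor.execute("select * from t where a=? and b=?", (1, 2))   # DB API style
    //     cursor.execute("select * from t where a=? and b=?", 1, 2)     # pyodbc shorthand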
bool skip_first = false; PyObject *params = 0; if (cParams == 1 && IsSequence(PyTuple_GET_ITEM(args, 1))) { // There is a single argument and it is a sequence, so we must treat it as a sequence of parameters. (This is // the normal Cursor.execute behavior.) params = PyTuple_GET_ITEM(args, 1); skip_first = false; } else if (cParams > 0) { params = args; skip_first = true; } // Execute. return execute(cursor, pSql, params, skip_first); } static PyObject* Cursor_executemany(PyObject* self, PyObject* args) { Cursor* cursor = Cursor_Validate(self, CURSOR_REQUIRE_OPEN | CURSOR_RAISE_ERROR); if (!cursor) return 0; cursor->rowcount = -1; PyObject *pSql, *param_seq; if (!PyArg_ParseTuple(args, "OO", &pSql, &param_seq)) return 0; if (!PyUnicode_Check(pSql)) { PyErr_SetString(PyExc_TypeError, "The first argument to executemany must be a string or unicode query."); return 0; } if (IsSequence(param_seq)) { Py_ssize_t c = PySequence_Size(param_seq); if (c == 0) { PyErr_SetString(ProgrammingError, "The second parameter to executemany must not be empty."); return 0; } if (cursor->fastexecmany) { free_results(cursor, FREE_STATEMENT | KEEP_PREPARED); if (!ExecuteMulti(cursor, pSql, param_seq)) return 0; } else { for (Py_ssize_t i = 0; i < c; i++) { PyObject* params = PySequence_GetItem(param_seq, i); PyObject* result = execute(cursor, pSql, params, false); bool success = result != 0; Py_XDECREF(result); Py_DECREF(params); if (!success) { cursor->rowcount = -1; return 0; } } } } else if (PyGen_Check(param_seq) || PyIter_Check(param_seq)) { Object iter; if (PyGen_Check(param_seq)) { iter = PyObject_GetIter(param_seq); } else { iter = param_seq; Py_INCREF(param_seq); } Object params; while (params.Attach(PyIter_Next(iter))) { PyObject* result = execute(cursor, pSql, params, false); bool success = result != 0; Py_XDECREF(result); if (!success) { cursor->rowcount = -1; return 0; } } if (PyErr_Occurred()) return 0; } else { PyErr_SetString(ProgrammingError, "The second parameter to executemany must be a sequence, iterator, or generator."); return 0; } cursor->rowcount = -1; Py_RETURN_NONE; } static PyObject* Cursor_setinputsizes(PyObject* self, PyObject* sizes) { if (!Cursor_Check(self)) { PyErr_SetString(ProgrammingError, "Invalid cursor object."); return 0; } Cursor *cur = (Cursor*)self; if (Py_None == sizes) { Py_XDECREF(cur->inputsizes); cur->inputsizes = 0; } else { if (!IsSequence(sizes)) { PyErr_SetString(ProgrammingError, "A non-None parameter to setinputsizes must be a sequence, iterator, or generator."); return 0; } Py_XDECREF(cur->inputsizes); Py_INCREF(sizes); cur->inputsizes = sizes; } Py_RETURN_NONE; } static PyObject* Cursor_fetch(Cursor* cur) { // Internal function to fetch a single row and construct a Row object from it. Used by all of the fetching // functions. // // Returns a Row object if successful. If there are no more rows, zero is returned. If an error occurs, an // exception is set and zero is returned. (To differentiate between the last two, use PyErr_Occurred.) SQLRETURN ret = 0; Py_ssize_t field_count, i; PyObject** apValues; Py_BEGIN_ALLOW_THREADS ret = SQLFetch(cur->hstmt); Py_END_ALLOW_THREADS if (cur->cnxn->hdbc == SQL_NULL_HANDLE) { // The connection was closed by another thread in the ALLOW_THREADS block above.
return RaiseErrorV(0, ProgrammingError, "The cursor's connection was closed."); } if (ret == SQL_NO_DATA) return 0; if (!SQL_SUCCEEDED(ret)) return RaiseErrorFromHandle(cur->cnxn, "SQLFetch", cur->cnxn->hdbc, cur->hstmt); field_count = PyTuple_GET_SIZE(cur->description); apValues = (PyObject**)PyMem_Malloc(sizeof(PyObject*) * field_count); if (apValues == 0) return PyErr_NoMemory(); for (i = 0; i < field_count; i++) { PyObject* value = GetData(cur, i); if (!value) { FreeRowValues(i, apValues); return 0; } apValues[i] = value; } return (PyObject*)Row_InternalNew(cur->description, cur->map_name_to_index, field_count, apValues); } static PyObject* Cursor_fetchlist(Cursor* cur, Py_ssize_t max) { // max // The maximum number of rows to fetch. If -1, fetch all rows. // // Returns a list of Rows. If there are no rows, an empty list is returned. PyObject* results; PyObject* row; results = PyList_New(0); if (!results) return 0; while (max == -1 || max > 0) { row = Cursor_fetch(cur); if (!row) { if (PyErr_Occurred()) { Py_DECREF(results); return 0; } break; } PyList_Append(results, row); Py_DECREF(row); if (max != -1) max--; } return results; } static PyObject* Cursor_iter(PyObject* self) { Py_INCREF(self); return self; } static PyObject* Cursor_iternext(PyObject* self) { // Implements the iterator protocol for cursors. Fetches the next row. Returns zero without setting an exception // when there are no rows. PyObject* result; Cursor* cursor = Cursor_Validate(self, CURSOR_REQUIRE_RESULTS | CURSOR_RAISE_ERROR); if (!cursor) return 0; result = Cursor_fetch(cursor); return result; } static PyObject* Cursor_fetchval(PyObject* self, PyObject* args) { UNUSED(args); Cursor* cursor = Cursor_Validate(self, CURSOR_REQUIRE_RESULTS | CURSOR_RAISE_ERROR); if (!cursor) return 0; Object row(Cursor_fetch(cursor)); if (!row) { if (PyErr_Occurred()) return 0; Py_RETURN_NONE; } return Row_item(row, 0); } static PyObject* Cursor_fetchone(PyObject* self, PyObject* args) { UNUSED(args); PyObject* row; Cursor* cursor = Cursor_Validate(self, CURSOR_REQUIRE_RESULTS | CURSOR_RAISE_ERROR); if (!cursor) return 0; row = Cursor_fetch(cursor); if (!row) { if (PyErr_Occurred()) return 0; Py_RETURN_NONE; } return row; } static PyObject* Cursor_fetchall(PyObject* self, PyObject* args) { UNUSED(args); PyObject* result; Cursor* cursor = Cursor_Validate(self, CURSOR_REQUIRE_RESULTS | CURSOR_RAISE_ERROR); if (!cursor) return 0; result = Cursor_fetchlist(cursor, -1); return result; } static PyObject* Cursor_fetchmany(PyObject* self, PyObject* args) { long rows; PyObject* result; Cursor* cursor = Cursor_Validate(self, CURSOR_REQUIRE_RESULTS | CURSOR_RAISE_ERROR); if (!cursor) return 0; rows = cursor->arraysize; if (!PyArg_ParseTuple(args, "|l", &rows)) return 0; result = Cursor_fetchlist(cursor, rows); return result; } static char tables_doc[] = "C.tables(table=None, catalog=None, schema=None, tableType=None) --> self\n" "\n" "Executes SQLTables and creates a results set of tables defined in the data\n" "source.\n" "\n" "The table, catalog, and schema interpret the '_' and '%' characters as\n" "wildcards. 
The escape character is driver specific, so use\n" "`Connection.searchescape`.\n" "\n" "Each row fetched has the following columns:\n" " 0) table_cat: The catalog name.\n" " 1) table_schem: The schema name.\n" " 2) table_name: The table name.\n" " 3) table_type: One of 'TABLE', 'VIEW', 'SYSTEM TABLE', 'GLOBAL TEMPORARY',\n" " 'LOCAL TEMPORARY', 'ALIAS', 'SYNONYM', or a data source-specific type name."; char* Cursor_tables_kwnames[] = { "table", "catalog", "schema", "tableType", 0 }; static PyObject* Cursor_tables(PyObject* self, PyObject* args, PyObject* kwargs) { const char* szCatalog = 0; const char* szSchema = 0; const char* szTableName = 0; const char* szTableType = 0; if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|zzzz", Cursor_tables_kwnames, &szTableName, &szCatalog, &szSchema, &szTableType)) return 0; Cursor* cur = Cursor_Validate(self, CURSOR_REQUIRE_OPEN); if (!free_results(cur, FREE_STATEMENT | FREE_PREPARED)) return 0; SQLRETURN ret = 0; Py_BEGIN_ALLOW_THREADS ret = SQLTables(cur->hstmt, (SQLCHAR*)szCatalog, SQL_NTS, (SQLCHAR*)szSchema, SQL_NTS, (SQLCHAR*)szTableName, SQL_NTS, (SQLCHAR*)szTableType, SQL_NTS); Py_END_ALLOW_THREADS if (!SQL_SUCCEEDED(ret)) return RaiseErrorFromHandle(cur->cnxn, "SQLTables", cur->cnxn->hdbc, cur->hstmt); SQLSMALLINT cCols; Py_BEGIN_ALLOW_THREADS ret = SQLNumResultCols(cur->hstmt, &cCols); Py_END_ALLOW_THREADS if (!SQL_SUCCEEDED(ret)) return RaiseErrorFromHandle(cur->cnxn, "SQLNumResultCols", cur->cnxn->hdbc, cur->hstmt); if (!PrepareResults(cur, cCols)) return 0; if (!create_name_map(cur, cCols, true)) return 0; // Return the cursor so the results can be iterated over directly. Py_INCREF(cur); return (PyObject*)cur; } static char columns_doc[] = "C.columns(table=None, catalog=None, schema=None, column=None)\n\n" "Creates a results set of column names in specified tables by executing the ODBC SQLColumns function.\n" "Each row fetched has the following columns:\n" " 0) table_cat\n" " 1) table_schem\n" " 2) table_name\n" " 3) column_name\n" " 4) data_type\n" " 5) type_name\n" " 6) column_size\n" " 7) buffer_length\n" " 8) decimal_digits\n" " 9) num_prec_radix\n" " 10) nullable\n" " 11) remarks\n" " 12) column_def\n" " 13) sql_data_type\n" " 14) sql_datetime_sub\n" " 15) char_octet_length\n" " 16) ordinal_position\n" " 17) is_nullable"; char* Cursor_column_kwnames[] = { "table", "catalog", "schema", "column", 0 }; static PyObject* Cursor_columns(PyObject* self, PyObject* args, PyObject* kwargs) { PyObject* pCatalog = 0; PyObject* pSchema = 0; PyObject* pTable = 0; PyObject* pColumn = 0; if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|OOOO", Cursor_column_kwnames, &pTable, &pCatalog, &pSchema, &pColumn)) return 0; Cursor* cur = Cursor_Validate(self, CURSOR_REQUIRE_OPEN); if (!free_results(cur, FREE_STATEMENT | FREE_PREPARED)) return 0; SQLRETURN ret = 0; const TextEnc& enc = cur->cnxn->metadata_enc; SQLWChar catalog(pCatalog, enc); SQLWChar schema(pSchema, enc); SQLWChar table(pTable, enc); SQLWChar column(pColumn, enc); if (!catalog.isValidOrNone() || !schema.isValidOrNone() || !table.isValidOrNone() || !column.isValidOrNone()) return 0; Py_BEGIN_ALLOW_THREADS ret = SQLColumnsW(cur->hstmt, catalog, SQL_NTS, schema, SQL_NTS, table, SQL_NTS, column, SQL_NTS); Py_END_ALLOW_THREADS if (!SQL_SUCCEEDED(ret)) return RaiseErrorFromHandle(cur->cnxn, "SQLColumns", cur->cnxn->hdbc, cur->hstmt); SQLSMALLINT cCols; Py_BEGIN_ALLOW_THREADS ret = SQLNumResultCols(cur->hstmt, &cCols); Py_END_ALLOW_THREADS if (!SQL_SUCCEEDED(ret)) return
RaiseErrorFromHandle(cur->cnxn, "SQLNumResultCols", cur->cnxn->hdbc, cur->hstmt); if (!PrepareResults(cur, cCols)) return 0; if (!create_name_map(cur, cCols, true)) return 0; // Return the cursor so the results can be iterated over directly. Py_INCREF(cur); return (PyObject*)cur; } static char statistics_doc[] = "C.statistics(catalog=None, schema=None, unique=False, quick=True) --> self\n\n" "Creates a results set of statistics about a single table and the indexes associated with \n" "the table by executing SQLStatistics.\n" "unique\n" " If True, only unique indexes are returned. Otherwise all indexes are returned.\n" "quick\n" " If True, CARDINALITY and PAGES are returned only if they are readily available\n" " from the server\n" "\n" "Each row fetched has the following columns:\n\n" " 0) table_cat\n" " 1) table_schem\n" " 2) table_name\n" " 3) non_unique\n" " 4) index_qualifier\n" " 5) index_name\n" " 6) type\n" " 7) ordinal_position\n" " 8) column_name\n" " 9) asc_or_desc\n" " 10) cardinality\n" " 11) pages\n" " 12) filter_condition"; char* Cursor_statistics_kwnames[] = { "table", "catalog", "schema", "unique", "quick", 0 }; static PyObject* Cursor_statistics(PyObject* self, PyObject* args, PyObject* kwargs) { const char* szCatalog = 0; const char* szSchema = 0; const char* szTable = 0; PyObject* pUnique = Py_False; PyObject* pQuick = Py_True; if (!PyArg_ParseTupleAndKeywords(args, kwargs, "s|zzOO", Cursor_statistics_kwnames, &szTable, &szCatalog, &szSchema, &pUnique, &pQuick)) return 0; Cursor* cur = Cursor_Validate(self, CURSOR_REQUIRE_OPEN); if (!free_results(cur, FREE_STATEMENT | FREE_PREPARED)) return 0; SQLUSMALLINT nUnique = (SQLUSMALLINT)(PyObject_IsTrue(pUnique) ? SQL_INDEX_UNIQUE : SQL_INDEX_ALL); SQLUSMALLINT nReserved = (SQLUSMALLINT)(PyObject_IsTrue(pQuick) ? SQL_QUICK : SQL_ENSURE); SQLRETURN ret = 0; Py_BEGIN_ALLOW_THREADS ret = SQLStatistics(cur->hstmt, (SQLCHAR*)szCatalog, SQL_NTS, (SQLCHAR*)szSchema, SQL_NTS, (SQLCHAR*)szTable, SQL_NTS, nUnique, nReserved); Py_END_ALLOW_THREADS if (!SQL_SUCCEEDED(ret)) return RaiseErrorFromHandle(cur->cnxn, "SQLStatistics", cur->cnxn->hdbc, cur->hstmt); SQLSMALLINT cCols; Py_BEGIN_ALLOW_THREADS ret = SQLNumResultCols(cur->hstmt, &cCols); Py_END_ALLOW_THREADS if (!SQL_SUCCEEDED(ret)) return RaiseErrorFromHandle(cur->cnxn, "SQLNumResultCols", cur->cnxn->hdbc, cur->hstmt); if (!PrepareResults(cur, cCols)) return 0; if (!create_name_map(cur, cCols, true)) return 0; // Return the cursor so the results can be iterated over directly. 
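    // e.g. (Python sketch; table name is illustrative):
    //
    //     for row in cursor.statistics('mytable', unique=True):
    //         print(row.index_name, row.cardinality)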
Py_INCREF(cur); return (PyObject*)cur; } static char rowIdColumns_doc[] = "C.rowIdColumns(table, catalog=None, schema=None, nullable=True) --> self\n\n" "Executes SQLSpecialColumns with SQL_BEST_ROWID which creates a result set of columns that\n" "uniquely identify a row.\n\n" "Each row fetched has the following columns:\n" " 0) scope\n" " 1) column_name\n" " 2) data_type\n" " 3) type_name\n" " 4) column_size\n" " 5) buffer_length\n" " 6) decimal_digits\n" " 7) pseudo_column"; static char rowVerColumns_doc[] = "C.rowVerColumns(table, catalog=None, schema=None, nullable=True) --> self\n\n" "Executes SQLSpecialColumns with SQL_ROWVER which creates a result set of columns that\n" "are automatically updated when any value in the row is updated.\n\n" "Each row fetched has the following columns:\n" " 0) scope\n" " 1) column_name\n" " 2) data_type\n" " 3) type_name\n" " 4) column_size\n" " 5) buffer_length\n" " 6) decimal_digits\n" " 7) pseudo_column"; char* Cursor_specialColumn_kwnames[] = { "table", "catalog", "schema", "nullable", 0 }; static PyObject* _specialColumns(PyObject* self, PyObject* args, PyObject* kwargs, SQLUSMALLINT nIdType) { const char* szTable; const char* szCatalog = 0; const char* szSchema = 0; PyObject* pNullable = Py_True; if (!PyArg_ParseTupleAndKeywords(args, kwargs, "s|zzO", Cursor_specialColumn_kwnames, &szTable, &szCatalog, &szSchema, &pNullable)) return 0; Cursor* cur = Cursor_Validate(self, CURSOR_REQUIRE_OPEN); if (!free_results(cur, FREE_STATEMENT | FREE_PREPARED)) return 0; SQLRETURN ret = 0; SQLUSMALLINT nNullable = (SQLUSMALLINT)(PyObject_IsTrue(pNullable) ? SQL_NULLABLE : SQL_NO_NULLS); Py_BEGIN_ALLOW_THREADS ret = SQLSpecialColumns(cur->hstmt, nIdType, (SQLCHAR*)szCatalog, SQL_NTS, (SQLCHAR*)szSchema, SQL_NTS, (SQLCHAR*)szTable, SQL_NTS, SQL_SCOPE_TRANSACTION, nNullable); Py_END_ALLOW_THREADS if (!SQL_SUCCEEDED(ret)) return RaiseErrorFromHandle(cur->cnxn, "SQLSpecialColumns", cur->cnxn->hdbc, cur->hstmt); SQLSMALLINT cCols; Py_BEGIN_ALLOW_THREADS ret = SQLNumResultCols(cur->hstmt, &cCols); Py_END_ALLOW_THREADS if (!SQL_SUCCEEDED(ret)) return RaiseErrorFromHandle(cur->cnxn, "SQLNumResultCols", cur->cnxn->hdbc, cur->hstmt); if (!PrepareResults(cur, cCols)) return 0; if (!create_name_map(cur, cCols, true)) return 0; // Return the cursor so the results can be iterated over directly.
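    // Both Python entry points route through this helper, e.g. (sketch; table
    // name is illustrative):
    //
    //     cursor.rowIdColumns('mytable')    # SQL_BEST_ROWID
    //     cursor.rowVerColumns('mytable')   # SQL_ROWVER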
Py_INCREF(cur); return (PyObject*)cur; } static PyObject* Cursor_rowIdColumns(PyObject* self, PyObject* args, PyObject* kwargs) { return _specialColumns(self, args, kwargs, SQL_BEST_ROWID); } static PyObject* Cursor_rowVerColumns(PyObject* self, PyObject* args, PyObject* kwargs) { return _specialColumns(self, args, kwargs, SQL_ROWVER); } static char primaryKeys_doc[] = "C.primaryKeys(table, catalog=None, schema=None) --> self\n\n" "Creates a results set of column names that make up the primary key for a table\n" "by executing the SQLPrimaryKeys function.\n" "Each row fetched has the following columns:\n" " 0) table_cat\n" " 1) table_schem\n" " 2) table_name\n" " 3) column_name\n" " 4) key_seq\n" " 5) pk_name"; char* Cursor_primaryKeys_kwnames[] = { "table", "catalog", "schema", 0 }; static PyObject* Cursor_primaryKeys(PyObject* self, PyObject* args, PyObject* kwargs) { const char* szTable; const char* szCatalog = 0; const char* szSchema = 0; if (!PyArg_ParseTupleAndKeywords(args, kwargs, "s|zz", Cursor_primaryKeys_kwnames, &szTable, &szCatalog, &szSchema)) return 0; Cursor* cur = Cursor_Validate(self, CURSOR_REQUIRE_OPEN); if (!free_results(cur, FREE_STATEMENT | FREE_PREPARED)) return 0; SQLRETURN ret = 0; Py_BEGIN_ALLOW_THREADS ret = SQLPrimaryKeys(cur->hstmt, (SQLCHAR*)szCatalog, SQL_NTS, (SQLCHAR*)szSchema, SQL_NTS, (SQLCHAR*)szTable, SQL_NTS); Py_END_ALLOW_THREADS if (!SQL_SUCCEEDED(ret)) return RaiseErrorFromHandle(cur->cnxn, "SQLPrimaryKeys", cur->cnxn->hdbc, cur->hstmt); SQLSMALLINT cCols; Py_BEGIN_ALLOW_THREADS ret = SQLNumResultCols(cur->hstmt, &cCols); Py_END_ALLOW_THREADS if (!SQL_SUCCEEDED(ret)) return RaiseErrorFromHandle(cur->cnxn, "SQLNumResultCols", cur->cnxn->hdbc, cur->hstmt); if (!PrepareResults(cur, cCols)) return 0; if (!create_name_map(cur, cCols, true)) return 0; // Return the cursor so the results can be iterated over directly. 
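    // e.g. (Python sketch; table name is illustrative):
    //
    //     pk_columns = [row.column_name for row in cursor.primaryKeys('mytable')]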
Py_INCREF(cur); return (PyObject*)cur; } static char foreignKeys_doc[] = "C.foreignKeys(table=None, catalog=None, schema=None,\n" " foreignTable=None, foreignCatalog=None, foreignSchema=None) --> self\n\n" "Executes the SQLForeignKeys function and creates a results set of column names\n" "that are foreign keys in the specified table (columns in the specified table\n" "that refer to primary keys in other tables) or foreign keys in other tables\n" "that refer to the primary key in the specified table.\n\n" "Each row fetched has the following columns:\n" " 0) pktable_cat\n" " 1) pktable_schem\n" " 2) pktable_name\n" " 3) pkcolumn_name\n" " 4) fktable_cat\n" " 5) fktable_schem\n" " 6) fktable_name\n" " 7) fkcolumn_name\n" " 8) key_seq\n" " 9) update_rule\n" " 10) delete_rule\n" " 11) fk_name\n" " 12) pk_name\n" " 13) deferrability"; char* Cursor_foreignKeys_kwnames[] = { "table", "catalog", "schema", "foreignTable", "foreignCatalog", "foreignSchema", 0 }; static PyObject* Cursor_foreignKeys(PyObject* self, PyObject* args, PyObject* kwargs) { const char* szTable = 0; const char* szCatalog = 0; const char* szSchema = 0; const char* szForeignTable = 0; const char* szForeignCatalog = 0; const char* szForeignSchema = 0; if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|zzzzzz", Cursor_foreignKeys_kwnames, &szTable, &szCatalog, &szSchema, &szForeignTable, &szForeignCatalog, &szForeignSchema)) return 0; Cursor* cur = Cursor_Validate(self, CURSOR_REQUIRE_OPEN); if (!free_results(cur, FREE_STATEMENT | FREE_PREPARED)) return 0; SQLRETURN ret = 0; Py_BEGIN_ALLOW_THREADS ret = SQLForeignKeys(cur->hstmt, (SQLCHAR*)szCatalog, SQL_NTS, (SQLCHAR*)szSchema, SQL_NTS, (SQLCHAR*)szTable, SQL_NTS, (SQLCHAR*)szForeignCatalog, SQL_NTS, (SQLCHAR*)szForeignSchema, SQL_NTS, (SQLCHAR*)szForeignTable, SQL_NTS); Py_END_ALLOW_THREADS if (!SQL_SUCCEEDED(ret)) return RaiseErrorFromHandle(cur->cnxn, "SQLForeignKeys", cur->cnxn->hdbc, cur->hstmt); SQLSMALLINT cCols; Py_BEGIN_ALLOW_THREADS ret = SQLNumResultCols(cur->hstmt, &cCols); Py_END_ALLOW_THREADS if (!SQL_SUCCEEDED(ret)) return RaiseErrorFromHandle(cur->cnxn, "SQLNumResultCols", cur->cnxn->hdbc, cur->hstmt); if (!PrepareResults(cur, cCols)) return 0; if (!create_name_map(cur, cCols, true)) return 0; // Return the cursor so the results can be iterated over directly. 
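    // e.g. (Python sketch): list the foreign keys in other tables that refer to the
    // primary key of 'mytable' (name illustrative):
    //
    //     for row in cursor.foreignKeys('mytable'):
    //         print(row.fktable_name, row.fkcolumn_name)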
Py_INCREF(cur); return (PyObject*)cur; } static char getTypeInfo_doc[] = "C.getTypeInfo(sqlType=None) --> self\n\n" "Executes SQLGetTypeInfo and creates a result set with information about the\n" "specified data type or all data types supported by the ODBC driver if not\n" "specified.\n\n" "Each row fetched has the following columns:\n" " 0) type_name\n" " 1) data_type\n" " 2) column_size\n" " 3) literal_prefix\n" " 4) literal_suffix\n" " 5) create_params\n" " 6) nullable\n" " 7) case_sensitive\n" " 8) searchable\n" " 9) unsigned_attribute\n" "10) fixed_prec_scale\n" "11) auto_unique_value\n" "12) local_type_name\n" "13) minimum_scale\n" "14) maximum_scale\n" "15) sql_data_type\n" "16) sql_datetime_sub\n" "17) num_prec_radix\n" "18) interval_precision"; static PyObject* Cursor_getTypeInfo(PyObject* self, PyObject* args, PyObject* kwargs) { UNUSED(kwargs); int nDataType = SQL_ALL_TYPES; if (!PyArg_ParseTuple(args, "|i", &nDataType)) return 0; Cursor* cur = Cursor_Validate(self, CURSOR_REQUIRE_OPEN); if (!free_results(cur, FREE_STATEMENT | FREE_PREPARED)) return 0; SQLRETURN ret = 0; Py_BEGIN_ALLOW_THREADS ret = SQLGetTypeInfo(cur->hstmt, (SQLSMALLINT)nDataType); Py_END_ALLOW_THREADS if (!SQL_SUCCEEDED(ret)) return RaiseErrorFromHandle(cur->cnxn, "SQLGetTypeInfo", cur->cnxn->hdbc, cur->hstmt); SQLSMALLINT cCols; Py_BEGIN_ALLOW_THREADS ret = SQLNumResultCols(cur->hstmt, &cCols); Py_END_ALLOW_THREADS if (!SQL_SUCCEEDED(ret)) return RaiseErrorFromHandle(cur->cnxn, "SQLNumResultCols", cur->cnxn->hdbc, cur->hstmt); if (!PrepareResults(cur, cCols)) return 0; if (!create_name_map(cur, cCols, true)) return 0; // Return the cursor so the results can be iterated over directly. Py_INCREF(cur); return (PyObject*)cur; } static PyObject* Cursor_nextset(PyObject* self, PyObject* args) { UNUSED(args); Cursor* cur = Cursor_Validate(self, 0); if (!cur) return 0; SQLRETURN ret = 0; Py_BEGIN_ALLOW_THREADS ret = SQLMoreResults(cur->hstmt); Py_END_ALLOW_THREADS if (ret == SQL_NO_DATA) { free_results(cur, FREE_STATEMENT | KEEP_PREPARED); Py_RETURN_FALSE; } if (!SQL_SUCCEEDED(ret)) { TRACE("nextset: %d not SQL_SUCCEEDED\n", ret); // Note: The SQL Server driver sometimes returns HY007 here if multiple statements (separated by ;) were // submitted. This is not documented, but I've seen it with multiple successful inserts. PyObject* pError = GetErrorFromHandle(cur->cnxn, "SQLMoreResults", cur->cnxn->hdbc, cur->hstmt); // // free_results must be run after the error has been collected // from the cursor as it's lost otherwise. // If free_results raises an error (eg a lost connection) report that instead. // if (!free_results(cur, FREE_STATEMENT | KEEP_PREPARED)) { return 0; } // // Return any error from the GetErrorFromHandle call above. // if (pError) { RaiseErrorFromException(pError); Py_DECREF(pError); return 0; } // // Not clear how we'd get here, but if we're in an error state // without an error, behave as if we had no nextset // Py_RETURN_FALSE; } // Must retrieve DiagRecs immediately after SQLMoreResults if (ret == SQL_SUCCESS_WITH_INFO) { GetDiagRecs(cur); } else { Py_XDECREF(cur->messages); cur->messages = PyList_New(0); } SQLSMALLINT cCols; Py_BEGIN_ALLOW_THREADS ret = SQLNumResultCols(cur->hstmt, &cCols); Py_END_ALLOW_THREADS if (!SQL_SUCCEEDED(ret)) { // Note: The SQL Server driver sometimes returns HY007 here if multiple statements (separated by ;) were // submitted. This is not documented, but I've seen it with multiple successful inserts.
PyObject* pError = GetErrorFromHandle(cur->cnxn, "SQLNumResultCols", cur->cnxn->hdbc, cur->hstmt); free_results(cur, FREE_STATEMENT | KEEP_PREPARED | KEEP_MESSAGES); if (pError) { RaiseErrorFromException(pError); Py_DECREF(pError); } return 0; } free_results(cur, KEEP_STATEMENT | KEEP_PREPARED | KEEP_MESSAGES); if (cCols != 0) { // A result set was created. if (!PrepareResults(cur, cCols)) return 0; if (!create_name_map(cur, cCols, lowercase())) return 0; } SQLLEN cRows; Py_BEGIN_ALLOW_THREADS ret = SQLRowCount(cur->hstmt, &cRows); Py_END_ALLOW_THREADS cur->rowcount = (int)cRows; if (!SQL_SUCCEEDED(ret)) return RaiseErrorFromHandle(cur->cnxn, "SQLRowCount", cur->cnxn->hdbc, cur->hstmt); Py_RETURN_TRUE; } static char procedureColumns_doc[] = "C.procedureColumns(procedure=None, catalog=None, schema=None) --> self\n\n" "Executes SQLProcedureColumns and creates a result set of information\n" "about stored procedure columns and results.\n" " 0) procedure_cat\n" " 1) procedure_schem\n" " 2) procedure_name\n" " 3) column_name\n" " 4) column_type\n" " 5) data_type\n" " 6) type_name\n" " 7) column_size\n" " 8) buffer_length\n" " 9) decimal_digits\n" " 10) num_prec_radix\n" " 11) nullable\n" " 12) remarks\n" " 13) column_def\n" " 14) sql_data_type\n" " 15) sql_datetime_sub\n" " 16) char_octet_length\n" " 17) ordinal_position\n" " 18) is_nullable"; char* Cursor_procedureColumns_kwnames[] = { "procedure", "catalog", "schema", 0 }; static PyObject* Cursor_procedureColumns(PyObject* self, PyObject* args, PyObject* kwargs) { const char* szProcedure = 0; const char* szCatalog = 0; const char* szSchema = 0; if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|zzz", Cursor_procedureColumns_kwnames, &szProcedure, &szCatalog, &szSchema)) return 0; Cursor* cur = Cursor_Validate(self, CURSOR_REQUIRE_OPEN); if (!free_results(cur, FREE_STATEMENT | FREE_PREPARED)) return 0; SQLRETURN ret = 0; Py_BEGIN_ALLOW_THREADS ret = SQLProcedureColumns(cur->hstmt, (SQLCHAR*)szCatalog, SQL_NTS, (SQLCHAR*)szSchema, SQL_NTS, (SQLCHAR*)szProcedure, SQL_NTS, 0, 0); Py_END_ALLOW_THREADS if (!SQL_SUCCEEDED(ret)) return RaiseErrorFromHandle(cur->cnxn, "SQLProcedureColumns", cur->cnxn->hdbc, cur->hstmt); SQLSMALLINT cCols; Py_BEGIN_ALLOW_THREADS ret = SQLNumResultCols(cur->hstmt, &cCols); Py_END_ALLOW_THREADS if (!SQL_SUCCEEDED(ret)) return RaiseErrorFromHandle(cur->cnxn, "SQLNumResultCols", cur->cnxn->hdbc, cur->hstmt); if (!PrepareResults(cur, cCols)) return 0; if (!create_name_map(cur, cCols, true)) return 0; // Return the cursor so the results can be iterated over directly.
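    // e.g. (Python sketch; procedure name is illustrative):
    //
    //     for row in cursor.procedureColumns('my_proc'):
    //         print(row.column_name, row.type_name)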
Py_INCREF(cur); return (PyObject*)cur; } static char procedures_doc[] = "C.procedures(procedure=None, catalog=None, schema=None) --> self\n\n" "Executes SQLProcedures and creates a result set of information about the\n" "procedures in the data source.\n" "Each row fetched has the following columns:\n" " 0) procedure_cat\n" " 1) procedure_schem\n" " 2) procedure_name\n" " 3) num_input_params\n" " 4) num_output_params\n" " 5) num_result_sets\n" " 6) remarks\n" " 7) procedure_type"; char* Cursor_procedures_kwnames[] = { "procedure", "catalog", "schema", 0 }; static PyObject* Cursor_procedures(PyObject* self, PyObject* args, PyObject* kwargs) { const char* szProcedure = 0; const char* szCatalog = 0; const char* szSchema = 0; if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|zzz", Cursor_procedures_kwnames, &szProcedure, &szCatalog, &szSchema)) return 0; Cursor* cur = Cursor_Validate(self, CURSOR_REQUIRE_OPEN); if (!free_results(cur, FREE_STATEMENT | FREE_PREPARED)) return 0; SQLRETURN ret = 0; Py_BEGIN_ALLOW_THREADS ret = SQLProcedures(cur->hstmt, (SQLCHAR*)szCatalog, SQL_NTS, (SQLCHAR*)szSchema, SQL_NTS, (SQLCHAR*)szProcedure, SQL_NTS); Py_END_ALLOW_THREADS if (!SQL_SUCCEEDED(ret)) return RaiseErrorFromHandle(cur->cnxn, "SQLProcedures", cur->cnxn->hdbc, cur->hstmt); SQLSMALLINT cCols; Py_BEGIN_ALLOW_THREADS ret = SQLNumResultCols(cur->hstmt, &cCols); Py_END_ALLOW_THREADS if (!SQL_SUCCEEDED(ret)) return RaiseErrorFromHandle(cur->cnxn, "SQLNumResultCols", cur->cnxn->hdbc, cur->hstmt); if (!PrepareResults(cur, cCols)) return 0; if (!create_name_map(cur, cCols, true)) return 0; // Return the cursor so the results can be iterated over directly. Py_INCREF(cur); return (PyObject*)cur; } static char skip_doc[] = "skip(count) --> None\n" \ "\n" \ "Skips the next `count` records by calling SQLFetchScroll with SQL_FETCH_NEXT.\n" "For convenience, skip(0) is accepted and will do nothing."; static PyObject* Cursor_skip(PyObject* self, PyObject* args) { Cursor* cursor = Cursor_Validate(self, CURSOR_REQUIRE_RESULTS | CURSOR_RAISE_ERROR); if (!cursor) return 0; int count; if (!PyArg_ParseTuple(args, "i", &count)) return 0; if (count == 0) Py_RETURN_NONE; // Note: I'm not sure about the performance implications of looping here -- I certainly would rather use // SQLFetchScroll(SQL_FETCH_RELATIVE, count), but it requires scrollable cursors which are often slower. I would // not expect skip to be used in performance intensive code since different SQL would probably be the "right" // answer instead of skip anyway. 
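    // e.g. (Python sketch): discard one row before fetching the rest:
    //
    //     cursor.execute("select * from t")
    //     cursor.skip(1)
    //     rows = cursor.fetchall()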
SQLRETURN ret = SQL_SUCCESS; Py_BEGIN_ALLOW_THREADS for (int i = 0; i < count && SQL_SUCCEEDED(ret); i++) ret = SQLFetchScroll(cursor->hstmt, SQL_FETCH_NEXT, 0); Py_END_ALLOW_THREADS if (!SQL_SUCCEEDED(ret) && ret != SQL_NO_DATA) return RaiseErrorFromHandle(cursor->cnxn, "SQLFetchScroll", cursor->cnxn->hdbc, cursor->hstmt); Py_RETURN_NONE; } static const char* commit_doc = "Commits any pending transaction to the database on the current connection,\n" "including those from other cursors.\n"; static PyObject* Cursor_commit(PyObject* self, PyObject* args) { Cursor* cur = Cursor_Validate(self, CURSOR_REQUIRE_OPEN | CURSOR_RAISE_ERROR); if (!cur) return 0; return Connection_endtrans(cur->cnxn, SQL_COMMIT); } static char rollback_doc[] = "Rolls back any pending transaction to the database on the current connection,\n" "including those from other cursors.\n"; static PyObject* Cursor_rollback(PyObject* self, PyObject* args) { Cursor* cur = Cursor_Validate(self, CURSOR_REQUIRE_OPEN | CURSOR_RAISE_ERROR); if (!cur) return 0; return Connection_endtrans(cur->cnxn, SQL_ROLLBACK); } static char cancel_doc[] = "Cursor.cancel() -> None\n" "\n" "Cancels the processing of the current statement.\n" "\n" "This calls SQLCancel and is designed to be called from another thread to stop\n" "processing of an ongoing query."; static PyObject* Cursor_cancel(PyObject* self, PyObject* args) { UNUSED(args); Cursor* cur = Cursor_Validate(self, CURSOR_REQUIRE_OPEN | CURSOR_RAISE_ERROR); if (!cur) return 0; SQLRETURN ret; Py_BEGIN_ALLOW_THREADS ret = SQLCancel(cur->hstmt); Py_END_ALLOW_THREADS if (!SQL_SUCCEEDED(ret)) return RaiseErrorFromHandle(cur->cnxn, "SQLCancel", cur->cnxn->hdbc, cur->hstmt); Py_RETURN_NONE; } static PyObject* Cursor_ignored(PyObject* self, PyObject* args) { UNUSED(self, args); Py_RETURN_NONE; } static char rowcount_doc[] = "This read-only attribute specifies the number of rows the last DML statement\n" " (INSERT, UPDATE, DELETE) affected. This is set to -1 for SELECT statements."; static char description_doc[] = "This read-only attribute is a sequence of 7-item sequences. Each of these\n" \ "sequences contains information describing one result column: (name, type_code,\n" \ "display_size, internal_size, precision, scale, null_ok). All values except\n" \ "name, type_code, and internal_size are None. The type_code entry will be the\n" \ "type object used to create values for that column (e.g. `str` or\n" \ "`datetime.datetime`).\n" \ "\n" \ "This attribute will be None for operations that do not return rows or if the\n" \ "cursor has not had an operation invoked via the execute() method yet.\n" \ "\n" \ "The type_code can be interpreted by comparing it to the Type Objects defined in\n" \ "the DB API and defined in the pyodbc module: Date, Time, Timestamp, Binary,\n" \ "STRING, BINARY, NUMBER, and DATETIME."; static char arraysize_doc[] = "This read/write attribute specifies the number of rows to fetch at a time with\n" \ "fetchmany(). It defaults to 1 meaning to fetch a single row at a time."; static char connection_doc[] = "This read-only attribute returns a reference to the Connection object on which\n" \ "the cursor was created.\n" \ "\n" \ "The attribute simplifies writing polymorphic code in multi-connection\n" \ "environments."; static char fastexecmany_doc[] = "This read/write attribute specifies whether to use a faster executemany() which\n" \ "uses parameter arrays. Not all drivers may work with this implementation.";
static char messages_doc[] = "This read-only attribute is a list of all the diagnostic messages in the\n" \ "current result set."; static PyMemberDef Cursor_members[] = { {"rowcount", T_INT, offsetof(Cursor, rowcount), READONLY, rowcount_doc }, {"description", T_OBJECT_EX, offsetof(Cursor, description), READONLY, description_doc }, {"arraysize", T_INT, offsetof(Cursor, arraysize), 0, arraysize_doc }, {"connection", T_OBJECT_EX, offsetof(Cursor, cnxn), READONLY, connection_doc }, {"fast_executemany",T_BOOL, offsetof(Cursor, fastexecmany), 0, fastexecmany_doc }, {"messages", T_OBJECT_EX, offsetof(Cursor, messages), READONLY, messages_doc }, { 0 } }; static PyObject* Cursor_getnoscan(PyObject* self, void *closure) { UNUSED(closure); Cursor* cursor = Cursor_Validate(self, CURSOR_REQUIRE_OPEN | CURSOR_RAISE_ERROR); if (!cursor) return 0; SQLULEN noscan = SQL_NOSCAN_OFF; SQLRETURN ret; Py_BEGIN_ALLOW_THREADS ret = SQLGetStmtAttr(cursor->hstmt, SQL_ATTR_NOSCAN, (SQLPOINTER)&noscan, sizeof(SQLULEN), 0); Py_END_ALLOW_THREADS if (!SQL_SUCCEEDED(ret)) { // Not supported? We're going to assume 'no'. Py_RETURN_FALSE; } if (noscan == SQL_NOSCAN_OFF) Py_RETURN_FALSE; Py_RETURN_TRUE; } static int Cursor_setnoscan(PyObject* self, PyObject* value, void *closure) { UNUSED(closure); Cursor* cursor = Cursor_Validate(self, CURSOR_REQUIRE_OPEN | CURSOR_RAISE_ERROR); if (!cursor) return -1; if (value == 0) { PyErr_SetString(PyExc_TypeError, "Cannot delete the noscan attribute"); return -1; } uintptr_t noscan = PyObject_IsTrue(value) ? SQL_NOSCAN_ON : SQL_NOSCAN_OFF; SQLRETURN ret; Py_BEGIN_ALLOW_THREADS ret = SQLSetStmtAttr(cursor->hstmt, SQL_ATTR_NOSCAN, (SQLPOINTER)noscan, 0); Py_END_ALLOW_THREADS if (!SQL_SUCCEEDED(ret)) { RaiseErrorFromHandle(cursor->cnxn, "SQLSetStmtAttr(SQL_ATTR_NOSCAN)", cursor->cnxn->hdbc, cursor->hstmt); return -1; } return 0; } static PyGetSetDef Cursor_getsetters[] = { {"noscan", Cursor_getnoscan, Cursor_setnoscan, "NOSCAN statement attr", 0}, { 0 } }; static char executemany_doc[] = "executemany(sql, seq_of_params) --> Cursor | count | None\n" \ "\n" \ "Prepare a database query or command and then execute it against all parameter\n" \ "sequences found in the sequence seq_of_params.\n" \ "\n" \ "Only the result of the final execution is returned. See `execute` for a\n" \ "description of parameter passing and the return value."; static char nextset_doc[] = "nextset() --> True | None\n" \ "\n" \ "Jumps to the next result set if the last SQL has multiple result sets.\n" \ "Returns True if there is a next result set, otherwise None."; static char ignored_doc[] = "Ignored."; static char fetchval_doc[] = "fetchval() --> value | None\n" \ "\n" "Returns the first column of the next row in the result set or None\n" \ "if there are no more rows."; static char fetchone_doc[] = "fetchone() --> Row | None\n" \ "\n" \ "Fetch the next row of a query result set, returning a single Row instance, or\n" \ "None when no more data is available.\n" \ "\n" \ "A ProgrammingError exception is raised if the previous call to execute() did\n" \ "not produce any result set or no call was issued yet."; static char fetchmany_doc[] = "fetchmany(size=cursor.arraysize) --> list of Rows\n" \ "\n" \ "Fetch the next set of rows of a query result, returning a list of Row\n" \ "instances. An empty list is returned when no more rows are available.\n" \ "\n" \ "The number of rows to fetch per call is specified by the parameter. If it is\n" \
"not given, the cursor's arraysize determines the number of rows to be\n" \ "fetched. The method should try to fetch as many rows as indicated by the size\n" \ "parameter. If this is not possible due to the specified number of rows not\n" \ "being available, fewer rows may be returned.\n" \ "\n" \ "A ProgrammingError exception is raised if the previous call to execute() did\n" \ "not produce any result set or no call was issued yet."; static char fetchall_doc[] = "fetchall() --> list of Rows\n" \ "\n" \ "Fetch all remaining rows of a query result, returning them as a list of Rows.\n" \ "An empty list is returned if there are no more rows.\n" \ "\n" \ "A ProgrammingError exception is raised if the previous call to execute() did\n" \ "not produce any result set or no call was issued yet."; static char setinputsizes_doc[] = "setinputsizes(sizes) -> None\n" \ "\n" \ "Sets the type information to be used when binding parameters.\n" \ "sizes must be a sequence of values, one for each input parameter.\n" \ "Each value may be an integer to override the column size when binding character\n" \ "data, a Type Object to override the SQL type, or a sequence of integers to specify\n" \ "(SQL type, column size, decimal digits) where any may be None to use the default.\n" \ "\n" \ "Parameters beyond the length of the sequence will be bound with the defaults.\n" \ "Setting sizes to None reverts all parameters to the defaults."; static char enter_doc[] = "__enter__() -> self."; static PyObject* Cursor_enter(PyObject* self, PyObject* args) { UNUSED(args); Py_INCREF(self); return self; } static char exit_doc[] = "__exit__(*excinfo) -> None. Commits the connection if necessary."; static PyObject* Cursor_exit(PyObject* self, PyObject* args) { Cursor* cursor = Cursor_Validate(self, CURSOR_REQUIRE_OPEN | CURSOR_RAISE_ERROR); if (!cursor) return 0; // If an error has occurred, `args` will be a tuple of 3 values. Otherwise it will be a tuple of 3 `None`s.
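    // In Python terms: `with cnxn.cursor() as cur: ...` invokes this on exit. The commit
    // below runs only when autocommit is off and the block exited cleanly (excinfo[0] is
    // None); note that the cursor itself is not closed here.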
assert(PyTuple_Check(args)); if (cursor->cnxn->nAutoCommit == SQL_AUTOCOMMIT_OFF && PyTuple_GetItem(args, 0) == Py_None) { SQLRETURN ret; Py_BEGIN_ALLOW_THREADS ret = SQLEndTran(SQL_HANDLE_DBC, cursor->cnxn->hdbc, SQL_COMMIT); Py_END_ALLOW_THREADS if (!SQL_SUCCEEDED(ret)) return RaiseErrorFromHandle(cursor->cnxn, "SQLEndTran(SQL_COMMIT)", cursor->cnxn->hdbc, cursor->hstmt); } Py_RETURN_NONE; } static PyMethodDef Cursor_methods[] = { { "close", (PyCFunction)Cursor_close, METH_NOARGS, close_doc }, { "execute", (PyCFunction)Cursor_execute, METH_VARARGS, execute_doc }, { "executemany", (PyCFunction)Cursor_executemany, METH_VARARGS, executemany_doc }, { "setinputsizes", (PyCFunction)Cursor_setinputsizes, METH_O, setinputsizes_doc }, { "setoutputsize", (PyCFunction)Cursor_ignored, METH_VARARGS, ignored_doc }, { "fetchval", (PyCFunction)Cursor_fetchval, METH_NOARGS, fetchval_doc }, { "fetchone", (PyCFunction)Cursor_fetchone, METH_NOARGS, fetchone_doc }, { "fetchall", (PyCFunction)Cursor_fetchall, METH_NOARGS, fetchall_doc }, { "fetchmany", (PyCFunction)Cursor_fetchmany, METH_VARARGS, fetchmany_doc }, { "nextset", (PyCFunction)Cursor_nextset, METH_NOARGS, nextset_doc }, { "tables", (PyCFunction)Cursor_tables, METH_VARARGS|METH_KEYWORDS, tables_doc }, { "columns", (PyCFunction)Cursor_columns, METH_VARARGS|METH_KEYWORDS, columns_doc }, { "statistics", (PyCFunction)Cursor_statistics, METH_VARARGS|METH_KEYWORDS, statistics_doc }, { "rowIdColumns", (PyCFunction)Cursor_rowIdColumns, METH_VARARGS|METH_KEYWORDS, rowIdColumns_doc }, { "rowVerColumns", (PyCFunction)Cursor_rowVerColumns, METH_VARARGS|METH_KEYWORDS, rowVerColumns_doc }, { "primaryKeys", (PyCFunction)Cursor_primaryKeys, METH_VARARGS|METH_KEYWORDS, primaryKeys_doc }, { "foreignKeys", (PyCFunction)Cursor_foreignKeys, METH_VARARGS|METH_KEYWORDS, foreignKeys_doc }, { "getTypeInfo", (PyCFunction)Cursor_getTypeInfo, METH_VARARGS|METH_KEYWORDS, getTypeInfo_doc }, { "procedures", (PyCFunction)Cursor_procedures, METH_VARARGS|METH_KEYWORDS, procedures_doc }, { "procedureColumns", (PyCFunction)Cursor_procedureColumns, METH_VARARGS|METH_KEYWORDS, procedureColumns_doc }, { "skip", (PyCFunction)Cursor_skip, METH_VARARGS, skip_doc }, { "commit", (PyCFunction)Cursor_commit, METH_NOARGS, commit_doc }, { "rollback", (PyCFunction)Cursor_rollback, METH_NOARGS, rollback_doc }, {"cancel", (PyCFunction)Cursor_cancel, METH_NOARGS, cancel_doc}, {"__enter__", Cursor_enter, METH_NOARGS, enter_doc }, {"__exit__", Cursor_exit, METH_VARARGS, exit_doc }, {0, 0, 0, 0} }; static char cursor_doc[] = "Cursor objects represent a database cursor, which is used to manage the context\n" \ "of a fetch operation. Cursors created from the same connection are not\n" \ "isolated, i.e., any changes done to the database by a cursor are immediately\n" \ "visible by the other cursors. 
Cursors created from different connections are\n" \ "isolated.\n" \ "\n" \ "Cursors implement the iterator protocol, so results can be iterated:\n" \ "\n" \ " cursor.execute(sql)\n" \ " for row in cursor:\n" \ " print row[0]"; PyTypeObject CursorType = { PyVarObject_HEAD_INIT(0, 0) "pyodbc.Cursor", // tp_name sizeof(Cursor), // tp_basicsize 0, // tp_itemsize (destructor)Cursor_dealloc, // destructor tp_dealloc 0, // tp_print 0, // tp_getattr 0, // tp_setattr 0, // tp_compare 0, // tp_repr 0, // tp_as_number 0, // tp_as_sequence 0, // tp_as_mapping 0, // tp_hash 0, // tp_call 0, // tp_str 0, // tp_getattro 0, // tp_setattro 0, // tp_as_buffer Py_TPFLAGS_DEFAULT, cursor_doc, // tp_doc 0, // tp_traverse 0, // tp_clear 0, // tp_richcompare 0, // tp_weaklistoffset Cursor_iter, // tp_iter Cursor_iternext, // tp_iternext Cursor_methods, // tp_methods Cursor_members, // tp_members Cursor_getsetters, // tp_getset 0, // tp_base 0, // tp_dict 0, // tp_descr_get 0, // tp_descr_set 0, // tp_dictoffset 0, // tp_init 0, // tp_alloc 0, // tp_new 0, // tp_free 0, // tp_is_gc 0, // tp_bases 0, // tp_mro 0, // tp_cache 0, // tp_subclasses 0, // tp_weaklist }; Cursor* Cursor_New(Connection* cnxn) { // Exported to allow the connection class to create cursors. #ifdef _MSC_VER #pragma warning(disable : 4365) #endif Cursor* cur = PyObject_NEW(Cursor, &CursorType); #ifdef _MSC_VER #pragma warning(default : 4365) #endif if (cur) { cur->cnxn = cnxn; cur->hstmt = SQL_NULL_HANDLE; cur->description = Py_None; cur->pPreparedSQL = 0; cur->paramcount = 0; cur->paramtypes = 0; cur->paramInfos = 0; cur->inputsizes = 0; cur->colinfos = 0; cur->arraysize = 1; cur->rowcount = -1; cur->map_name_to_index = 0; cur->fastexecmany = 0; cur->messages = Py_None; Py_INCREF(cnxn); Py_INCREF(cur->description); Py_INCREF(cur->messages); SQLRETURN ret; Py_BEGIN_ALLOW_THREADS ret = SQLAllocHandle(SQL_HANDLE_STMT, cnxn->hdbc, &cur->hstmt); Py_END_ALLOW_THREADS if (!SQL_SUCCEEDED(ret)) { RaiseErrorFromHandle(cnxn, "SQLAllocHandle", cnxn->hdbc, SQL_NULL_HANDLE); Py_DECREF(cur); return 0; } if (cnxn->timeout) { Py_BEGIN_ALLOW_THREADS ret = SQLSetStmtAttr(cur->hstmt, SQL_ATTR_QUERY_TIMEOUT, (SQLPOINTER)(uintptr_t)cnxn->timeout, 0); Py_END_ALLOW_THREADS if (!SQL_SUCCEEDED(ret)) { RaiseErrorFromHandle(cnxn, "SQLSetStmtAttr(SQL_ATTR_QUERY_TIMEOUT)", cnxn->hdbc, cur->hstmt); Py_DECREF(cur); return 0; } } TRACE("cursor.new cnxn=%p hdbc=%d cursor=%p hstmt=%d\n", (Connection*)cur->cnxn, ((Connection*)cur->cnxn)->hdbc, cur, cur->hstmt); } return cur; } void Cursor_init() { PyDateTime_IMPORT; } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1707151232.0 pyodbc-5.1.0/src/cursor.h0000644000175100001770000001453714560207600014671 0ustar00runnerdocker/* * Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated * documentation files (the "Software"), to deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to * permit persons to whom the Software is furnished to do so. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE * WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS * OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #ifndef CURSOR_H #define CURSOR_H struct Connection; struct ColumnInfo { SQLSMALLINT sql_type; // The column size from SQLDescribeCol. For character types, this is the maximum length, not including the NULL // terminator. For binary values, this is the maximum length. For numeric and decimal values, it is the defined // number of digits. For example, the precision of a column defined as NUMERIC(10,3) is 10. // // This value can be SQL_NO_TOTAL in which case the driver doesn't know the maximum length, such as for LONGVARCHAR // fields. SQLULEN column_size; // Tells us if an integer type is signed or unsigned. This is determined after a query using SQLColAttribute. All // of the integer types are the same size whether signed and unsigned, so we can allocate memory ahead of time // without knowing this. We use this during the fetch when converting to a Python integer or long. bool is_unsigned; }; struct ParamInfo { // The following correspond to the SQLBindParameter parameters. SQLSMALLINT ValueType; SQLSMALLINT ParameterType; SQLULEN ColumnSize; SQLSMALLINT DecimalDigits; // The value pointer that will be bound. If `alloc` is true, this was allocated with malloc and must be freed. // Otherwise it is zero or points into memory owned by the original Python parameter. SQLPOINTER ParameterValuePtr; SQLLEN BufferLength; SQLLEN StrLen_or_Ind; // If true, the memory in ParameterValuePtr was allocated via malloc and must be freed. bool allocated; PyObject* pObject; // An optional object that will be decremented at the end of the execute. // This is useful when the ParameterValuePtr data is in a Python object - // the object can be put here (and INCREFed if necessary!) instead of // copying the data out. // // If SQLPutData is used, this must be set to a bytes or bytearray object! SQLLEN maxlength; // If SQLPutData is being used, this must be set to the amount that can be // written to each SQLPutData call. (It is not clear if they are limited // like SQLBindParameter or not.) // For TVPs, the nested descriptors and current row. struct ParamInfo *nested; SQLLEN curTvpRow; // Optional data. If used, ParameterValuePtr will point into this. union { unsigned char ch; int i32; INT64 i64; double dbl; TIMESTAMP_STRUCT timestamp; DATE_STRUCT date; TIME_STRUCT time; } Data; }; struct Cursor { PyObject_HEAD // The Connection object (which is a PyObject) that created this cursor. Connection* cnxn; // Set to SQL_NULL_HANDLE when the cursor is closed. HSTMT hstmt; // // SQL Parameters // // If non-zero, a pointer to the previously prepared SQL string, allowing us to skip the prepare and gathering of // parameter data. PyObject* pPreparedSQL; // The number of parameter markers in pPreparedSQL. This will be zero when pPreparedSQL is zero but is set // immediately after preparing the SQL. int paramcount; // If non-zero, a pointer to an array of SQL type values allocated via malloc. This is zero until we actually ask // for the type of parameter, which is only when a parameter is None (NULL). At that point, the entire array is // allocated (length == paramcount) but all entries are set to SQL_UNKNOWN_TYPE. SQLSMALLINT* paramtypes; // If non-zero, a pointer to a buffer containing the actual parameters bound. 
If pPreparedSQL is zero, this should // be freed using free and set to zero. // // Even if the same SQL statement is executed twice, the parameter bindings are redone from scratch since we try to // bind into the Python objects directly. ParamInfo* paramInfos; // Parameter set array (used with executemany) unsigned char *paramArray; // Whether to use fast executemany with parameter arrays and other optimisations char fastexecmany; // The list of information for setinputsizes(). PyObject *inputsizes; // // Result Information // // An array of ColumnInfos, allocated via malloc. This will be zero when closed or when there are no query // results. ColumnInfo* colinfos; // The description tuple described in the DB API 2.0 specification. Set to None when there are no results. PyObject* description; int arraysize; // The Cursor.rowcount attribute from the DB API specification. int rowcount; // A dictionary that maps from column name (PyString) to index into the result columns (PyInteger). This is // constructed during an execute and shared with each row (reference counted) to implement accessing results by // column name. // // This duplicates some ODBC functionality, but allows us to use Row objects after the statement is closed and // should use less memory than putting each column into the Row's __dict__. // // Since this is shared by Row objects, it cannot be reused. New dictionaries are created for every execute. This // will be zero whenever there are no results. PyObject* map_name_to_index; // The messages attribute described in the DB API 2.0 specification. // Contains a list of all non-data messages provided by the driver, retrieved using SQLGetDiagRec. PyObject* messages; }; int GetDiagRecs(Cursor* cur); void Cursor_init(); Cursor* Cursor_New(Connection* cnxn); PyObject* Cursor_execute(PyObject* self, PyObject* args); #endif ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1707151232.0 pyodbc-5.1.0/src/dbspecific.h0000644000175100001770000000252514560207600015441 0ustar00runnerdocker#ifndef DBSPECIFIC_H #define DBSPECIFIC_H // Items specific to databases. // // Obviously we'd like to minimize this, but if they are needed this file isolates them. I'd like for there to be a // single build of pyodbc on each platform and not have a bunch of defines for supporting different databases. // --------------------------------------------------------------------------------------------------------------------- // SQL Server #define SQL_SS_XML -152 // SQL Server 2005 XML type #define SQL_DB2_DECFLOAT -360 // IBM DB/2 DECFLOAT type #define SQL_DB2_XML -370 // IBM DB/2 XML type #define SQL_SS_TIME2 -154 // SQL Server 2008 time type struct SQL_SS_TIME2_STRUCT { SQLUSMALLINT hour; SQLUSMALLINT minute; SQLUSMALLINT second; SQLUINTEGER fraction; }; // The SQLGUID type isn't always available when compiling, so we'll make our own with a // different name. struct PYSQLGUID { // I was hoping to use uint32_t, etc., but they aren't included in a Python build. I'm not // going to require that the compilers supply anything beyond that. There is PY_UINT32_T, // but there is no 16-bit version. We'll stick with Microsoft's WORD and DWORD which I // believe the ODBC headers will have to supply. 
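    // (The field layout below intentionally mirrors the standard GUID structure: one 32-bit
    // value, two 16-bit values, and eight bytes.)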
DWORD Data1; WORD Data2; WORD Data3; byte Data4[8]; }; #endif // DBSPECIFIC_H ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1707151232.0 pyodbc-5.1.0/src/decimal.cpp0000644000175100001770000000776614560207600015313 0ustar00runnerdocker #include "pyodbc.h" #include "wrapper.h" #include "textenc.h" #include "decimal.h" static PyObject* decimal = 0; // The Decimal constructor. static PyObject* re_sub = 0; static PyObject* re_compile = 0; static PyObject* re_escape = 0; // In Python 2.7, the 3 strings below are bytes objects. In 3.x they are Unicode objects. static PyObject* pDecimalPoint = 0; // A "." object which we replace pLocaleDecimal with if they are not the same. static PyObject* pLocaleDecimal = 0; // The decimal character used by the locale. This can be overridden by the user. // // In 2.7 this is a bytes object, otherwise unicode. static PyObject* pLocaleDecimalEscaped = 0; // A version of pLocaleDecimal escaped to be used in a regular expression. (The character // could be something special in regular expressions.) This is zero when pLocaleDecimal is // ".", indicating no replacement is necessary. static PyObject* pRegExpRemove = 0; // A regular expression that matches characters we want to remove before parsing. bool InitializeDecimal() { // This is called when the module is initialized and creates globals. Object d(PyImport_ImportModule("decimal")); decimal = PyObject_GetAttrString(d, "Decimal"); if (!decimal) return 0; Object re(PyImport_ImportModule("re")); re_sub = PyObject_GetAttrString(re, "sub"); re_escape = PyObject_GetAttrString(re, "escape"); re_compile = PyObject_GetAttrString(re, "compile"); Object module(PyImport_ImportModule("locale")); Object ldict(PyObject_CallMethod(module, "localeconv", 0)); Object point(PyDict_GetItemString(ldict, "decimal_point")); if (!point) return false; pDecimalPoint = PyUnicode_FromString("."); if (!pDecimalPoint) return false; if (!SetDecimalPoint(point)) return false; return true; } PyObject* GetDecimalPoint() { Py_INCREF(pLocaleDecimal); return pLocaleDecimal; } bool SetDecimalPoint(PyObject* pNew) { if (PyObject_RichCompareBool(pNew, pDecimalPoint, Py_EQ) == 1) { // They are the same. Py_XDECREF(pLocaleDecimal); pLocaleDecimal = pDecimalPoint; Py_INCREF(pLocaleDecimal); Py_XDECREF(pLocaleDecimalEscaped); pLocaleDecimalEscaped = 0; } else { // They are different, so we'll need a regular expression to match it so it can be // replaced in getdata GetDataDecimal. Py_XDECREF(pLocaleDecimal); pLocaleDecimal = pNew; Py_INCREF(pLocaleDecimal); Object e(PyObject_CallFunctionObjArgs(re_escape, pNew, 0)); if (!e) return false; Py_XDECREF(pLocaleDecimalEscaped); pLocaleDecimalEscaped = e.Detach(); } Object s(PyUnicode_FromFormat("[^0-9%U-]+", pLocaleDecimal)); if (!s) return false; Object r(PyObject_CallFunctionObjArgs(re_compile, s.Get(), 0)); if (!r) return false; Py_XDECREF(pRegExpRemove); pRegExpRemove = r.Detach(); return true; } PyObject* DecimalFromText(const TextEnc& enc, const byte* pb, Py_ssize_t cb) { // Creates a Decimal object from a text buffer. // The Decimal constructor requires the decimal point to be '.', so we need to convert the // locale's decimal to it. We also need to remove non-decimal characters such as thousands // separators and currency symbols. // // Remember that the thousands separate will often be '.', so have to do this carefully. // We'll create a regular expression with 0-9 and whatever the thousands separator is. 
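    // Worked example, assuming a locale where ',' is the decimal point and '.' the thousands
    // separator: "1.234,56" is first reduced by pRegExpRemove to "1234,56", then
    // re.sub(<escaped locale decimal>, ".", text) rewrites it to "1234.56", which the Decimal
    // constructor accepts.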
Object text(TextBufferToObject(enc, pb, cb)); if (!text) return 0; Object cleaned(PyObject_CallMethod(pRegExpRemove, "sub", "sO", "", text.Get())); if (!cleaned) return 0; if (pLocaleDecimalEscaped) { Object c2(PyObject_CallFunctionObjArgs(re_sub, pLocaleDecimalEscaped, pDecimalPoint, 0)); if (!c2) return 0; cleaned.Attach(c2.Detach()); } return PyObject_CallFunctionObjArgs(decimal, cleaned.Get(), 0); } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1707151232.0 pyodbc-5.1.0/src/decimal.h0000644000175100001770000000027214560207600014741 0ustar00runnerdocker#pragma once bool InitializeDecimal(); PyObject* GetDecimalPoint(); bool SetDecimalPoint(PyObject* pNew); PyObject* DecimalFromText(const TextEnc& enc, const byte* pb, Py_ssize_t cb); ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1707151232.0 pyodbc-5.1.0/src/errors.cpp0000644000175100001770000002413514560207600015216 0ustar00runnerdocker #include "pyodbc.h" #include "wrapper.h" #include "textenc.h" #include "connection.h" #include "errors.h" #include "pyodbcmodule.h" // Exceptions struct SqlStateMapping { char* prefix; size_t prefix_len; PyObject** pexc_class; // Note: Double indirection (pexc_class) necessary because the pointer values are not // initialized during startup }; static const struct SqlStateMapping sql_state_mapping[] = { { "01002", 5, &OperationalError }, { "08001", 5, &OperationalError }, { "08003", 5, &OperationalError }, { "08004", 5, &OperationalError }, { "08007", 5, &OperationalError }, { "08S01", 5, &OperationalError }, { "0A000", 5, &NotSupportedError }, { "28000", 5, &InterfaceError }, { "40002", 5, &IntegrityError }, { "22", 2, &DataError }, { "23", 2, &IntegrityError }, { "24", 2, &ProgrammingError }, { "25", 2, &ProgrammingError }, { "42", 2, &ProgrammingError }, { "HY001", 5, &OperationalError }, { "HY014", 5, &OperationalError }, { "HYT00", 5, &OperationalError }, { "HYT01", 5, &OperationalError }, { "IM001", 5, &InterfaceError }, { "IM002", 5, &InterfaceError }, { "IM003", 5, &InterfaceError }, }; static PyObject* ExceptionFromSqlState(const char* sqlstate) { // Returns the appropriate Python exception class given a SQLSTATE value. if (sqlstate && *sqlstate) { for (size_t i = 0; i < _countof(sql_state_mapping); i++) if (memcmp(sqlstate, sql_state_mapping[i].prefix, sql_state_mapping[i].prefix_len) == 0) return *sql_state_mapping[i].pexc_class; } return Error; } PyObject* RaiseErrorV(const char* sqlstate, PyObject* exc_class, const char* format, ...) { PyObject *pAttrs = 0, *pError = 0; if (!sqlstate || !*sqlstate) sqlstate = "HY000"; if (!exc_class) exc_class = ExceptionFromSqlState(sqlstate); // Note: Don't use any native strprintf routines. With Py_ssize_t, we need "%zd", but VC .NET doesn't support it. // PyUnicode_FromFormatV already takes this into account. va_list marker; va_start(marker, format); PyObject* pMsg = PyUnicode_FromFormatV(format, marker); va_end(marker); if (!pMsg) { PyErr_NoMemory(); return 0; } // Create an exception with a 'sqlstate' attribute (set to None if we don't have one) whose 'args' attribute is a // tuple containing the message and sqlstate value. The 'sqlstate' attribute ensures it is easy to access in // Python (and more understandable to the reader than ex.args[1]), but putting it in the args ensures it shows up // in logs because of the default repr/str. 
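    // For example, an exception raised this way might render as:
    //
    //     pyodbc.ProgrammingError: ('some driver message', 'HY000')
    //
    // with args[1] carrying the SQLSTATE.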
pAttrs = Py_BuildValue("(Os)", pMsg, sqlstate); if (pAttrs) { pError = PyObject_CallObject(exc_class, pAttrs); if (pError) RaiseErrorFromException(pError); } Py_DECREF(pMsg); Py_XDECREF(pAttrs); Py_XDECREF(pError); return 0; } bool HasSqlState(PyObject* ex, const char* szSqlState) { // Returns true if `ex` is an exception and has the given SQLSTATE. It is safe to pass 0 for // `ex`. if (!ex) return false; Object args(PyObject_GetAttrString(ex, "args")); if (!args) return false; Object sqlstate(PySequence_GetItem(args, 1)); if (!sqlstate || !PyBytes_Check(sqlstate)) return false; const char* sz = PyBytes_AsString(sqlstate); return (sz && _strcmpi(sz, szSqlState) == 0); } static PyObject* GetError(const char* sqlstate, PyObject* exc_class, PyObject* pMsg) { // pMsg // The error message. This function takes ownership of this object, so we'll free it if we fail to create an // error. PyObject *pSqlState=0, *pAttrs=0, *pError=0; if (!sqlstate || !*sqlstate) sqlstate = "HY000"; if (!exc_class) exc_class = ExceptionFromSqlState(sqlstate); pAttrs = PyTuple_New(2); if (!pAttrs) { Py_DECREF(pMsg); return 0; } PyTuple_SetItem(pAttrs, 1, pMsg); // pAttrs now owns the pMsg reference; steals a reference, does not increment pSqlState = PyUnicode_FromString(sqlstate); if (!pSqlState) { Py_DECREF(pAttrs); return 0; } PyTuple_SetItem(pAttrs, 0, pSqlState); // pAttrs now owns the pSqlState reference pError = PyObject_CallObject(exc_class, pAttrs); // pError will incref pAttrs Py_XDECREF(pAttrs); return pError; } static const char* DEFAULT_ERROR = "The driver did not supply an error!"; PyObject* RaiseErrorFromHandle(Connection *conn, const char* szFunction, HDBC hdbc, HSTMT hstmt) { // The exception is "set" in the interpreter. This function returns 0 so this can be used in a return statement. PyObject* pError = GetErrorFromHandle(conn, szFunction, hdbc, hstmt); if (pError) { RaiseErrorFromException(pError); Py_DECREF(pError); } return 0; } PyObject* GetErrorFromHandle(Connection *conn, const char* szFunction, HDBC hdbc, HSTMT hstmt) { TRACE("In RaiseError(%s)!\n", szFunction); // Creates and returns an exception from ODBC error information. // // ODBC can generate a chain of errors which we concatenate into one error message. We use the SQLSTATE from the // first message, which seems to be the most detailed, to determine the class of exception. // // If the function fails, for example, if it runs out of memory, zero is returned. // // szFunction // The name of the function that failed. Python generates a useful stack trace, but we often don't know where in // the C++ code we failed. SQLSMALLINT nHandleType; SQLHANDLE h; char sqlstate[6] = ""; SQLINTEGER nNativeError; SQLSMALLINT cchMsg; uint16_t sqlstateT[6]; SQLSMALLINT msgLen = 1023; uint16_t *szMsg = (uint16_t*) PyMem_Malloc((msgLen + 1) * sizeof(uint16_t)); if (!szMsg) { PyErr_NoMemory(); return 0; } if (hstmt != SQL_NULL_HANDLE) { nHandleType = SQL_HANDLE_STMT; h = hstmt; } else if (hdbc != SQL_NULL_HANDLE) { nHandleType = SQL_HANDLE_DBC; h = hdbc; } else { nHandleType = SQL_HANDLE_ENV; h = henv; } // unixODBC + PostgreSQL driver 07.01.0003 (Fedora 8 binaries from RPMs) crash if you call SQLGetDiagRec more // than once. I hate to do this, but I'm going to only call it once for non-Windows platforms for now... 
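    // The loop below implements the canonical ODBC diagnostics pattern, roughly:
    //
    //     for (SQLSMALLINT i = 1; ; i++) {
    //         SQLRETURN rc = SQLGetDiagRecW(nHandleType, h, i, szState,
    //                                       &nNative, szMsg, cchMsgMax, &cchMsg);
    //         if (!SQL_SUCCEEDED(rc))
    //             break;              // no more diagnostic records
    //         // ... append szMsg to the combined error message ...
    //     }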
SQLSMALLINT iRecord = 1; Object msg; for (;;) { szMsg[0] = 0; sqlstateT[0] = 0; nNativeError = 0; cchMsg = 0; SQLRETURN ret; Py_BEGIN_ALLOW_THREADS ret = SQLGetDiagRecW(nHandleType, h, iRecord, (SQLWCHAR*)sqlstateT, &nNativeError, (SQLWCHAR*)szMsg, msgLen, &cchMsg); Py_END_ALLOW_THREADS if (!SQL_SUCCEEDED(ret)) break; // If needed, allocate a bigger error message buffer and retry. if (cchMsg > msgLen - 1) { msgLen = cchMsg + 1; if (!PyMem_Realloc((BYTE**) &szMsg, (msgLen + 1) * sizeof(uint16_t))) { PyErr_NoMemory(); PyMem_Free(szMsg); return 0; } Py_BEGIN_ALLOW_THREADS ret = SQLGetDiagRecW(nHandleType, h, iRecord, (SQLWCHAR*)sqlstateT, &nNativeError, (SQLWCHAR*)szMsg, msgLen, &cchMsg); Py_END_ALLOW_THREADS if (!SQL_SUCCEEDED(ret)) break; } // Not always NULL terminated (MS Access) sqlstateT[5] = 0; // For now, default to UTF-16 if this is not in the context of a connection. // Note that this will not work if the DM is using a different wide encoding (e.g. UTF-32). const char *unicode_enc = conn ? conn->metadata_enc.name : ENCSTR_UTF16NE; Object msgStr(PyUnicode_Decode((char*)szMsg, cchMsg * sizeof(uint16_t), unicode_enc, "strict")); if (cchMsg != 0 && msgStr.Get()) { if (iRecord == 1) { // This is the first error message, so save the SQLSTATE for determining the // exception class and append the calling function name. CopySqlState(sqlstateT, sqlstate); msg = PyUnicode_FromFormat("[%s] %V (%ld) (%s)", sqlstate, msgStr.Get(), "(null)", (long)nNativeError, szFunction); if (!msg) { PyErr_NoMemory(); PyMem_Free(szMsg); return 0; } } else { // This is not the first error message, so append to the existing one. Object more(PyUnicode_FromFormat("; [%s] %V (%ld)", sqlstate, msgStr.Get(), "(null)", (long)nNativeError)); if (!more) break; // Something went wrong, but we'll return the msg we have so far Object both(PyUnicode_Concat(msg, more)); if (!both) break; msg = both.Detach(); } } iRecord++; #ifndef _MSC_VER // See non-Windows comment above break; #endif } // Raw message buffer not needed anymore PyMem_Free(szMsg); if (!msg || PyUnicode_GET_LENGTH(msg.Get()) == 0) { // This only happens using unixODBC. (Haven't tried iODBC yet.) Either the driver or the driver manager is // buggy and has signaled a fault without recording error information. sqlstate[0] = '\0'; msg = PyUnicode_FromString(DEFAULT_ERROR); if (!msg) { PyErr_NoMemory(); return 0; } } return GetError(sqlstate, 0, msg.Detach()); } static bool GetSqlState(HSTMT hstmt, char* szSqlState) { SQLSMALLINT cchMsg; SQLRETURN ret; Py_BEGIN_ALLOW_THREADS ret = SQLGetDiagField(SQL_HANDLE_STMT, hstmt, 1, SQL_DIAG_SQLSTATE, (SQLCHAR*)szSqlState, 5, &cchMsg); Py_END_ALLOW_THREADS return SQL_SUCCEEDED(ret); } bool HasSqlState(HSTMT hstmt, const char* szSqlState) { char szActual[6]; if (!GetSqlState(hstmt, szActual)) return false; return memcmp(szActual, szSqlState, 5) == 0; } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1707151232.0 pyodbc-5.1.0/src/errors.h0000644000175100001770000000723314560207600014663 0ustar00runnerdocker #ifndef _ERRORS_H_ #define _ERRORS_H_ // Sets an exception based on the ODBC SQLSTATE and error message and returns zero. If either handle is not available, // pass SQL_NULL_HANDLE. // // conn // The connection object, from which it will use the Unicode encoding. May be null if not available. // // szFunction // The name of the function that failed. Python generates a useful stack trace, but we often don't know where in the // C++ code we failed. 
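// A typical call site looks like:
//
//     return RaiseErrorFromHandle(cur->cnxn, "SQLFetch", cur->cnxn->hdbc, cur->hstmt);
//
// The zero return value lets callers set the exception and bail out in a single statement.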
// PyObject* RaiseErrorFromHandle(Connection *conn, const char* szFunction, HDBC hdbc, HSTMT hstmt); // Sets an exception using a printf-like error message. // // szSqlState // The optional SQLSTATE reported by ODBC. If not provided (sqlstate is NULL or sqlstate[0] is NULL), "HY000" // (General Error) is used. Note that HY000 causes Error to be used if exc_class is not provided. // // exc_class // The optional exception class (DatabaseError, etc.) to construct. If NULL, the appropriate class will be // determined from the SQLSTATE. // PyObject* RaiseErrorV(const char* sqlstate, PyObject* exc_class, const char* format, ...); // Constructs an exception and returns it. // // This function is like RaiseErrorFromHandle, but gives you the ability to examine the error first (in particular, // used to examine the SQLSTATE using HasSqlState). If you want to use the error, call PyErr_SetObject(ex->ob_type, // ex). Otherwise, dispose of the error using Py_DECREF(ex). // // conn // The connection object, from which it will use the Unicode encoding. May be null if not available. // // szFunction // The name of the function that failed. Python generates a useful stack trace, but we often don't know where in the // C++ code we failed. // PyObject* GetErrorFromHandle(Connection *conn, const char* szFunction, HDBC hdbc, HSTMT hstmt); // Returns true if `ex` is a database exception with SQLSTATE `szSqlState`. Returns false otherwise. // // It is safe to call with ex set to zero. The SQLSTATE comparison is case-insensitive. // bool HasSqlState(PyObject* ex, const char* szSqlState); // Returns true if the HSTMT has a diagnostic record with the given SQLSTATE. This is used after SQLGetData call that // returned SQL_SUCCESS_WITH_INFO to see if it also has SQLSTATE 01004, indicating there is more data. // bool HasSqlState(HSTMT hstmt, const char* szSqlState); inline PyObject* RaiseErrorFromException(PyObject* pError) { // PyExceptionInstance_Class doesn't exist in 2.4 PyErr_SetObject((PyObject*)Py_TYPE(pError), pError); return 0; } inline void CopySqlState(const uint16_t* src, char* dest) { // Copies a SQLSTATE read as SQLWCHAR into a character buffer. We know that SQLSTATEs are // composed of ASCII characters and we need one standard to compare when choosing // exceptions. // // Strangely, even when the error messages are UTF-8, PostgreSQL and MySQL encode the // sqlstate as UTF-16LE. We'll simply copy all non-zero bytes, with some checks for // running off the end of the buffers which will work for ASCII, UTF8, and UTF16 LE & BE. // It would work for UTF32 if I increase the size of the uint16_t buffer to handle it. // // (In the worst case, if a driver does something totally weird, we'll have an incomplete // SQLSTATE.) // const char* pchSrc = (const char*)src; const char* pchSrcMax = pchSrc + sizeof(uint16_t) * 5; char* pchDest = dest; // Where we are copying into dest char* pchDestMax = dest + 5; // We know a SQLSTATE is 5 characters long while (pchDest < pchDestMax && pchSrc < pchSrcMax) { if (*pchSrc) *pchDest++ = *pchSrc; pchSrc++; } *pchDest = 0; } #endif // _ERRORS_H_ ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1707151232.0 pyodbc-5.1.0/src/getdata.cpp0000644000175100001770000005460014560207600015313 0ustar00runnerdocker // The functions for reading a single value from the database using SQLGetData. There is a different function for // every data type. 
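// A minimal sketch of the underlying ODBC call all of these functions wrap (the C type,
// buffer, and size vary per function below):
//
//     SQLLEN ind;
//     SQLINTEGER value;
//     SQLRETURN rc = SQLGetData(hstmt, iColOneBased, SQL_C_LONG,
//                               &value, sizeof(value), &ind);
//     if (SQL_SUCCEEDED(rc) && ind != SQL_NULL_DATA)
//         { /* use value */ }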
#include "pyodbc.h" #include "wrapper.h" #include "textenc.h" #include "pyodbcmodule.h" #include "cursor.h" #include "connection.h" #include "errors.h" #include "dbspecific.h" #include "decimal.h" #include #include // NULL terminator notes: // // * pinfo->column_size, from SQLDescribeCol, does not include a NULL terminator. For example, column_size for a // char(10) column would be 10. (Also, when dealing with SQLWCHAR, it is the number of *characters*, not bytes.) // // * When passing a length to PyUnicode_FromStringAndSize and similar Unicode functions, do not add the NULL // terminator -- it will be added automatically. See objects/stringobject.c // // * SQLGetData does not return the NULL terminator in the length indicator. (Therefore, you can pass this value // directly to the Python string functions.) // // * SQLGetData will write a NULL terminator in the output buffer, so you must leave room for it. You must also // include the NULL terminator in the buffer length passed to SQLGetData. // // ODBC generalization: // 1) Include NULL terminators in input buffer lengths. // 2) NULL terminators are not used in data lengths. void GetData_init() { PyDateTime_IMPORT; } static byte* ReallocOrFreeBuffer(byte* pb, Py_ssize_t cbNeed); inline bool IsBinaryType(SQLSMALLINT sqltype) { // Is this SQL type (e.g. SQL_VARBINARY) a binary type or not? switch (sqltype) { case SQL_BINARY: case SQL_VARBINARY: case SQL_LONGVARBINARY: return true; } return false; } inline bool IsWideType(SQLSMALLINT sqltype) { switch (sqltype) { case SQL_WCHAR: case SQL_WVARCHAR: case SQL_WLONGVARCHAR: case SQL_SS_XML: case SQL_DB2_XML: return true; } return false; } static bool ReadVarColumn(Cursor* cur, Py_ssize_t iCol, SQLSMALLINT ctype, bool& isNull, byte*& pbResult, Py_ssize_t& cbResult) { // Called to read a variable-length column and return its data in a newly-allocated heap // buffer. // // Returns true if the read was successful and false if the read failed. If the read // failed a Python exception will have been set. // // If a non-null and non-empty value was read, pbResult will be set to a buffer containing // the data and cbResult will be set to the byte length. This length does *not* include a // null terminator. In this case the data *must* be freed using PyMem_Free. // // If a null value was read, isNull is set to true and pbResult and cbResult will be set to // 0. // // If a zero-length value was read, isNull is set to false and pbResult and cbResult will // be set to 0. isNull = false; pbResult = 0; cbResult = 0; const Py_ssize_t cbElement = (Py_ssize_t)(IsWideType(ctype) ? sizeof(uint16_t) : 1); const Py_ssize_t cbNullTerminator = IsBinaryType(ctype) ? 0 : cbElement; // TODO: Make the initial allocation size configurable? Py_ssize_t cbAllocated = 4096; Py_ssize_t cbUsed = 0; byte* pb = (byte*)PyMem_Malloc((size_t)cbAllocated); if (!pb) { PyErr_NoMemory(); return false; } SQLRETURN ret = SQL_SUCCESS_WITH_INFO; do { // Call SQLGetData in a loop as long as it keeps returning partial data (ret == // SQL_SUCCESS_WITH_INFO). Each time through, update the buffer pb, cbAllocated, and // cbUsed. 
Py_ssize_t cbAvailable = cbAllocated - cbUsed; SQLLEN cbData = 0; Py_BEGIN_ALLOW_THREADS ret = SQLGetData(cur->hstmt, (SQLUSMALLINT)(iCol+1), ctype, &pb[cbUsed], (SQLLEN)cbAvailable, &cbData); Py_END_ALLOW_THREADS; TRACE("ReadVarColumn: SQLGetData avail=%d --> ret=%d cbData=%d\n", (int)cbAvailable, (int)ret, (int)cbData); if (!SQL_SUCCEEDED(ret) && ret != SQL_NO_DATA) { RaiseErrorFromHandle(cur->cnxn, "SQLGetData", cur->cnxn->hdbc, cur->hstmt); return false; } if (ret == SQL_SUCCESS && (int)cbData < 0) { // HACK: FreeTDS 0.91 on OS/X returns -4 for NULL data instead of SQL_NULL_DATA // (-1). I've traced into the code and it appears to be the result of assigning -1 // to a SQLLEN. We are going to treat all negative values as NULL. ret = SQL_NULL_DATA; cbData = 0; } // SQLGetData behavior is incredibly quirky: It doesn't tell us the total, the total // we've read, or even the amount just read. It returns the amount just read, plus any // remaining. Unfortunately, the only way to pick them apart is to subtract out the // amount of buffer we supplied. if (ret == SQL_SUCCESS_WITH_INFO) { // This means we read some data, but there is more. SQLGetData is very weird - it // sets cbRead to the number of bytes we read *plus* the amount remaining. Py_ssize_t cbRemaining = 0; // How many more bytes do we need to allocate, not including null? Py_ssize_t cbRead = 0; // How much did we just read, not including null? if (cbData == SQL_NO_TOTAL) { // This special value indicates there is more data but the driver can't tell us // how much more, so we'll just add whatever we want and try again. It also // tells us, however, that the buffer is full, so the amount we read equals the // amount we offered. Remember that if the type requires a null terminator, it // will be added *every* time, not just at the end, so we need to subtract it. cbRead = (cbAvailable - cbNullTerminator); cbRemaining = 1024 * 1024; } else if ((Py_ssize_t)cbData >= cbAvailable) { // We offered cbAvailable space, but there was cbData data. The driver filled // the buffer with what it could. Remember that if the type requires a null // terminator, the driver is going to append one on *every* read, so we need to // subtract them out. At least we know the exact data amount now and we can // allocate a precise amount. cbRead = (cbAvailable - cbNullTerminator); cbRemaining = cbData - cbRead; } else { // I would not expect to get here - we apparently read all of the data but the // driver did not return SQL_SUCCESS? cbRead = (cbData - cbNullTerminator); cbRemaining = 0; } cbUsed += cbRead; TRACE("Memory Need: cbRemaining=%ld cbRead=%ld\n", (long)cbRemaining, (long)cbRead); if (cbRemaining > 0) { // This is a tiny bit complicated by the fact that the data is null terminated, // meaning we haven't actually used up the entire buffer (cbAllocated), only // cbUsed (which should be cbAllocated - cbNullTerminator). Py_ssize_t cbNeed = cbUsed + cbRemaining + cbNullTerminator; pb = ReallocOrFreeBuffer(pb, cbNeed); if (!pb) return false; cbAllocated = cbNeed; } } else if (ret == SQL_SUCCESS) { // We read some data and this is the last batch (so we'll drop out of the // loop). // // If I'm reading the documentation correctly, SQLGetData is not going to // include the null terminator in cbRead. 
cbUsed += cbData; } } while (ret == SQL_SUCCESS_WITH_INFO); isNull = (ret == SQL_NULL_DATA); if (!isNull && cbUsed > 0) { pbResult = pb; cbResult = cbUsed; } else { PyMem_Free(pb); } return true; } static byte* ReallocOrFreeBuffer(byte* pb, Py_ssize_t cbNeed) { // Attempts to reallocate `pb` to size `cbNeed`. If the realloc fails, the original memory // is freed, a memory exception is set, and 0 is returned. Otherwise the new pointer is // returned. byte* pbNew = (byte*)PyMem_Realloc(pb, (size_t)cbNeed); if (pbNew == 0) { PyMem_Free(pb); PyErr_NoMemory(); return 0; } return pbNew; } static PyObject* GetText(Cursor* cur, Py_ssize_t iCol) { // We are reading one of the SQL_WCHAR, SQL_WVARCHAR, etc., and will return // a string. // // If there is no configuration we would expect this to be UTF-16 encoded data. (If no // byte-order-mark, we would expect it to be big-endian.) // // Now, just because the driver is telling us it is wide data doesn't mean it is true. // psqlodbc with UTF-8 will tell us it is wide data but you must ask for single-byte. // (Otherwise it is just UTF-8 with each character stored as 2 bytes.) That's why we allow // the user to configure. ColumnInfo* pinfo = &cur->colinfos[iCol]; const TextEnc& enc = IsWideType(pinfo->sql_type) ? cur->cnxn->sqlwchar_enc : cur->cnxn->sqlchar_enc; bool isNull = false; byte* pbData = 0; Py_ssize_t cbData = 0; if (!ReadVarColumn(cur, iCol, enc.ctype, isNull, pbData, cbData)) return 0; if (isNull) { assert(pbData == 0 && cbData == 0); Py_RETURN_NONE; } PyObject* result = TextBufferToObject(enc, pbData, cbData); PyMem_Free(pbData); return result; } static PyObject* GetBinary(Cursor* cur, Py_ssize_t iCol) { // Reads SQL_BINARY. bool isNull = false; byte* pbData = 0; Py_ssize_t cbData = 0; if (!ReadVarColumn(cur, iCol, SQL_C_BINARY, isNull, pbData, cbData)) return 0; if (isNull) { assert(pbData == 0 && cbData == 0); Py_RETURN_NONE; } PyObject* obj; obj = PyBytes_FromStringAndSize((char*)pbData, cbData); PyMem_Free(pbData); return obj; } static PyObject* GetDataUser(Cursor* cur, Py_ssize_t iCol, PyObject* func) { bool isNull = false; byte* pbData = 0; Py_ssize_t cbData = 0; if (!ReadVarColumn(cur, iCol, SQL_C_BINARY, isNull, pbData, cbData)) return 0; if (isNull) { assert(pbData == 0 && cbData == 0); Py_RETURN_NONE; } PyObject* value = PyBytes_FromStringAndSize((char*)pbData, cbData); PyMem_Free(pbData); if (!value) return 0; PyObject* result = PyObject_CallFunction(func, "(O)", value); Py_DECREF(value); if (!result) return 0; return result; } static PyObject* GetDataDecimal(Cursor* cur, Py_ssize_t iCol) { // The SQL_NUMERIC_STRUCT support is hopeless (SQL Server ignores scale on input parameters // and output columns, Oracle does something else weird, and many drivers don't support it // at all), so we'll rely on the Decimal's string parsing. Unfortunately, the Decimal // author does not pay attention to the locale, so we have to modify the string ourselves. // // Oracle inserts group separators (commas in US, periods in some countries), so leave room // for that too. // // Some databases support a 'money' type which also inserts currency symbols. Since we // don't want to keep track of all these, we'll ignore all characters we don't recognize. // We will look for digits, negative sign (which I hope is universal), and a decimal point // ('.' or ',' usually). We'll do everything as Unicode in case currencies, etc. are too // far out. // // This seems very inefficient. 
We know the characters we are interested in are ASCII // since they are -, ., and 0-9. There /could/ be a Unicode currency symbol, but I'm going // to ignore that for right now. Therefore if we ask for the data in SQLCHAR, it should be // ASCII even if the encoding is UTF-8. const TextEnc& enc = cur->cnxn->sqlwchar_enc; // I'm going to request the data as Unicode in case there is a weird currency symbol. If // this is a performance problems we may want a flag on this. bool isNull = false; byte* pbData = 0; Py_ssize_t cbData = 0; if (!ReadVarColumn(cur, iCol, enc.ctype, isNull, pbData, cbData)) return 0; if (isNull) { assert(pbData == 0 && cbData == 0); Py_RETURN_NONE; } Object result(DecimalFromText(enc, pbData, cbData)); PyMem_Free(pbData); return result.Detach(); } static PyObject* GetDataBit(Cursor* cur, Py_ssize_t iCol) { SQLCHAR ch; SQLLEN cbFetched; SQLRETURN ret; Py_BEGIN_ALLOW_THREADS ret = SQLGetData(cur->hstmt, (SQLUSMALLINT)(iCol+1), SQL_C_BIT, &ch, sizeof(ch), &cbFetched); Py_END_ALLOW_THREADS if (!SQL_SUCCEEDED(ret)) return RaiseErrorFromHandle(cur->cnxn, "SQLGetData", cur->cnxn->hdbc, cur->hstmt); if (cbFetched == SQL_NULL_DATA) Py_RETURN_NONE; if (ch == SQL_TRUE) Py_RETURN_TRUE; Py_RETURN_FALSE; } static PyObject* GetDataLong(Cursor* cur, Py_ssize_t iCol) { ColumnInfo* pinfo = &cur->colinfos[iCol]; SQLINTEGER value; SQLLEN cbFetched; SQLRETURN ret; SQLSMALLINT nCType = pinfo->is_unsigned ? SQL_C_ULONG : SQL_C_LONG; Py_BEGIN_ALLOW_THREADS ret = SQLGetData(cur->hstmt, (SQLUSMALLINT)(iCol+1), nCType, &value, sizeof(value), &cbFetched); Py_END_ALLOW_THREADS if (!SQL_SUCCEEDED(ret)) return RaiseErrorFromHandle(cur->cnxn, "SQLGetData", cur->cnxn->hdbc, cur->hstmt); if (cbFetched == SQL_NULL_DATA) Py_RETURN_NONE; if (pinfo->is_unsigned) return PyLong_FromLong(*(SQLINTEGER*)&value); return PyLong_FromLong(value); } static PyObject* GetDataLongLong(Cursor* cur, Py_ssize_t iCol) { ColumnInfo* pinfo = &cur->colinfos[iCol]; SQLSMALLINT nCType = pinfo->is_unsigned ? 
SQL_C_UBIGINT : SQL_C_SBIGINT; SQLBIGINT value; SQLLEN cbFetched; SQLRETURN ret; Py_BEGIN_ALLOW_THREADS ret = SQLGetData(cur->hstmt, (SQLUSMALLINT)(iCol+1), nCType, &value, sizeof(value), &cbFetched); Py_END_ALLOW_THREADS if (!SQL_SUCCEEDED(ret)) return RaiseErrorFromHandle(cur->cnxn, "SQLGetData", cur->cnxn->hdbc, cur->hstmt); if (cbFetched == SQL_NULL_DATA) Py_RETURN_NONE; if (pinfo->is_unsigned) return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG)(SQLUBIGINT)value); return PyLong_FromLongLong((PY_LONG_LONG)value); } static PyObject* GetDataDouble(Cursor* cur, Py_ssize_t iCol) { double value; SQLLEN cbFetched = 0; SQLRETURN ret; Py_BEGIN_ALLOW_THREADS ret = SQLGetData(cur->hstmt, (SQLUSMALLINT)(iCol+1), SQL_C_DOUBLE, &value, sizeof(value), &cbFetched); Py_END_ALLOW_THREADS if (!SQL_SUCCEEDED(ret)) return RaiseErrorFromHandle(cur->cnxn, "SQLGetData", cur->cnxn->hdbc, cur->hstmt); if (cbFetched == SQL_NULL_DATA) Py_RETURN_NONE; return PyFloat_FromDouble(value); } static PyObject* GetSqlServerTime(Cursor* cur, Py_ssize_t iCol) { SQL_SS_TIME2_STRUCT value; SQLLEN cbFetched = 0; SQLRETURN ret; Py_BEGIN_ALLOW_THREADS ret = SQLGetData(cur->hstmt, (SQLUSMALLINT)(iCol+1), SQL_C_BINARY, &value, sizeof(value), &cbFetched); Py_END_ALLOW_THREADS if (!SQL_SUCCEEDED(ret)) return RaiseErrorFromHandle(cur->cnxn, "SQLGetData", cur->cnxn->hdbc, cur->hstmt); if (cbFetched == SQL_NULL_DATA) Py_RETURN_NONE; int micros = (int)(value.fraction / 1000); // nanos --> micros return PyTime_FromTime(value.hour, value.minute, value.second, micros); } static PyObject* GetUUID(Cursor* cur, Py_ssize_t iCol) { // REVIEW: Since GUID is a fixed size, do we need to pass the size or cbFetched? PYSQLGUID guid; SQLLEN cbFetched = 0; SQLRETURN ret; Py_BEGIN_ALLOW_THREADS ret = SQLGetData(cur->hstmt, (SQLUSMALLINT)(iCol+1), SQL_GUID, &guid, sizeof(guid), &cbFetched); Py_END_ALLOW_THREADS if (!SQL_SUCCEEDED(ret)) return RaiseErrorFromHandle(cur->cnxn, "SQLGetData", cur->cnxn->hdbc, cur->hstmt); if (cbFetched == SQL_NULL_DATA) Py_RETURN_NONE; const char* szFmt = "(yyy#)"; Object args(Py_BuildValue(szFmt, NULL, NULL, &guid, (int)sizeof(guid))); if (!args) return 0; PyObject* uuid_type = GetClassForThread("uuid", "UUID"); if (!uuid_type) return 0; PyObject* uuid = PyObject_CallObject(uuid_type, args.Get()); Py_DECREF(uuid_type); return uuid; } static PyObject* GetDataTimestamp(Cursor* cur, Py_ssize_t iCol) { TIMESTAMP_STRUCT value; SQLLEN cbFetched = 0; SQLRETURN ret; struct tm t; Py_BEGIN_ALLOW_THREADS ret = SQLGetData(cur->hstmt, (SQLUSMALLINT)(iCol+1), SQL_C_TYPE_TIMESTAMP, &value, sizeof(value), &cbFetched); Py_END_ALLOW_THREADS if (!SQL_SUCCEEDED(ret)) return RaiseErrorFromHandle(cur->cnxn, "SQLGetData", cur->cnxn->hdbc, cur->hstmt); if (cbFetched == SQL_NULL_DATA) Py_RETURN_NONE; switch (cur->colinfos[iCol].sql_type) { case SQL_TYPE_TIME: { int micros = (int)(value.fraction / 1000); // nanos --> micros return PyTime_FromTime(value.hour, value.minute, value.second, micros); } case SQL_TYPE_DATE: return PyDate_FromDate(value.year, value.month, value.day); case SQL_TYPE_TIMESTAMP: { if (value.year < 1) { value.year = 1; } else if (value.year > 9999) { value.year = 9999; } } } int micros = (int)(value.fraction / 1000); // nanos --> micros if (value.hour == 24) { // some backends support 24:00 (hh:mm) as "end of a day" t.tm_year = value.year - 1900; // tm_year is 1900-based t.tm_mon = value.month - 1; // tm_mon is zero-based t.tm_mday = value.day; t.tm_hour = value.hour; t.tm_min = value.minute; t.tm_sec = value.second; 
t.tm_isdst = -1; // auto-adjust for dst mktime(&t); // normalize values in t return PyDateTime_FromDateAndTime( t.tm_year + 1900, t.tm_mon + 1, t.tm_mday, t.tm_hour, t.tm_min, t.tm_sec, micros ); } return PyDateTime_FromDateAndTime(value.year, value.month, value.day, value.hour, value.minute, value.second, micros); } PyObject* PythonTypeFromSqlType(Cursor* cur, SQLSMALLINT type) { // Returns a type object ('int', 'str', etc.) for the given ODBC C type. This is used to populate // Cursor.description with the type of Python object that will be returned for each column. // // type // The ODBC C type (SQL_C_CHAR, etc.) of the column. // // The returned object does not have its reference count incremented (is a borrowed // reference). // // Keep this in sync with GetData below. if (cur->cnxn->map_sqltype_to_converter) { PyObject* func = Connection_GetConverter(cur->cnxn, type); if (func) return (PyObject*)&PyUnicode_Type; } PyObject* pytype = 0; bool incref = true; switch (type) { case SQL_CHAR: case SQL_VARCHAR: case SQL_LONGVARCHAR: pytype = (PyObject*)&PyUnicode_Type; break; case SQL_GUID: if (UseNativeUUID()) { pytype = GetClassForThread("uuid", "UUID"); incref = false; } else { pytype = (PyObject*)&PyUnicode_Type; } break; case SQL_WCHAR: case SQL_WVARCHAR: case SQL_WLONGVARCHAR: case SQL_SS_XML: case SQL_DB2_XML: pytype = (PyObject*)&PyUnicode_Type; break; case SQL_DECIMAL: case SQL_NUMERIC: pytype = GetClassForThread("decimal", "Decimal"); incref = false; break; case SQL_REAL: case SQL_FLOAT: case SQL_DOUBLE: pytype = (PyObject*)&PyFloat_Type; break; case SQL_SMALLINT: case SQL_INTEGER: case SQL_TINYINT: pytype = (PyObject*)&PyLong_Type; break; case SQL_TYPE_DATE: pytype = (PyObject*)PyDateTimeAPI->DateType; break; case SQL_TYPE_TIME: case SQL_SS_TIME2: // SQL Server 2008+ pytype = (PyObject*)PyDateTimeAPI->TimeType; break; case SQL_TYPE_TIMESTAMP: pytype = (PyObject*)PyDateTimeAPI->DateTimeType; break; case SQL_BIGINT: pytype = (PyObject*)&PyLong_Type; break; case SQL_BIT: pytype = (PyObject*)&PyBool_Type; break; case SQL_BINARY: case SQL_VARBINARY: case SQL_LONGVARBINARY: default: pytype = (PyObject*)&PyByteArray_Type; break; } if (pytype && incref) Py_INCREF(pytype); return pytype; } PyObject* GetData(Cursor* cur, Py_ssize_t iCol) { // Returns an object representing the value in the row/field. If 0 is returned, an exception has already been set. // // The data is assumed to be the default C type for the column's SQL type. ColumnInfo* pinfo = &cur->colinfos[iCol]; // First see if there is a user-defined conversion. 
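    // Output converters are registered from Python with, e.g.:
    //
    //     cnxn.add_output_converter(pyodbc.SQL_WVARCHAR, func)
    //
    // where `func` receives the raw bytes (the column is read as SQL_C_BINARY in
    // GetDataUser above) and may return any Python object.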
if (cur->cnxn->map_sqltype_to_converter) { PyObject* func = Connection_GetConverter(cur->cnxn, pinfo->sql_type); if (func) { return GetDataUser(cur, iCol, func); } if (PyErr_Occurred()) return 0; } switch (pinfo->sql_type) { case SQL_WCHAR: case SQL_WVARCHAR: case SQL_WLONGVARCHAR: return GetText(cur, iCol); case SQL_CHAR: case SQL_VARCHAR: case SQL_LONGVARCHAR: case SQL_SS_XML: case SQL_DB2_XML: return GetText(cur, iCol); case SQL_GUID: if (UseNativeUUID()) return GetUUID(cur, iCol); return GetText(cur, iCol); break; case SQL_BINARY: case SQL_VARBINARY: case SQL_LONGVARBINARY: return GetBinary(cur, iCol); case SQL_DECIMAL: case SQL_NUMERIC: case SQL_DB2_DECFLOAT: return GetDataDecimal(cur, iCol); case SQL_BIT: return GetDataBit(cur, iCol); case SQL_TINYINT: case SQL_SMALLINT: case SQL_INTEGER: return GetDataLong(cur, iCol); case SQL_BIGINT: return GetDataLongLong(cur, iCol); case SQL_REAL: case SQL_FLOAT: case SQL_DOUBLE: return GetDataDouble(cur, iCol); case SQL_TYPE_DATE: case SQL_TYPE_TIME: case SQL_TYPE_TIMESTAMP: return GetDataTimestamp(cur, iCol); case SQL_SS_TIME2: return GetSqlServerTime(cur, iCol); } return RaiseErrorV("HY106", ProgrammingError, "ODBC SQL type %d is not yet supported. column-index=%zd type=%d", (int)pinfo->sql_type, iCol, (int)pinfo->sql_type); } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1707151232.0 pyodbc-5.1.0/src/getdata.h0000644000175100001770000000063314560207600014755 0ustar00runnerdocker #ifndef _GETDATA_H_ #define _GETDATA_H_ void GetData_init(); PyObject* PythonTypeFromSqlType(Cursor* cur, SQLSMALLINT type); PyObject* GetData(Cursor* cur, Py_ssize_t iCol); /** * If this sql type has a user-defined conversion, the index into the connection's `conv_funcs` array is returned. * Otherwise -1 is returned. */ int GetUserConvIndex(Cursor* cur, SQLSMALLINT sql_type); #endif // _GETDATA_H_ ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1707151232.0 pyodbc-5.1.0/src/params.cpp0000644000175100001770000017757314560207600015204 0ustar00runnerdocker// https://msdn.microsoft.com/en-us/library/ms711014(v=vs.85).aspx // // "The length of both the data buffer and the data it contains is measured in bytes, as // opposed to characters." // // https://msdn.microsoft.com/en-us/library/ms711786(v=vs.85).aspx // // Column Size: "For character types, this is the length in characters of the data" // NOTE: I have not ported the "fast executemany" code from 4.x to 5.x yet. Once 5.0 is // complete, I'll port it in 5.1. My goal is to ensure it uses the exact same binding code // between both code paths. I'll probably also rename the feature to something that describes // it more precisely like "array binding". #include "pyodbc.h" #include "wrapper.h" #include "textenc.h" #include "pyodbcmodule.h" #include "cursor.h" #include "params.h" #include "connection.h" #include "errors.h" #include "dbspecific.h" #include "row.h" #include inline Connection* GetConnection(Cursor* cursor) { return (Connection*)cursor->cnxn; } struct DAEParam { PyObject *cell; SQLLEN maxlen; }; static int DetectCType(PyObject *cell, ParamInfo *pi) { // Detects and sets the appropriate C type to use for binding the specified Python object. // Also sets the buffer length to use. Returns false if unsuccessful. // // We're setting the pi ParameterType and BufferLength. These are based on the Python // value if not None or binary. For those, the *existing* ParameterType is used. 
This // could be from a previous row or could have been initialized from SQLDescribeParam. PyObject* cls = 0; if (PyBool_Check(cell)) { Type_Bool: pi->ValueType = SQL_C_BIT; pi->BufferLength = 1; } else if (PyLong_Check(cell)) { Type_Long: if (pi->ParameterType == SQL_NUMERIC || pi->ParameterType == SQL_DECIMAL) { pi->ValueType = SQL_C_NUMERIC; pi->BufferLength = sizeof(SQL_NUMERIC_STRUCT); } else { pi->ValueType = SQL_C_SBIGINT; pi->BufferLength = sizeof(long long); } } else if (PyFloat_Check(cell)) { Type_Float: pi->ValueType = SQL_C_DOUBLE; pi->BufferLength = sizeof(double); } else if (PyBytes_Check(cell)) { Type_Bytes: // Assume the SQL type is also character (2.x) or binary (3.x). // If it is a max-type (ColumnSize == 0), use DAE. pi->ValueType = SQL_C_BINARY; pi->BufferLength = pi->ColumnSize ? pi->ColumnSize : sizeof(DAEParam); } else if (PyUnicode_Check(cell)) { Type_Unicode: // Assume the SQL type is also wide character. // If it is a max-type (ColumnSize == 0), use DAE. pi->ValueType = SQL_C_WCHAR; pi->BufferLength = pi->ColumnSize ? pi->ColumnSize * sizeof(SQLWCHAR) : sizeof(DAEParam); } else if (PyDateTime_Check(cell)) { Type_DateTime: pi->ValueType = SQL_C_TYPE_TIMESTAMP; pi->BufferLength = sizeof(SQL_TIMESTAMP_STRUCT); } else if (PyDate_Check(cell)) { Type_Date: pi->ValueType = SQL_C_TYPE_DATE; pi->BufferLength = sizeof(SQL_DATE_STRUCT); } else if (PyTime_Check(cell)) { Type_Time: if (pi->ParameterType == SQL_SS_TIME2) { pi->ValueType = SQL_C_BINARY; pi->BufferLength = sizeof(SQL_SS_TIME2_STRUCT); } else { pi->ValueType = SQL_C_TYPE_TIME; pi->BufferLength = sizeof(SQL_TIME_STRUCT); } } else if (PyByteArray_Check(cell)) { // Type_ByteArray: pi->ValueType = SQL_C_BINARY; pi->BufferLength = pi->ColumnSize ? pi->ColumnSize : sizeof(DAEParam); } else if (cell == Py_None || cell == null_binary) { // Use the SQL type to guess what Nones should be inserted as here. switch (pi->ParameterType) { case SQL_CHAR: case SQL_VARCHAR: case SQL_LONGVARCHAR: goto Type_Bytes; case SQL_WCHAR: case SQL_WVARCHAR: case SQL_WLONGVARCHAR: goto Type_Unicode; case SQL_DECIMAL: case SQL_NUMERIC: goto Type_Decimal; case SQL_BIGINT: goto Type_Long; case SQL_SMALLINT: case SQL_INTEGER: case SQL_TINYINT: goto Type_Long; case SQL_REAL: case SQL_FLOAT: case SQL_DOUBLE: goto Type_Float; case SQL_BIT: goto Type_Bool; case SQL_BINARY: case SQL_VARBINARY: case SQL_LONGVARBINARY: // TODO: Shouldn't this be bytes? // goto Type_ByteArray; goto Type_Bytes; case SQL_TYPE_DATE: goto Type_Date; case SQL_SS_TIME2: case SQL_TYPE_TIME: goto Type_Time; case SQL_TYPE_TIMESTAMP: goto Type_DateTime; case SQL_GUID: goto Type_UUID; default: goto Type_Bytes; } } else if (IsInstanceForThread(cell, "uuid", "UUID", &cls) && cls) { Type_UUID: // UUID pi->ValueType = SQL_C_GUID; pi->BufferLength = 16; } else if (IsInstanceForThread(cell, "decimal", "Decimal", &cls) && cls) { Type_Decimal: pi->ValueType = SQL_C_NUMERIC; pi->BufferLength = sizeof(SQL_NUMERIC_STRUCT); } else { RaiseErrorV(0, ProgrammingError, "Unknown object type %s during describe", cell->ob_type->tp_name); return false; } return true; } #define WRITEOUT(type, ptr, val, indv) { *(type*)(*ptr) = (val); *ptr += sizeof(type); indv = sizeof(type); } // Convert Python object into C data for binding. // Output pointer is written to with data, indicator, and updated. // Returns false if object could not be converted. 
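// Buffer layout note (sketch, not original source): PyToCType packs each
// parameter as [value bytes (BufferLength)][SQLLEN indicator].  On a 64-bit
// build, a row holding a bigint and a double therefore looks like:
//
//     offset  0: long long value    offset  8: SQLLEN indicator (= 8)
//     offset 16: double value       offset 24: SQLLEN indicator (= 8)
//
// WRITEOUT stores the value and advances the output cursor; the indicator is
// appended by the common epilogue at the end of this function.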
static int PyToCType(Cursor *cur, unsigned char **outbuf, PyObject *cell, ParamInfo *pi) { PyObject *cls = 0; // TODO: Any way to make this a switch (O(1)) or similar instead of if-else chain? // TODO: Otherwise, rearrange these cases in order of frequency... SQLLEN ind; if (PyBool_Check(cell)) { if (pi->ValueType != SQL_C_BIT) return false; WRITEOUT(char, outbuf, cell == Py_True, ind); } else if (PyLong_Check(cell)) { if (pi->ValueType == SQL_C_SBIGINT) { WRITEOUT(long long, outbuf, PyLong_AsLongLong(cell), ind); } else if (pi->ValueType == SQL_C_NUMERIC) { // Convert a PyLong into a SQL_NUMERIC_STRUCT, without losing precision // or taking an unnecessary trip through character strings. SQL_NUMERIC_STRUCT *pNum = (SQL_NUMERIC_STRUCT*)*outbuf; PyObject *absVal = PyNumber_Absolute(cell); if (pi->DecimalDigits) { static PyObject *scaler_table[38]; static PyObject *tenObject; // Need to scale by 10**pi->DecimalDigits if (pi->DecimalDigits > 38) { NumericOverflow: RaiseErrorV(0, ProgrammingError, "Numeric overflow"); Py_XDECREF(absVal); return false; } if (!scaler_table[pi->DecimalDigits - 1]) { if (!tenObject) tenObject = PyLong_FromLong(10); PyObject *scaleObj = PyLong_FromLong(pi->DecimalDigits); scaler_table[pi->DecimalDigits - 1] = PyNumber_Power(tenObject, scaleObj, Py_None); Py_XDECREF(scaleObj); } PyObject *scaledVal = PyNumber_Multiply(absVal, scaler_table[pi->DecimalDigits - 1]); Py_XDECREF(absVal); absVal = scaledVal; } pNum->precision = (SQLCHAR)pi->ColumnSize; pNum->scale = (SQLCHAR)pi->DecimalDigits; pNum->sign = _PyLong_Sign(cell) >= 0; if (_PyLong_AsByteArray((PyLongObject*)absVal, pNum->val, sizeof(pNum->val), 1, 0)) goto NumericOverflow; Py_XDECREF(absVal); *outbuf += pi->BufferLength; ind = sizeof(SQL_NUMERIC_STRUCT); } else return false; } else if (PyFloat_Check(cell)) { if (pi->ValueType != SQL_C_DOUBLE) return false; WRITEOUT(double, outbuf, PyFloat_AS_DOUBLE(cell), ind); } else if (PyBytes_Check(cell)) { if (pi->ValueType != SQL_C_BINARY) return false; Py_ssize_t len = PyBytes_GET_SIZE(cell); if (!pi->ColumnSize) // DAE { DAEParam *pParam = (DAEParam*)*outbuf; Py_INCREF(cell); pParam->cell = cell; pParam->maxlen = cur->cnxn->GetMaxLength(pi->ValueType); *outbuf += sizeof(DAEParam); ind = cur->cnxn->need_long_data_len ? SQL_LEN_DATA_AT_EXEC((SQLLEN)len) : SQL_DATA_AT_EXEC; } else { if (len > pi->BufferLength) { RaiseErrorV(0, ProgrammingError, "String data, right truncation: length %u buffer %u", len, pi->BufferLength); return false; } memcpy(*outbuf, PyBytes_AS_STRING(cell), len); *outbuf += pi->BufferLength; ind = len; } } else if (PyUnicode_Check(cell)) { if (pi->ValueType != SQL_C_WCHAR) return false; const TextEnc& enc = cur->cnxn->unicode_enc; Object encoded(PyCodec_Encode(cell, enc.name, "strict")); if (!encoded) return false; if (enc.optenc == OPTENC_NONE && !PyBytes_CheckExact(encoded)) { PyErr_Format(PyExc_TypeError, "Unicode write encoding '%s' returned unexpected data type: %s", enc.name, encoded.Get()->ob_type->tp_name); return false; } Py_ssize_t len = PyBytes_GET_SIZE(encoded); if (!pi->ColumnSize) { // DAE DAEParam *pParam = (DAEParam*)*outbuf; pParam->cell = encoded.Detach(); pParam->maxlen = cur->cnxn->GetMaxLength(pi->ValueType); *outbuf += sizeof(DAEParam); ind = cur->cnxn->need_long_data_len ? 
SQL_LEN_DATA_AT_EXEC((SQLLEN)len) : SQL_DATA_AT_EXEC; } else { if (len > pi->BufferLength) { RaiseErrorV(0, ProgrammingError, "String data, right truncation: length %u buffer %u", len, pi->BufferLength); return false; } memcpy(*outbuf, PyBytes_AS_STRING((PyObject*)encoded), len); *outbuf += pi->BufferLength; ind = len; } } else if (PyDateTime_Check(cell)) { if (pi->ValueType != SQL_C_TYPE_TIMESTAMP) return false; SQL_TIMESTAMP_STRUCT *pts = (SQL_TIMESTAMP_STRUCT*)*outbuf; pts->year = PyDateTime_GET_YEAR(cell); pts->month = PyDateTime_GET_MONTH(cell); pts->day = PyDateTime_GET_DAY(cell); pts->hour = PyDateTime_DATE_GET_HOUR(cell); pts->minute = PyDateTime_DATE_GET_MINUTE(cell); pts->second = PyDateTime_DATE_GET_SECOND(cell); // Truncate the fraction according to precision size_t digits = min(9, pi->DecimalDigits); long fast_pow10[] = {1,10,100,1000,10000,100000,1000000,10000000,100000000,1000000000}; SQLUINTEGER milliseconds = PyDateTime_DATE_GET_MICROSECOND(cell) * 1000; pts->fraction = milliseconds - (milliseconds % fast_pow10[9 - digits]); *outbuf += sizeof(SQL_TIMESTAMP_STRUCT); ind = sizeof(SQL_TIMESTAMP_STRUCT); } else if (PyDate_Check(cell)) { if (pi->ValueType != SQL_C_TYPE_DATE) return false; SQL_DATE_STRUCT *pds = (SQL_DATE_STRUCT*)*outbuf; pds->year = PyDateTime_GET_YEAR(cell); pds->month = PyDateTime_GET_MONTH(cell); pds->day = PyDateTime_GET_DAY(cell); *outbuf += sizeof(SQL_DATE_STRUCT); ind = sizeof(SQL_DATE_STRUCT); } else if (PyTime_Check(cell)) { if (pi->ParameterType == SQL_SS_TIME2) { if (pi->ValueType != SQL_C_BINARY) return false; SQL_SS_TIME2_STRUCT *pt2s = (SQL_SS_TIME2_STRUCT*)*outbuf; pt2s->hour = PyDateTime_TIME_GET_HOUR(cell); pt2s->minute = PyDateTime_TIME_GET_MINUTE(cell); pt2s->second = PyDateTime_TIME_GET_SECOND(cell); // This is in units of nanoseconds. pt2s->fraction = PyDateTime_TIME_GET_MICROSECOND(cell)*1000; *outbuf += sizeof(SQL_SS_TIME2_STRUCT); ind = sizeof(SQL_SS_TIME2_STRUCT); } else { if (pi->ValueType != SQL_C_TYPE_TIME) return false; SQL_TIME_STRUCT *pts = (SQL_TIME_STRUCT*)*outbuf; pts->hour = PyDateTime_TIME_GET_HOUR(cell); pts->minute = PyDateTime_TIME_GET_MINUTE(cell); pts->second = PyDateTime_TIME_GET_SECOND(cell); *outbuf += sizeof(SQL_TIME_STRUCT); ind = sizeof(SQL_TIME_STRUCT); } } else if (PyByteArray_Check(cell)) { if (pi->ValueType != SQL_C_BINARY) return false; Py_ssize_t len = PyByteArray_GET_SIZE(cell); if (!pi->ColumnSize) // DAE { DAEParam *pParam = (DAEParam*)*outbuf; Py_INCREF(cell); pParam->cell = cell; pParam->maxlen = cur->cnxn->GetMaxLength(pi->ValueType); *outbuf += sizeof(DAEParam); ind = cur->cnxn->need_long_data_len ? SQL_LEN_DATA_AT_EXEC((SQLLEN)len) : SQL_DATA_AT_EXEC; } else { if (len > pi->BufferLength) { RaiseErrorV(0, ProgrammingError, "String data, right truncation: length %u buffer %u", len, pi->BufferLength); return false; } memcpy(*outbuf, PyByteArray_AS_STRING(cell), len); *outbuf += pi->BufferLength; ind = len; } } else if (IsInstanceForThread(cell, "uuid", "UUID", &cls) && cls) { if (pi->ValueType != SQL_C_GUID) return false; pi->BufferLength = 16; // Do we need to use "bytes" on a big endian machine? Object b(PyObject_GetAttrString(cell, "bytes_le")); if (!b) return false; memcpy(*outbuf, PyBytes_AS_STRING(b.Get()), sizeof(SQLGUID)); *outbuf += pi->BufferLength; ind = 16; } else if (IsInstanceForThread(cell, "decimal", "Decimal", &cls) && cls) { if (pi->ValueType != SQL_C_NUMERIC) return false; // Normalise, then get sign, exponent, and digits. 
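        // Worked example (sketch, not original source), assuming the target
        // column was described as NUMERIC(6,3) so pi->DecimalDigits == 3:
        //
        //     Decimal('12.34').normalize().as_tuple() -> (sign=0, digits=(1,2,3,4), exponent=-2)
        //     scaleDiff = DecimalDigits + exponent = 3 + (-2) = 1 -> append one zero digit
        //     scaled integer = 12340
        //     SQL_NUMERIC_STRUCT: precision=6, scale=3, sign=1 (positive),
        //                         val = 12340 little-endian, i.e. 12340 / 10**3 == 12.340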
PyObject *normCell = PyObject_CallMethod(cell, "normalize", 0); if (!normCell) return false; PyObject *cellParts = PyObject_CallMethod(normCell, "as_tuple", 0); if (!cellParts) return false; Py_XDECREF(normCell); SQL_NUMERIC_STRUCT *pNum = (SQL_NUMERIC_STRUCT*)*outbuf; pNum->sign = !PyLong_AsLong(PyTuple_GET_ITEM(cellParts, 0)); PyObject* digits = PyTuple_GET_ITEM(cellParts, 1); long exp = PyLong_AsLong(PyTuple_GET_ITEM(cellParts, 2)); Py_ssize_t numDigits = PyTuple_GET_SIZE(digits); // PyDecimal is digits * 10**exp = digits / 10**-exp // SQL_NUMERIC_STRUCT is val / 10**scale Py_ssize_t scaleDiff = pi->DecimalDigits + exp; if (scaleDiff < 0) { RaiseErrorV(0, ProgrammingError, "Converting decimal loses precision"); return false; } // Append '0's to the end of the digits to effect the scaling. PyObject *newDigits = PyTuple_New(numDigits + scaleDiff); for (Py_ssize_t i = 0; i < numDigits; i++) { PyTuple_SET_ITEM(newDigits, i, PyLong_FromLong(PyNumber_AsSsize_t(PyTuple_GET_ITEM(digits, i), 0))); } for (Py_ssize_t i = numDigits; i < scaleDiff + numDigits; i++) { PyTuple_SET_ITEM(newDigits, i, PyLong_FromLong(0)); } PyObject *args = Py_BuildValue("((iOi))", 0, newDigits, 0); PyObject *scaledDecimal = PyObject_CallObject((PyObject*)cell->ob_type, args); PyObject *digitLong = PyNumber_Long(scaledDecimal); Py_XDECREF(args); Py_XDECREF(scaledDecimal); Py_XDECREF(newDigits); Py_XDECREF(cellParts); pNum->precision = (SQLCHAR)pi->ColumnSize; pNum->scale = (SQLCHAR)pi->DecimalDigits; int ret = _PyLong_AsByteArray((PyLongObject*)digitLong, pNum->val, sizeof(pNum->val), 1, 0); Py_XDECREF(digitLong); if (ret) { PyErr_Clear(); RaiseErrorV(0, ProgrammingError, "Numeric overflow"); return false; } *outbuf += pi->BufferLength; ind = sizeof(SQL_NUMERIC_STRUCT); } else if (cell == Py_None || cell == null_binary) { // REVIEW: Theoretically we could eliminate the initial call to SQLDescribeParam for // all columns if we had a special value for "unknown" and called SQLDescribeParam only // here when we hit it. Even then, only if we don't already have previous Python // objects! *outbuf += pi->BufferLength; ind = SQL_NULL_DATA; } else { RaiseErrorV(0, ProgrammingError, "Unknown object type: %s",cell->ob_type->tp_name); return false; } *(SQLLEN*)(*outbuf) = ind; *outbuf += sizeof(SQLLEN); return true; } static bool GetParamType(Cursor* cur, Py_ssize_t iParam, SQLSMALLINT& type); static void FreeInfos(ParamInfo* a, Py_ssize_t count) { for (Py_ssize_t i = 0; i < count; i++) { if (a[i].allocated) PyMem_Free(a[i].ParameterValuePtr); if (a[i].ParameterType == SQL_SS_TABLE && a[i].nested) FreeInfos(a[i].nested, a[i].maxlength); Py_XDECREF(a[i].pObject); } PyMem_Free(a); } static bool GetNullInfo(Cursor* cur, Py_ssize_t index, ParamInfo& info) { if (!GetParamType(cur, index, info.ParameterType)) return false; info.ValueType = SQL_C_DEFAULT; info.ColumnSize = 1; info.StrLen_or_Ind = SQL_NULL_DATA; return true; } static bool GetNullBinaryInfo(Cursor* cur, Py_ssize_t index, ParamInfo& info) { info.ValueType = SQL_C_BINARY; info.ParameterType = SQL_BINARY; info.ColumnSize = 1; info.ParameterValuePtr = 0; info.StrLen_or_Ind = SQL_NULL_DATA; return true; } static bool GetBytesInfo(Cursor* cur, Py_ssize_t index, PyObject* param, ParamInfo& info, bool isTVP) { // The Python 3 version that writes bytes as binary data. Py_ssize_t cb = PyBytes_GET_SIZE(param); info.ValueType = SQL_C_BINARY; info.ColumnSize = isTVP ? 
0 : (SQLUINTEGER)max(cb, 1); SQLLEN maxlength = cur->cnxn->GetMaxLength(info.ValueType); if (maxlength == 0 || cb <= maxlength || isTVP) { info.ParameterType = SQL_VARBINARY; info.StrLen_or_Ind = cb; info.BufferLength = (SQLLEN)info.ColumnSize; info.ParameterValuePtr = PyBytes_AS_STRING(param); } else { // Too long to pass all at once, so we'll provide the data at execute. info.ParameterType = SQL_LONGVARBINARY; info.StrLen_or_Ind = cur->cnxn->need_long_data_len ? SQL_LEN_DATA_AT_EXEC((SQLLEN)cb) : SQL_DATA_AT_EXEC; info.ParameterValuePtr = &info; info.BufferLength = sizeof(ParamInfo*); info.pObject = param; Py_INCREF(info.pObject); info.maxlength = maxlength; } return true; } static bool GetUnicodeInfo(Cursor* cur, Py_ssize_t index, PyObject* param, ParamInfo& info, bool isTVP) { const TextEnc& enc = cur->cnxn->unicode_enc; info.ValueType = enc.ctype; Object encoded(PyCodec_Encode(param, enc.name, "strict")); if (!encoded) return false; if (enc.optenc == OPTENC_NONE && !PyBytes_CheckExact(encoded)) { PyErr_Format(PyExc_TypeError, "Unicode write encoding '%s' returned unexpected data type: %s", enc.name, encoded.Get()->ob_type->tp_name); return false; } Py_ssize_t cb = PyBytes_GET_SIZE(encoded); int denom = 1; if (enc.optenc == OPTENC_UTF16) { denom = 2; } else if (enc.optenc == OPTENC_UTF32) { denom = 4; } info.ColumnSize = isTVP ? 0 : (SQLUINTEGER)max(cb / denom, 1); info.pObject = encoded.Detach(); SQLLEN maxlength = cur->cnxn->GetMaxLength(enc.ctype); if (maxlength == 0 || cb <= maxlength || isTVP) { info.ParameterType = (enc.ctype == SQL_C_CHAR) ? SQL_VARCHAR : SQL_WVARCHAR; info.ParameterValuePtr = PyBytes_AS_STRING(info.pObject); info.BufferLength = (SQLINTEGER)cb; info.StrLen_or_Ind = (SQLINTEGER)cb; } else { // Too long to pass all at once, so we'll provide the data at execute. info.ParameterType = (enc.ctype == SQL_C_CHAR) ? SQL_LONGVARCHAR : SQL_WLONGVARCHAR; info.ParameterValuePtr = &info; info.BufferLength = sizeof(ParamInfo*); info.StrLen_or_Ind = cur->cnxn->need_long_data_len ? SQL_LEN_DATA_AT_EXEC((SQLINTEGER)cb) : SQL_DATA_AT_EXEC; info.maxlength = maxlength; } return true; } static bool GetBooleanInfo(Cursor* cur, Py_ssize_t index, PyObject* param, ParamInfo& info) { info.ValueType = SQL_C_BIT; info.ParameterType = SQL_BIT; info.StrLen_or_Ind = 1; info.Data.ch = (unsigned char)(param == Py_True ? 1 : 0); info.ParameterValuePtr = &info.Data.ch; return true; } static bool GetDateTimeInfo(Cursor* cur, Py_ssize_t index, PyObject* param, ParamInfo& info) { info.Data.timestamp.year = (SQLSMALLINT) PyDateTime_GET_YEAR(param); info.Data.timestamp.month = (SQLUSMALLINT)PyDateTime_GET_MONTH(param); info.Data.timestamp.day = (SQLUSMALLINT)PyDateTime_GET_DAY(param); info.Data.timestamp.hour = (SQLUSMALLINT)PyDateTime_DATE_GET_HOUR(param); info.Data.timestamp.minute = (SQLUSMALLINT)PyDateTime_DATE_GET_MINUTE(param); info.Data.timestamp.second = (SQLUSMALLINT)PyDateTime_DATE_GET_SECOND(param); // SQL Server chokes if the fraction has more data than the database supports. We expect other databases to be the // same, so we reduce the value to what the database supports. http://support.microsoft.com/kb/263872 int precision = ((Connection*)cur->cnxn)->datetime_precision - 20; // (20 includes a separating period) if (precision <= 0) { info.Data.timestamp.fraction = 0; } else { info.Data.timestamp.fraction = (SQLUINTEGER)(PyDateTime_DATE_GET_MICROSECOND(param) * 1000); // 1000 == micro -> nano // (How many leading digits do we want to keep? 
With SQL Server 2005, this should be 3: 123000000) int keep = (int)pow(10.0, 9-min(9, precision)); info.Data.timestamp.fraction = info.Data.timestamp.fraction / keep * keep; info.DecimalDigits = (SQLSMALLINT)precision; } info.ValueType = SQL_C_TIMESTAMP; info.ParameterType = SQL_TIMESTAMP; info.ColumnSize = (SQLUINTEGER)((Connection*)cur->cnxn)->datetime_precision; info.StrLen_or_Ind = sizeof(TIMESTAMP_STRUCT); info.ParameterValuePtr = &info.Data.timestamp; return true; } static bool GetDateInfo(Cursor* cur, Py_ssize_t index, PyObject* param, ParamInfo& info) { info.Data.date.year = (SQLSMALLINT) PyDateTime_GET_YEAR(param); info.Data.date.month = (SQLUSMALLINT)PyDateTime_GET_MONTH(param); info.Data.date.day = (SQLUSMALLINT)PyDateTime_GET_DAY(param); info.ValueType = SQL_C_TYPE_DATE; info.ParameterType = SQL_TYPE_DATE; info.ColumnSize = 10; info.ParameterValuePtr = &info.Data.date; info.StrLen_or_Ind = sizeof(DATE_STRUCT); return true; } static bool GetTimeInfo(Cursor* cur, Py_ssize_t index, PyObject* param, ParamInfo& info) { info.Data.time.hour = (SQLUSMALLINT)PyDateTime_TIME_GET_HOUR(param); info.Data.time.minute = (SQLUSMALLINT)PyDateTime_TIME_GET_MINUTE(param); info.Data.time.second = (SQLUSMALLINT)PyDateTime_TIME_GET_SECOND(param); info.ValueType = SQL_C_TYPE_TIME; info.ParameterType = SQL_TYPE_TIME; info.ColumnSize = 8; info.ParameterValuePtr = &info.Data.time; info.StrLen_or_Ind = sizeof(TIME_STRUCT); return true; } inline bool NeedsBigInt(long long ll) { // NOTE: Smallest 32-bit int should be -214748368 but the MS compiler v.1900 AMD64 // says that (10 < -2147483648). Perhaps I miscalculated the minimum? return ll < -2147483647 || ll > 2147483647; } static bool GetLongInfo(Cursor* cur, Py_ssize_t index, PyObject* param, ParamInfo& info, bool isTVP) { // Since some drivers like Access don't support BIGINT, we use INTEGER when possible. // Unfortunately this may mean that we end up with two execution plans for the same SQL. // We could use SQLDescribeParam but that's kind of expensive. long long value = PyLong_AsLongLong(param); if (PyErr_Occurred()) return false; if (isTVP || NeedsBigInt(value)) { info.Data.i64 = (INT64)value; info.ValueType = SQL_C_SBIGINT; info.ParameterType = SQL_BIGINT; info.ParameterValuePtr = &info.Data.i64; info.StrLen_or_Ind = 8; } else { info.Data.i32 = (int)value; info.ValueType = SQL_C_LONG; info.ParameterType = SQL_INTEGER; info.ParameterValuePtr = &info.Data.i32; info.StrLen_or_Ind = 4; } return true; } static bool GetFloatInfo(Cursor* cur, Py_ssize_t index, PyObject* param, ParamInfo& info) { // Python floats are usually numeric values, but can also be "Infinity" or "NaN". // https://docs.python.org/3/library/functions.html#float // PyFloat_AsDouble() does not generate an error for Infinity/NaN, and it is not // easy to check for those values. Typically, the database will reject them. double value = PyFloat_AsDouble(param); if (PyErr_Occurred()) return false; info.Data.dbl = value; info.ValueType = SQL_C_DOUBLE; info.ParameterType = SQL_DOUBLE; info.ParameterValuePtr = &info.Data.dbl; info.ColumnSize = 15; info.StrLen_or_Ind = sizeof(double); return true; } static char* CreateDecimalString(long sign, PyObject* digits, long exp) { // Allocate an ASCII string containing the given decimal. 
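    // Quick check of the middle branch below (sketch): sign=1, digits=(9,5),
    // exp=-1 satisfies -exp < count, so len = sign + count + 2 = 5 and the
    // buffer is written as '-' '9' '.' '5' '\0', i.e. "-9.5".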
long count = (long)PyTuple_GET_SIZE(digits); char* pch; long len; if (exp >= 0) { // (1 2 3) exp = 2 --> '12300' len = sign + count + exp + 1; // 1: NULL pch = (char*)PyMem_Malloc((size_t)len); if (pch) { char* p = pch; if (sign) *p++ = '-'; for (long i = 0; i < count; i++) *p++ = (char)('0' + PyLong_AS_LONG(PyTuple_GET_ITEM(digits, i))); for (long i = 0; i < exp; i++) *p++ = '0'; *p = 0; } } else if (-exp < count) { // (1 2 3) exp = -2 --> 1.23 : prec = 3, scale = 2 len = sign + count + 2; // 2: decimal + NULL pch = (char*)PyMem_Malloc((size_t)len); if (pch) { char* p = pch; if (sign) *p++ = '-'; int i = 0; for (; i < (count + exp); i++) *p++ = (char)('0' + PyLong_AS_LONG(PyTuple_GET_ITEM(digits, i))); *p++ = '.'; for (; i < count; i++) *p++ = (char)('0' + PyLong_AS_LONG(PyTuple_GET_ITEM(digits, i))); *p++ = 0; } } else { // (1 2 3) exp = -5 --> 0.00123 : prec = 5, scale = 5 len = sign + -exp + 3; // 3: leading zero + decimal + NULL pch = (char*)PyMem_Malloc((size_t)len); if (pch) { char* p = pch; if (sign) *p++ = '-'; *p++ = '0'; *p++ = '.'; for (int i = 0; i < -(exp + count); i++) *p++ = '0'; for (int i = 0; i < count; i++) *p++ = (char)('0' + PyLong_AS_LONG(PyTuple_GET_ITEM(digits, i))); *p++ = 0; } } assert(pch == 0 || (int)(strlen(pch) + 1) == len); return pch; } static bool GetUUIDInfo(Cursor* cur, Py_ssize_t index, PyObject* param, ParamInfo& info, PyObject* uuid_type) { // uuid_type: This is a new reference that we are responsible for freeing. Object tmp(uuid_type); info.ValueType = SQL_C_GUID; info.ParameterType = SQL_GUID; info.ColumnSize = 16; info.allocated = true; info.ParameterValuePtr = PyMem_Malloc(sizeof(SQLGUID)); if (!info.ParameterValuePtr) { PyErr_NoMemory(); return false; } // Do we need to use "bytes" on a big endian machine? Object b(PyObject_GetAttrString(param, "bytes_le")); if (!b) return false; memcpy(info.ParameterValuePtr, PyBytes_AS_STRING(b.Get()), sizeof(SQLGUID)); info.StrLen_or_Ind = sizeof(SQLGUID); return true; } static bool GetDecimalInfo(Cursor* cur, Py_ssize_t index, PyObject* param, ParamInfo& info, PyObject* decimal_type) { // decimal_type: This is a new reference that we are responsible for freeing. Object tmp(decimal_type); // The NUMERIC structure never works right with SQL Server and probably a lot of other drivers. We'll bind as a // string. Unfortunately, the Decimal class doesn't seem to have a way to force it to return a string without // exponents, so we'll have to build it ourselves. 
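    // Worked example (sketch, not original source): Decimal('123.45').as_tuple()
    // is (sign=0, digits=(1,2,3,4,5), exponent=-2), which takes the middle
    // branch below: ColumnSize (precision) = 5, DecimalDigits (scale) = 2, and
    // the value is bound as the ASCII string "123.45" with ValueType SQL_C_CHAR
    // and ParameterType SQL_NUMERIC.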
Object t(PyObject_CallMethod(param, "as_tuple", 0)); if (!t) return false; long sign = PyLong_AsLong(PyTuple_GET_ITEM(t.Get(), 0)); PyObject* digits = PyTuple_GET_ITEM(t.Get(), 1); long exp = PyLong_AsLong(PyTuple_GET_ITEM(t.Get(), 2)); Py_ssize_t count = PyTuple_GET_SIZE(digits); info.ValueType = SQL_C_CHAR; info.ParameterType = SQL_NUMERIC; if (exp >= 0) { // (1 2 3) exp = 2 --> '12300' info.ColumnSize = (SQLUINTEGER)count + exp; info.DecimalDigits = 0; } else if (-exp <= count) { // (1 2 3) exp = -2 --> 1.23 : prec = 3, scale = 2 info.ColumnSize = (SQLUINTEGER)count; info.DecimalDigits = (SQLSMALLINT)-exp; } else { // (1 2 3) exp = -5 --> 0.00123 : prec = 5, scale = 5 info.ColumnSize = (SQLUINTEGER)(-exp); info.DecimalDigits = (SQLSMALLINT)info.ColumnSize; } assert(info.ColumnSize >= (SQLULEN)info.DecimalDigits); info.ParameterValuePtr = CreateDecimalString(sign, digits, exp); if (!info.ParameterValuePtr) { PyErr_NoMemory(); return false; } info.allocated = true; info.StrLen_or_Ind = (SQLINTEGER)strlen((char*)info.ParameterValuePtr); return true; } static bool GetByteArrayInfo(Cursor* cur, Py_ssize_t index, PyObject* param, ParamInfo& info, bool isTVP) { info.ValueType = SQL_C_BINARY; Py_ssize_t cb = PyByteArray_Size(param); SQLLEN maxlength = cur->cnxn->GetMaxLength(info.ValueType); if (maxlength == 0 || cb <= maxlength || isTVP) { info.ParameterType = SQL_VARBINARY; info.ParameterValuePtr = (SQLPOINTER)PyByteArray_AsString(param); info.BufferLength = (SQLINTEGER)cb; info.ColumnSize = isTVP?0:(SQLUINTEGER)max(cb, 1); info.StrLen_or_Ind = (SQLINTEGER)cb; } else { info.ParameterType = SQL_LONGVARBINARY; info.ParameterValuePtr = &info; info.BufferLength = sizeof(ParamInfo*); info.ColumnSize = (SQLUINTEGER)cb; info.StrLen_or_Ind = cur->cnxn->need_long_data_len ? SQL_LEN_DATA_AT_EXEC((SQLLEN)cb) : SQL_DATA_AT_EXEC; info.pObject = param; Py_INCREF(info.pObject); info.maxlength = maxlength; } return true; } // TVP static bool GetTableInfo(Cursor *cur, Py_ssize_t index, PyObject* param, ParamInfo& info) { // This is used for SQL Server's "table valued parameters" or TVPs. int nskip = 0; Py_ssize_t nrows = PySequence_Size(param); if (nrows > 0) { PyObject *cell0 = PySequence_GetItem(param, 0); if (cell0 == NULL) { return false; } if (PyBytes_Check(cell0) || PyUnicode_Check(cell0)) { nskip++; if (nrows > 1) { PyObject *cell1 = PySequence_GetItem(param, 1); nskip += (PyBytes_Check(cell1) || PyUnicode_Check(cell1)); Py_XDECREF(cell1); } } Py_XDECREF(cell0); } nrows -= nskip; if (!nskip) { // Need to describe in order to fill in IPD with the TVP's type name, because user has // not provided it SQLSMALLINT tvptype; SQLDescribeParam(cur->hstmt, index + 1, &tvptype, 0, 0, 0); } info.pObject = param; Py_INCREF(param); info.ValueType = SQL_C_BINARY; info.ParameterType = SQL_SS_TABLE; info.ColumnSize = nrows; info.DecimalDigits = 0; info.ParameterValuePtr = &info; info.BufferLength = 0; info.curTvpRow = nskip; info.StrLen_or_Ind = SQL_DATA_AT_EXEC; info.allocated = false; return true; } bool GetParameterInfo(Cursor* cur, Py_ssize_t index, PyObject* param, ParamInfo& info, bool isTVP) { // Determines the type of SQL parameter that will be used for this parameter based on the Python data type. // // Populates `info`. 
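    // Dispatch overview (descriptive only): the checks below map the Python
    // type to a binder and a default ODBC SQL type, e.g.
    //     None    -> GetNullInfo     (type from SQLDescribeParam, else SQL_VARCHAR)
    //     bytes   -> GetBytesInfo    (SQL_VARBINARY, or SQL_LONGVARBINARY via DAE)
    //     str     -> GetUnicodeInfo  (SQL_VARCHAR/SQL_WVARCHAR per connection encoding)
    //     int     -> GetLongInfo     (SQL_INTEGER, or SQL_BIGINT for large values)
    //     Decimal -> GetDecimalInfo  (SQL_NUMERIC, bound as a character string)
    // Sequences that fall through every other check are treated as SQL Server TVPs.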
if (param == Py_None) return GetNullInfo(cur, index, info); if (param == null_binary) return GetNullBinaryInfo(cur, index, info); if (PyBytes_Check(param)) return GetBytesInfo(cur, index, param, info, isTVP); if (PyUnicode_Check(param)) return GetUnicodeInfo(cur, index, param, info, isTVP); if (PyBool_Check(param)) return GetBooleanInfo(cur, index, param, info); if (PyDateTime_Check(param)) return GetDateTimeInfo(cur, index, param, info); if (PyDate_Check(param)) return GetDateInfo(cur, index, param, info); if (PyTime_Check(param)) return GetTimeInfo(cur, index, param, info); if (PyLong_Check(param)) return GetLongInfo(cur, index, param, info, isTVP); if (PyFloat_Check(param)) return GetFloatInfo(cur, index, param, info); if (PyByteArray_Check(param)) return GetByteArrayInfo(cur, index, param, info, isTVP); PyObject* cls = 0; if (!IsInstanceForThread(param, "decimal", "Decimal", &cls)) return false; if (cls != 0) return GetDecimalInfo(cur, index, param, info, cls); if (!IsInstanceForThread(param, "uuid", "UUID", &cls)) return false; if (cls != 0) return GetUUIDInfo(cur, index, param, info, cls); if (PySequence_Check(param)) return GetTableInfo(cur, index, param, info); RaiseErrorV("HY105", ProgrammingError, "Invalid parameter type. param-index=%zd param-type=%s", index, Py_TYPE(param)->tp_name); return false; } static bool getObjectValue(PyObject *pObject, long& nValue) { if (pObject == NULL) return false; if (PyLong_Check(pObject)) { nValue = PyLong_AsLong(pObject); return true; } return false; } static long getSequenceValue(PyObject *pSequence, Py_ssize_t nIndex, long nDefault, bool &bChanged) { PyObject *obj; long v = nDefault; obj = PySequence_GetItem(pSequence, nIndex); if (obj != NULL) { if (getObjectValue(obj, v)) bChanged = true; } Py_CLEAR(obj); return v; } /** * UpdateParamInfo updates the current columnsizes with the information provided * by a set from the client code, to manually override values returned by SQLDescribeParam() * which can be wrong in case of SQL Server statements. * * sparhawk@gmx.at (Gerhard Gruber) */ static bool UpdateParamInfo(Cursor* pCursor, Py_ssize_t nIndex, ParamInfo *pInfo) { if (pCursor->inputsizes == NULL || nIndex >= PySequence_Length(pCursor->inputsizes)) return false; PyObject *desc = PySequence_GetItem(pCursor->inputsizes, nIndex); if (desc == NULL) return false; bool rc = false; long v; bool clearError = true; // If the error was already set before we entered here, it is not from us, so we leave it alone. if (PyErr_Occurred()) clearError = false; // integer - sets colsize // type object - sets sqltype (mapping between Python and SQL types is not 1:1 so it may not always work) // Consider: sequence of (colsize, sqltype, scale) if (getObjectValue(desc, v)) { pInfo->ColumnSize = (SQLULEN)v; rc = true; } else if (PySequence_Check(desc)) { pInfo->ParameterType = (SQLSMALLINT)getSequenceValue(desc, 0, (long)pInfo->ParameterType, rc); pInfo->ColumnSize = (SQLUINTEGER)getSequenceValue(desc, 1, (long)pInfo->ColumnSize, rc); pInfo->DecimalDigits = (SQLSMALLINT)getSequenceValue(desc, 2, (long)pInfo->ColumnSize, rc); } Py_CLEAR(desc); // If the user didn't provide the full array (in case he gave us an array), the above code would // set an internal error on the cursor object, as we try to read three values from an array // which may not have as many. This is ok, because we don't really care if the array is not completely // specified, so we clear the error in case it comes from this. 
If the error was already present before that // we keep it, so the user can handle it. if (clearError) PyErr_Clear(); return rc; } bool BindParameter(Cursor* cur, Py_ssize_t index, ParamInfo& info) { SQLSMALLINT sqltype = info.ParameterType; SQLULEN colsize = info.ColumnSize; SQLSMALLINT scale = info.DecimalDigits; if (UpdateParamInfo(cur, index, &info)) { // Reload in case it has changed. colsize = info.ColumnSize; sqltype = info.ParameterType; scale = info.DecimalDigits; } TRACE("BIND: param=%ld ValueType=%d (%s) ParameterType=%d (%s) ColumnSize=%ld DecimalDigits=%d BufferLength=%ld *pcb=%ld\n", (index+1), info.ValueType, CTypeName(info.ValueType), sqltype, SqlTypeName(sqltype), colsize, scale, info.BufferLength, info.StrLen_or_Ind); SQLRETURN ret = -1; Py_BEGIN_ALLOW_THREADS ret = SQLBindParameter(cur->hstmt, (SQLUSMALLINT)(index + 1), SQL_PARAM_INPUT, info.ValueType, sqltype, colsize, scale, sqltype == SQL_SS_TABLE ? 0 : info.ParameterValuePtr, info.BufferLength, &info.StrLen_or_Ind); Py_END_ALLOW_THREADS; if (GetConnection(cur)->hdbc == SQL_NULL_HANDLE) { // The connection was closed by another thread in the ALLOW_THREADS block above. RaiseErrorV(0, ProgrammingError, "The cursor's connection was closed."); return false; } if (!SQL_SUCCEEDED(ret)) { RaiseErrorFromHandle(cur->cnxn, "SQLBindParameter", GetConnection(cur)->hdbc, cur->hstmt); return false; } // This is a TVP. Enter and bind its parameters, allocate descriptors for its columns (all as DAE) if (sqltype == SQL_SS_TABLE) { Py_ssize_t nrows = PySequence_Size(info.pObject); if (nrows > 0) { PyObject *cell0 = PySequence_GetItem(info.pObject, 0); Py_XDECREF(cell0); if (PyBytes_Check(cell0) || PyUnicode_Check(cell0)) { SQLHDESC desc; PyObject *tvpname = PyCodec_Encode(cell0, "UTF-16LE", 0); SQLGetStmtAttr(cur->hstmt, SQL_ATTR_IMP_PARAM_DESC, &desc, 0, 0); SQLSetDescFieldW(desc, index + 1, SQL_CA_SS_TYPE_NAME, (SQLPOINTER)PyBytes_AsString(tvpname), PyBytes_Size(tvpname)); Py_XDECREF(tvpname); if (nrows > 1) { PyObject *cell1 = PySequence_GetItem(info.pObject, 1); Py_XDECREF(cell1); if (PyBytes_Check(cell1) || PyUnicode_Check(cell1)) { PyObject *tvpschema = PyCodec_Encode(cell1, "UTF-16LE", 0); SQLSetDescFieldW(desc, index + 1, SQL_CA_SS_SCHEMA_NAME, (SQLPOINTER)PyBytes_AsString(tvpschema), PyBytes_Size(tvpschema)); Py_XDECREF(tvpschema); } } } } SQLHDESC desc; SQLGetStmtAttr(cur->hstmt, SQL_ATTR_APP_PARAM_DESC, &desc, 0, 0); SQLSetDescField(desc, index + 1, SQL_DESC_DATA_PTR, (SQLPOINTER)info.ParameterValuePtr, 0); int err = 0; ret = SQLSetStmtAttr(cur->hstmt, SQL_SOPT_SS_PARAM_FOCUS, (SQLPOINTER)(index + 1), SQL_IS_INTEGER); if (!SQL_SUCCEEDED(ret)) { RaiseErrorFromHandle(cur->cnxn, "SQLSetStmtAttr", GetConnection(cur)->hdbc, cur->hstmt); return false; } Py_ssize_t i = PySequence_Size(info.pObject) - info.ColumnSize; Py_ssize_t ncols = 0; while (i >= 0 && i < PySequence_Size(info.pObject)) { PyObject *row = PySequence_GetItem(info.pObject, i); Py_XDECREF(row); if (!PySequence_Check(row)) { RaiseErrorV(0, ProgrammingError, "A TVP's rows must be Sequence objects."); err = 1; break; } if (ncols && ncols != PySequence_Size(row)) { RaiseErrorV(0, ProgrammingError, "A TVP's rows must all be the same size."); err = 1; break; } ncols = PySequence_Size(row); i++; } if (!ncols) { // TVP has no columns --- is null info.nested = 0; info.StrLen_or_Ind = SQL_DEFAULT_PARAM; } else { PyObject *row = PySequence_GetItem(info.pObject, PySequence_Size(info.pObject) - info.ColumnSize); Py_XDECREF(row); info.nested = (ParamInfo*)PyMem_Malloc(ncols * 
sizeof(ParamInfo)); info.maxlength = ncols; memset(info.nested, 0, ncols * sizeof(ParamInfo)); for(i=0;ihstmt, (SQLUSMALLINT)(i + 1), SQL_PARAM_INPUT, info.nested[i].ValueType, info.nested[i].ParameterType, info.nested[i].ColumnSize, info.nested[i].DecimalDigits, info.nested + i, info.nested[i].BufferLength, &info.nested[i].StrLen_or_Ind); Py_END_ALLOW_THREADS; if (GetConnection(cur)->hdbc == SQL_NULL_HANDLE) { // The connection was closed by another thread in the ALLOW_THREADS block above. RaiseErrorV(0, ProgrammingError, "The cursor's connection was closed."); return false; } if (!SQL_SUCCEEDED(ret)) { RaiseErrorFromHandle(cur->cnxn, "SQLBindParameter", GetConnection(cur)->hdbc, cur->hstmt); return false; } } } ret = SQLSetStmtAttr(cur->hstmt, SQL_SOPT_SS_PARAM_FOCUS, 0, SQL_IS_INTEGER); if (!SQL_SUCCEEDED(ret)) { RaiseErrorFromHandle(cur->cnxn, "SQLSetStmtAttr", GetConnection(cur)->hdbc, cur->hstmt); return false; } if (err) return false; } return true; } void FreeParameterData(Cursor* cur) { // Unbinds the parameters and frees the parameter buffer. if (cur->paramInfos) { // MS ODBC will crash if we use an HSTMT after the HDBC has been freed. if (cur->cnxn->hdbc != SQL_NULL_HANDLE) { Py_BEGIN_ALLOW_THREADS SQLFreeStmt(cur->hstmt, SQL_RESET_PARAMS); Py_END_ALLOW_THREADS } FreeInfos(cur->paramInfos, cur->paramcount); cur->paramInfos = 0; } } void FreeParameterInfo(Cursor* cur) { // Internal function to free just the cached parameter information. This is not used by the general cursor code // since this information is also freed in the less granular free_results function that clears everything. Py_XDECREF(cur->pPreparedSQL); PyMem_Free(cur->paramtypes); cur->pPreparedSQL = 0; cur->paramtypes = 0; cur->paramcount = 0; } bool Prepare(Cursor* cur, PyObject* pSql) { // // Prepare the SQL if necessary. // if (pSql != cur->pPreparedSQL) { FreeParameterInfo(cur); SQLRETURN ret = 0; SQLSMALLINT cParamsT = 0; const char* szErrorFunc = "SQLPrepare"; const TextEnc* penc; penc = &cur->cnxn->unicode_enc; Object query(penc->Encode(pSql)); if (!query) return 0; bool isWide = (penc->ctype == SQL_C_WCHAR); const char* pch = PyBytes_AS_STRING(query.Get()); SQLINTEGER cch = (SQLINTEGER)(PyBytes_GET_SIZE(query.Get()) / (isWide ? sizeof(uint16_t) : 1)); TRACE("SQLPrepare(%s)\n", pch); Py_BEGIN_ALLOW_THREADS if (isWide) ret = SQLPrepareW(cur->hstmt, (SQLWCHAR*)pch, cch); else ret = SQLPrepare(cur->hstmt, (SQLCHAR*)pch, cch); if (SQL_SUCCEEDED(ret)) { szErrorFunc = "SQLNumParams"; ret = SQLNumParams(cur->hstmt, &cParamsT); } Py_END_ALLOW_THREADS if (cur->cnxn->hdbc == SQL_NULL_HANDLE) { // The connection was closed by another thread in the ALLOW_THREADS block above. RaiseErrorV(0, ProgrammingError, "The cursor's connection was closed."); return false; } if (!SQL_SUCCEEDED(ret)) { RaiseErrorFromHandle(cur->cnxn, szErrorFunc, GetConnection(cur)->hdbc, cur->hstmt); return false; } cur->paramcount = (int)cParamsT; cur->pPreparedSQL = pSql; Py_INCREF(cur->pPreparedSQL); } return true; } bool PrepareAndBind(Cursor* cur, PyObject* pSql, PyObject* original_params, bool skip_first) { // // Normalize the parameter variables. // // Since we may replace parameters (we replace objects with Py_True/Py_False when writing to a bit/bool column), // allocate an array and use it instead of the original sequence int params_offset = skip_first ? 1 : 0; Py_ssize_t cParams = original_params == 0 ? 
0 : PySequence_Length(original_params) - params_offset; if (!Prepare(cur, pSql)) return false; if (cParams != cur->paramcount) { RaiseErrorV(0, ProgrammingError, "The SQL contains %d parameter markers, but %d parameters were supplied", cur->paramcount, cParams); return false; } cur->paramInfos = (ParamInfo*)PyMem_Malloc(sizeof(ParamInfo) * cParams); if (cur->paramInfos == 0) { PyErr_NoMemory(); return 0; } memset(cur->paramInfos, 0, sizeof(ParamInfo) * cParams); // Since you can't call SQLDesribeParam *after* calling SQLBindParameter, we'll loop through all of the // GetParameterInfos first, then bind. for (Py_ssize_t i = 0; i < cParams; i++) { Object param(PySequence_GetItem(original_params, i + params_offset)); if (!GetParameterInfo(cur, i, param, cur->paramInfos[i], false)) { FreeInfos(cur->paramInfos, cParams); cur->paramInfos = 0; return false; } } for (Py_ssize_t i = 0; i < cParams; i++) { if (!BindParameter(cur, i, cur->paramInfos[i])) { FreeInfos(cur->paramInfos, cParams); cur->paramInfos = 0; return false; } } return true; } bool ExecuteMulti(Cursor* cur, PyObject* pSql, PyObject* paramArrayObj) { bool ret = true; char *szLastFunction = 0; SQLRETURN rc = SQL_SUCCESS; if (!Prepare(cur, pSql)) return false; if (!(cur->paramInfos = (ParamInfo*)PyMem_Malloc(sizeof(ParamInfo) * cur->paramcount))) { PyErr_NoMemory(); return false; } memset(cur->paramInfos, 0, sizeof(ParamInfo) * cur->paramcount); // Wouldn't hurt to free threads here? Or is this fast enough because it is local? // Describe each parameter (SQL type) in preparation for allocation of paramset array for (Py_ssize_t i = 0; i < cur->paramcount; i++) { SQLSMALLINT nullable; if (!SQL_SUCCEEDED(SQLDescribeParam(cur->hstmt, i + 1, &(cur->paramInfos[i].ParameterType), &cur->paramInfos[i].ColumnSize, &cur->paramInfos[i].DecimalDigits, &nullable))) { // Default to a medium-length varchar if describing the parameter didn't work cur->paramInfos[i].ParameterType = SQL_VARCHAR; cur->paramInfos[i].ColumnSize = 255; cur->paramInfos[i].DecimalDigits = 0; } // This supports overriding of input sizes via setinputsizes // See issue 380 // The logic is duplicated from BindParameter UpdateParamInfo(cur, i, &cur->paramInfos[i]); } PyObject *rowseq = PySequence_Fast(paramArrayObj, "Parameter array must be a sequence."); if (!rowseq) { ErrorRet1: if (cur->paramInfos) FreeInfos(cur->paramInfos, cur->paramcount); cur->paramInfos = 0; return false; } Py_ssize_t rowcount = PySequence_Fast_GET_SIZE(rowseq); PyObject **rowptr = PySequence_Fast_ITEMS(rowseq); Py_ssize_t r = 0; while ( r < rowcount ) { // Scan current row to determine C types PyObject *currow = *rowptr++; // REVIEW: This check is not needed - PySequence_Fast below is sufficient. if (!PyTuple_Check(currow) && !PyList_Check(currow) && !Row_Check(currow)) { RaiseErrorV(0, PyExc_TypeError, "Params must be in a list, tuple, or Row"); ErrorRet2: Py_XDECREF(rowseq); goto ErrorRet1; } PyObject *colseq = PySequence_Fast(currow, "Row must be a sequence."); if (!colseq) { goto ErrorRet2; } if (PySequence_Fast_GET_SIZE(colseq) != cur->paramcount) { RaiseErrorV(0, ProgrammingError, "Expected %u parameters, supplied %u", cur->paramcount, PySequence_Fast_GET_SIZE(colseq)); ErrorRet3: Py_XDECREF(colseq); goto ErrorRet2; } PyObject **cells = PySequence_Fast_ITEMS(colseq); // REVIEW: We need a better description of what is going on here. Why is it OK to pass // a fake bindptr to SQLBindParameter. // Start at a non-zero offset to prevent null pointer detection. 
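        // How the fake base pointer works (sketch, not original source): each
        // parameter is bound at 16, 16 + BufferLength + sizeof(SQLLEN), ... as
        // if the row started at address 16.  Once a batch is converted,
        // SQL_ATTR_PARAM_BIND_OFFSET_PTR is pointed at (paramArray - 16), so
        // the driver reads parameter i from 16 + offset_i + (paramArray - 16),
        // which is exactly paramArray + offset_i.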
char *bindptr = (char*)16; Py_ssize_t i = 0; for (; i < cur->paramcount; i++) { if (!DetectCType(cells[i], &cur->paramInfos[i])) { goto ErrorRet3; } if (!SQL_SUCCEEDED(SQLBindParameter(cur->hstmt, i + 1, SQL_PARAM_INPUT, cur->paramInfos[i].ValueType, cur->paramInfos[i].ParameterType, cur->paramInfos[i].ColumnSize, cur->paramInfos[i].DecimalDigits, bindptr, cur->paramInfos[i].BufferLength, (SQLLEN*)(bindptr + cur->paramInfos[i].BufferLength)))) { RaiseErrorFromHandle(cur->cnxn, "SQLBindParameter", GetConnection(cur)->hdbc, cur->hstmt); ErrorRet4: SQLFreeStmt(cur->hstmt, SQL_RESET_PARAMS); goto ErrorRet3; } if (cur->paramInfos[i].ValueType == SQL_C_NUMERIC) { SQLHDESC desc; SQLGetStmtAttr(cur->hstmt, SQL_ATTR_APP_PARAM_DESC, &desc, 0, 0); SQLSetDescField(desc, i + 1, SQL_DESC_TYPE, (SQLPOINTER)SQL_C_NUMERIC, 0); SQLSetDescField(desc, i + 1, SQL_DESC_PRECISION, (SQLPOINTER)cur->paramInfos[i].ColumnSize, 0); SQLSetDescField(desc, i + 1, SQL_DESC_SCALE, (SQLPOINTER)(uintptr_t)cur->paramInfos[i].DecimalDigits, 0); SQLSetDescField(desc, i + 1, SQL_DESC_DATA_PTR, bindptr, 0); } bindptr += cur->paramInfos[i].BufferLength + sizeof(SQLLEN); } Py_ssize_t rowlen = bindptr - (char*)16; // Assume parameters are homogeneous between rows in the common case, to avoid // another rescan for determining the array height. // Subtract number of rows processed as an upper bound. if (!(cur->paramArray = (unsigned char*)PyMem_Malloc(rowlen * (rowcount - r)))) { PyErr_NoMemory(); goto ErrorRet4; } unsigned char *pParamDat = cur->paramArray; Py_ssize_t rows_converted = 0; ParamInfo *pi; for (;;) { // Column loop. pi = &cur->paramInfos[0]; for (int c = 0; c < cur->paramcount; c++, pi++) { if (!PyToCType(cur, &pParamDat, *cells++, pi)) { // "schema change" or conversion error. Try again on next batch. rowptr--; Py_XDECREF(colseq); colseq = 0; // Finish this batch of rows and attempt to execute before starting another. 
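                    // Example of such a "schema change" (descriptive only): if
                    // rows 0..99 supply an int for a column and row 100
                    // supplies a str, the C type detected earlier
                    // (SQL_C_SBIGINT) no longer matches, PyToCType returns
                    // false, the 100 rows already converted are executed as one
                    // parameter array, and the outer loop re-detects and
                    // re-binds starting at row 100.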
goto DoExecute; } } rows_converted++; Py_XDECREF(colseq); colseq = 0; r++; if ( r >= rowcount ) { break; } currow = *rowptr++; colseq = PySequence_Fast(currow, "Row must be a sequence."); if (!colseq) { ErrorRet5: PyMem_Free(cur->paramArray); cur->paramArray = 0; goto ErrorRet4; } if (PySequence_Fast_GET_SIZE(colseq) != cur->paramcount) { RaiseErrorV(0, ProgrammingError, "Expected %u parameters, supplied %u", cur->paramcount, PySequence_Fast_GET_SIZE(colseq)); Py_XDECREF(colseq); goto ErrorRet5; } cells = PySequence_Fast_ITEMS(colseq); } DoExecute: if (!rows_converted || PyErr_Occurred()) { if (!PyErr_Occurred()) RaiseErrorV(0, ProgrammingError, "No suitable conversion for one or more parameters."); goto ErrorRet5; } SQLULEN bop = (SQLULEN)(cur->paramArray) - 16; if (!SQL_SUCCEEDED(SQLSetStmtAttr(cur->hstmt, SQL_ATTR_PARAM_BIND_TYPE, (SQLPOINTER)rowlen, SQL_IS_UINTEGER))) { RaiseErrorFromHandle(cur->cnxn, "SQLSetStmtAttr", GetConnection(cur)->hdbc, cur->hstmt); ErrorRet6: SQLSetStmtAttr(cur->hstmt, SQL_ATTR_PARAM_BIND_TYPE, SQL_BIND_BY_COLUMN, SQL_IS_UINTEGER); goto ErrorRet5; } if (!SQL_SUCCEEDED(SQLSetStmtAttr(cur->hstmt, SQL_ATTR_PARAMSET_SIZE, (SQLPOINTER)rows_converted, SQL_IS_UINTEGER))) { RaiseErrorFromHandle(cur->cnxn, "SQLSetStmtAttr", GetConnection(cur)->hdbc, cur->hstmt); goto ErrorRet6; } if (!SQL_SUCCEEDED(SQLSetStmtAttr(cur->hstmt, SQL_ATTR_PARAM_BIND_OFFSET_PTR, (SQLPOINTER)&bop, SQL_IS_POINTER))) { RaiseErrorFromHandle(cur->cnxn, "SQLSetStmtAttr", GetConnection(cur)->hdbc, cur->hstmt); ErrorRet7: SQLSetStmtAttr(cur->hstmt, SQL_ATTR_PARAMSET_SIZE, (SQLPOINTER)1, SQL_IS_UINTEGER); goto ErrorRet6; } // The code below was copy-pasted from cursor.cpp's execute() for convenience. // TODO: REFACTOR if there is possibility to reuse (maybe not, because DAE structure is different) Py_BEGIN_ALLOW_THREADS rc = SQLExecute(cur->hstmt); Py_END_ALLOW_THREADS if (cur->cnxn->hdbc == SQL_NULL_HANDLE) { // The connection was closed by another thread in the ALLOW_THREADS block above. RaiseErrorV(0, ProgrammingError, "The cursor's connection was closed."); ErrorRet8: FreeParameterData(cur); SQLSetStmtAttr(cur->hstmt, SQL_ATTR_PARAM_BIND_OFFSET_PTR, 0, SQL_IS_POINTER); goto ErrorRet7; } if (!SQL_SUCCEEDED(rc) && rc != SQL_NEED_DATA && rc != SQL_NO_DATA) { // We could try dropping through the while and if below, but if there is an error, we need to raise it before // FreeParameterData calls more ODBC functions. RaiseErrorFromHandle(cur->cnxn, "SQLExecute", cur->cnxn->hdbc, cur->hstmt); goto ErrorRet8; } if (rc == SQL_SUCCESS_WITH_INFO) { GetDiagRecs(cur); } // TODO: Refactor into ProcessDAEParams() ? while (rc == SQL_NEED_DATA) { // One or more parameters were too long to bind normally so we set the // length to SQL_LEN_DATA_AT_EXEC. ODBC will return SQL_NEED_DATA for // each of the parameters we did this for. // // For each one we set a pointer to the ParamInfo as the "parameter // data" we can access with SQLParamData. We've stashed everything we // need in there. 
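        // DAE round trip (descriptive only) for one oversized parameter:
        //     SQLExecute         -> SQL_NEED_DATA
        //     SQLParamData       -> returns our DAEParam*, rc == SQL_NEED_DATA
        //     SQLPutData(chunk)  -> repeated, at most pInfo->maxlen bytes per call
        //     SQLParamData       -> SQL_SUCCESS once every DAE parameter has been fed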
szLastFunction = "SQLParamData"; DAEParam *pInfo; Py_BEGIN_ALLOW_THREADS rc = SQLParamData(cur->hstmt, (SQLPOINTER*)&pInfo); Py_END_ALLOW_THREADS if (rc != SQL_NEED_DATA && rc != SQL_NO_DATA && !SQL_SUCCEEDED(rc)) return RaiseErrorFromHandle(cur->cnxn, "SQLParamData", cur->cnxn->hdbc, cur->hstmt) != NULL; TRACE("SQLParamData() --> %d\n", rc); if (rc == SQL_NEED_DATA) { PyObject* objCell = pInfo->cell; // If the object is Unicode it needs to be converted into bytes before it can be used by SQLPutData if (PyUnicode_Check(objCell)) { const TextEnc& enc = cur->cnxn->sqlwchar_enc; PyObject* bytes = NULL; switch (enc.optenc) { case OPTENC_UTF8: bytes = PyUnicode_AsUTF8String(objCell); break; case OPTENC_UTF16: bytes = PyUnicode_AsUTF16String(objCell); break; case OPTENC_UTF16LE: bytes = PyUnicode_AsEncodedString(objCell, "utf_16_le", NULL); break; case OPTENC_UTF16BE: bytes = PyUnicode_AsEncodedString(objCell, "utf_16_be", NULL); break; } if (bytes && PyBytes_Check(bytes)) { objCell = bytes; } //TODO: Raise or clear error when bytes == NULL. } szLastFunction = "SQLPutData"; if (PyBytes_Check(objCell) || PyByteArray_Check(objCell)) { char *(*pGetPtr)(PyObject*); Py_ssize_t (*pGetLen)(PyObject*); if (PyByteArray_Check(objCell)) { pGetPtr = PyByteArray_AsString; pGetLen = PyByteArray_Size; } else { pGetPtr = PyBytes_AsString; pGetLen = PyBytes_Size; } const char* p = pGetPtr(objCell); SQLLEN cb = (SQLLEN)pGetLen(objCell); SQLLEN offset = 0; do { SQLLEN remaining = min(pInfo->maxlen, cb - offset); TRACE("SQLPutData [%d] (%d) %.10s\n", offset, remaining, &p[offset]); Py_BEGIN_ALLOW_THREADS rc = SQLPutData(cur->hstmt, (SQLPOINTER)&p[offset], remaining); Py_END_ALLOW_THREADS if (!SQL_SUCCEEDED(rc)) return RaiseErrorFromHandle(cur->cnxn, "SQLPutData", cur->cnxn->hdbc, cur->hstmt) != NULL; offset += remaining; } while (offset < cb); if (PyUnicode_Check(pInfo->cell) && PyBytes_Check(objCell)) { Py_XDECREF(objCell); } } Py_XDECREF(pInfo->cell); rc = SQL_NEED_DATA; } } if (!SQL_SUCCEEDED(rc) && rc != SQL_NO_DATA) return RaiseErrorFromHandle(cur->cnxn, szLastFunction, cur->cnxn->hdbc, cur->hstmt) != NULL; SQLSetStmtAttr(cur->hstmt, SQL_ATTR_PARAMSET_SIZE, (SQLPOINTER)1, SQL_IS_UINTEGER); SQLSetStmtAttr(cur->hstmt, SQL_ATTR_PARAM_BIND_OFFSET_PTR, 0, SQL_IS_POINTER); PyMem_Free(cur->paramArray); cur->paramArray = 0; } Py_XDECREF(rowseq); FreeParameterData(cur); return ret; } static bool GetParamType(Cursor* cur, Py_ssize_t index, SQLSMALLINT& type) { // Returns the ODBC type of the of given parameter. // // Normally we set the parameter type based on the parameter's Python object type (e.g. str --> SQL_CHAR), so this // is only called when the parameter is None. In that case, we can't guess the type and have to use // SQLDescribeParam. // // If the database doesn't support SQLDescribeParam, we return SQL_VARCHAR since it converts to most other types. // However, it will not usually work if the target column is a binary column. if (!GetConnection(cur)->supports_describeparam || cur->paramcount == 0) { type = SQL_VARCHAR; return true; } if (cur->paramtypes == 0) { cur->paramtypes = reinterpret_cast(PyMem_Malloc(sizeof(SQLSMALLINT) * cur->paramcount)); if (cur->paramtypes == 0) { PyErr_NoMemory(); return false; } // SQL_UNKNOWN_TYPE is zero, so zero out all columns since we haven't looked any up yet. 
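    // Walk-through (descriptive only): executing "SELECT ?" with None first
    // zeroes this cache, SQLDescribeParam then fills paramtypes[0] on demand,
    // and re-executing the same prepared statement reuses the cached type
    // without another driver round trip.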
memset(cur->paramtypes, 0, sizeof(SQLSMALLINT) * cur->paramcount); } if (cur->paramtypes[index] == SQL_UNKNOWN_TYPE) { SQLULEN ParameterSizePtr; SQLSMALLINT DecimalDigitsPtr; SQLSMALLINT NullablePtr; SQLRETURN ret; Py_BEGIN_ALLOW_THREADS ret = SQLDescribeParam(cur->hstmt, (SQLUSMALLINT)(index + 1), &cur->paramtypes[index], &ParameterSizePtr, &DecimalDigitsPtr, &NullablePtr); Py_END_ALLOW_THREADS if (!SQL_SUCCEEDED(ret)) { // This can happen with ("select ?", None). We'll default to VARCHAR which works with most types. cur->paramtypes[index] = SQL_VARCHAR; } } type = cur->paramtypes[index]; return true; } struct NullParam { PyObject_HEAD }; PyTypeObject NullParamType = { PyVarObject_HEAD_INIT(NULL, 0) "pyodbc.NullParam", // tp_name sizeof(NullParam), // tp_basicsize 0, // tp_itemsize 0, // destructor tp_dealloc 0, // tp_print 0, // tp_getattr 0, // tp_setattr 0, // tp_compare 0, // tp_repr 0, // tp_as_number 0, // tp_as_sequence 0, // tp_as_mapping 0, // tp_hash 0, // tp_call 0, // tp_str 0, // tp_getattro 0, // tp_setattro 0, // tp_as_buffer Py_TPFLAGS_DEFAULT, // tp_flags }; PyObject* null_binary; bool Params_init() { if (PyType_Ready(&NullParamType) < 0) return false; null_binary = (PyObject*)PyObject_New(NullParam, &NullParamType); if (null_binary == 0) return false; PyDateTime_IMPORT; return true; } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1707151232.0 pyodbc-5.1.0/src/params.h0000644000175100001770000000063514560207600014631 0ustar00runnerdocker #ifndef PARAMS_H #define PARAMS_H bool Params_init(); struct Cursor; bool PrepareAndBind(Cursor* cur, PyObject* pSql, PyObject* params, bool skip_first); bool ExecuteMulti(Cursor* cur, PyObject* pSql, PyObject* paramArrayObj); bool GetParameterInfo(Cursor* cur, Py_ssize_t index, PyObject* param, ParamInfo& info, bool isTVP); void FreeParameterData(Cursor* cur); void FreeParameterInfo(Cursor* cur); #endif ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1707151235.2462423 pyodbc-5.1.0/src/pyodbc.egg-info/0000755000175100001770000000000014560207603016146 5ustar00runnerdocker././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1707151235.0 pyodbc-5.1.0/src/pyodbc.egg-info/PKG-INFO0000644000175100001770000000163414560207603017247 0ustar00runnerdockerMetadata-Version: 2.1 Name: pyodbc Version: 5.1.0 Summary: DB API Module for ODBC Home-page: https://github.com/mkleehammer/pyodbc Maintainer: Michael Kleehammer Maintainer-email: michael@kleehammer.com License: MIT Platform: UNKNOWN Classifier: Development Status :: 5 - Production/Stable Classifier: Intended Audience :: Developers Classifier: Intended Audience :: System Administrators Classifier: License :: OSI Approved :: MIT License Classifier: Operating System :: Microsoft :: Windows Classifier: Operating System :: POSIX Classifier: Programming Language :: Python Classifier: Programming Language :: Python :: 3 Classifier: Topic :: Database Requires-Python: >=3.8 License-File: LICENSE.txt pyodbc is an open source Python module that makes accessing ODBC databases simple. It implements the [DB API 2.0](https://www.python.org/dev/peps/pep-0249) specification but is packed with even more Pythonic convenience. 
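For reference, here is a minimal, self-contained sketch of the raw ODBC "parameter array" pattern that ExecuteMulti in src/params.cpp builds on. It is illustrative only: the table, column, and row count are made up, a valid SQLHSTMT is assumed, and error handling is elided.

#include <sql.h>
#include <sqlext.h>

struct Row {                    // one packed parameter row: value + indicator
    SQLBIGINT id;
    SQLLEN    idInd;
};

static SQLRETURN insert_three_rows(SQLHSTMT hstmt)
{
    static Row rows[3];
    for (int i = 0; i < 3; i++) {
        rows[i].id    = i + 1;
        rows[i].idInd = sizeof(SQLBIGINT);
    }

    SQLPrepare(hstmt, (SQLCHAR*)"INSERT INTO t(id) VALUES (?)", SQL_NTS);

    // Row-wise binding: the attribute value is the stride between rows.
    SQLSetStmtAttr(hstmt, SQL_ATTR_PARAM_BIND_TYPE, (SQLPOINTER)sizeof(Row), 0);
    SQLSetStmtAttr(hstmt, SQL_ATTR_PARAMSET_SIZE, (SQLPOINTER)3, 0);

    SQLBindParameter(hstmt, 1, SQL_PARAM_INPUT, SQL_C_SBIGINT, SQL_BIGINT,
                     0, 0, &rows[0].id, 0, &rows[0].idInd);

    return SQLExecute(hstmt);   // one round trip inserts all three rows
}

ExecuteMulti differs mainly in that it packs heterogeneous Python rows into one heap block at run time and retargets the bindings with SQL_ATTR_PARAM_BIND_OFFSET_PTR instead of binding a static struct array.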
././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1707151235.0 pyodbc-5.1.0/src/pyodbc.egg-info/SOURCES.txt0000644000175100001770000000126614560207603020037 0ustar00runnerdockerLICENSE.txt MANIFEST.in README.md pyproject.toml setup.py src/cnxninfo.cpp src/cnxninfo.h src/connection.cpp src/connection.h src/cursor.cpp src/cursor.h src/dbspecific.h src/decimal.cpp src/decimal.h src/errors.cpp src/errors.h src/getdata.cpp src/getdata.h src/params.cpp src/params.h src/pyodbc.h src/pyodbc.pyi src/pyodbcdbg.cpp src/pyodbcmodule.cpp src/pyodbcmodule.h src/resource.h src/row.cpp src/row.h src/textenc.cpp src/textenc.h src/wrapper.h src/pyodbc.egg-info/PKG-INFO src/pyodbc.egg-info/SOURCES.txt src/pyodbc.egg-info/dependency_links.txt src/pyodbc.egg-info/top_level.txt tests/__init__.py tests/conftest.py tests/mysql_test.py tests/postgresql_test.py tests/sqlserver_test.py././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1707151235.0 pyodbc-5.1.0/src/pyodbc.egg-info/dependency_links.txt0000644000175100001770000000000114560207603022214 0ustar00runnerdocker ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1707151235.0 pyodbc-5.1.0/src/pyodbc.egg-info/top_level.txt0000644000175100001770000000001014560207603020667 0ustar00runnerdocker pyodbc ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1707151232.0 pyodbc-5.1.0/src/pyodbc.h0000644000175100001770000000657314560207600014631 0ustar00runnerdocker // Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated // documentation files (the "Software"), to deal in the Software without restriction, including without limitation the // rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to // permit persons to whom the Software is furnished to do so. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE // WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS // OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR // OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #ifndef PYODBC_H #define PYODBC_H #ifdef _MSC_VER // The MS headers generate a ton of warnings. #pragma warning(push, 0) #define _CRT_SECURE_NO_WARNINGS #include <windows.h> #include <malloc.h> #pragma warning(pop) typedef __int64 INT64; typedef unsigned __int64 UINT64; #else typedef unsigned char byte; typedef unsigned int UINT; typedef long long INT64; typedef unsigned long long UINT64; #define _strcmpi strcasecmp #define _strdup strdup inline int max(int lhs, int rhs) { return (rhs > lhs) ?
rhs : lhs; } #endif #ifdef __SUN__ #include <alloca.h> #endif #define PY_SSIZE_T_CLEAN 1 #include <Python.h> #include <datetime.h> #include <structmember.h> #include <floatobject.h> #include <longobject.h> #include <boolobject.h> #include <unicodeobject.h> #ifdef __CYGWIN__ #include <windows.h> #endif #include <sql.h> #include <sqlext.h> #ifndef _countof #define _countof(a) (sizeof(a) / sizeof(a[0])) #endif #ifndef SQL_SS_TABLE #define SQL_SS_TABLE -153 #endif #ifndef SQL_SOPT_SS_PARAM_FOCUS #define SQL_SOPT_SS_PARAM_FOCUS 1236 #endif #ifndef SQL_CA_SS_TYPE_NAME #define SQL_CA_SS_TYPE_NAME 1227 #endif #ifndef SQL_CA_SS_SCHEMA_NAME #define SQL_CA_SS_SCHEMA_NAME 1226 #endif #ifndef SQL_CA_SS_CATALOG_NAME #define SQL_CA_SS_CATALOG_NAME 1225 #endif inline bool IsSet(DWORD grf, DWORD flags) { return (grf & flags) == flags; } #ifdef UNUSED #undef UNUSED #endif inline void UNUSED(...) { } #include <stdarg.h> #if defined(__SUNPRO_CC) || defined(__SUNPRO_C) || (defined(__GNUC__) && !defined(__MINGW32__)) #ifndef __FreeBSD__ #include <alloca.h> #endif #define CDECL cdecl #define min(X,Y) ((X) < (Y) ? (X) : (Y)) #define max(X,Y) ((X) > (Y) ? (X) : (Y)) #define _alloca alloca inline void _strlwr(char* name) { while (*name) { *name = tolower(*name); name++; } } #else #define CDECL #endif #define STRINGIFY(x) #x #define TOSTRING(x) STRINGIFY(x) #ifdef PYODBC_TRACE void DebugTrace(const char* szFmt, ...); #else inline void DebugTrace(const char* szFmt, ...) { UNUSED(szFmt); } #endif #define TRACE DebugTrace // issue #880: entry missing from iODBC sqltypes.h #ifndef BYTE typedef unsigned char BYTE; #endif bool PyMem_Realloc(BYTE** pp, size_t newlen); // A wrapper around realloc with a safer interface. If it is successful, *pp is updated to the // new pointer value. If not successful, it is not modified. (It is easy to forget and lose // the old pointer value with realloc.) void PrintBytes(void* p, size_t len); const char* CTypeName(SQLSMALLINT n); const char* SqlTypeName(SQLSMALLINT n); #endif // pyodbc_h ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1707151232.0 pyodbc-5.1.0/src/pyodbc.pyi0000644000175100001770000007713714560207600015203 0ustar00runnerdockerfrom __future__ import annotations from typing import ( Any, Callable, Dict, Final, Generator, Iterable, Iterator, List, Optional, Sequence, Tuple, Union, ) # SQLSetConnectAttr attributes # ref: https://docs.microsoft.com/en-us/sql/odbc/reference/syntax/sqlsetconnectattr-function SQL_ATTR_ACCESS_MODE: int SQL_ATTR_AUTOCOMMIT: int SQL_ATTR_CURRENT_CATALOG: int SQL_ATTR_LOGIN_TIMEOUT: int SQL_ATTR_ODBC_CURSORS: int SQL_ATTR_QUIET_MODE: int SQL_ATTR_TRACE: int SQL_ATTR_TRACEFILE: int SQL_ATTR_TRANSLATE_LIB: int SQL_ATTR_TRANSLATE_OPTION: int SQL_ATTR_TXN_ISOLATION: int # other (e.g.
specific to certain RDBMSs) SQL_ACCESS_MODE: int SQL_AUTOCOMMIT: int SQL_CURRENT_QUALIFIER: int SQL_LOGIN_TIMEOUT: int SQL_ODBC_CURSORS: int SQL_OPT_TRACE: int SQL_OPT_TRACEFILE: int SQL_PACKET_SIZE: int SQL_QUIET_MODE: int SQL_TRANSLATE_DLL: int SQL_TRANSLATE_OPTION: int SQL_TXN_ISOLATION: int # Unicode SQL_ATTR_ANSI_APP: int # ODBC column data types # https://docs.microsoft.com/en-us/sql/odbc/reference/appendixes/appendix-d-data-types SQL_UNKNOWN_TYPE: int SQL_CHAR: int SQL_VARCHAR: int SQL_LONGVARCHAR: int SQL_WCHAR: int SQL_WVARCHAR: int SQL_WLONGVARCHAR: int SQL_DECIMAL: int SQL_NUMERIC: int SQL_SMALLINT: int SQL_INTEGER: int SQL_REAL: int SQL_FLOAT: int SQL_DOUBLE: int SQL_BIT: int SQL_TINYINT: int SQL_BIGINT: int SQL_BINARY: int SQL_VARBINARY: int SQL_LONGVARBINARY: int SQL_TYPE_DATE: int SQL_TYPE_TIME: int SQL_TYPE_TIMESTAMP: int SQL_SS_TIME2: int SQL_SS_XML: int SQL_INTERVAL_MONTH: int SQL_INTERVAL_YEAR: int SQL_INTERVAL_YEAR_TO_MONTH: int SQL_INTERVAL_DAY: int SQL_INTERVAL_HOUR: int SQL_INTERVAL_MINUTE: int SQL_INTERVAL_SECOND: int SQL_INTERVAL_DAY_TO_HOUR: int SQL_INTERVAL_DAY_TO_MINUTE: int SQL_INTERVAL_DAY_TO_SECOND: int SQL_INTERVAL_HOUR_TO_MINUTE: int SQL_INTERVAL_HOUR_TO_SECOND: int SQL_INTERVAL_MINUTE_TO_SECOND: int SQL_GUID: int # SQLDescribeCol SQL_NO_NULLS: int SQL_NULLABLE: int SQL_NULLABLE_UNKNOWN: int # specific to pyodbc SQL_WMETADATA: int # SQL_CONVERT_X SQL_CONVERT_FUNCTIONS: int SQL_CONVERT_BIGINT: int SQL_CONVERT_BINARY: int SQL_CONVERT_BIT: int SQL_CONVERT_CHAR: int SQL_CONVERT_DATE: int SQL_CONVERT_DECIMAL: int SQL_CONVERT_DOUBLE: int SQL_CONVERT_FLOAT: int SQL_CONVERT_GUID: int SQL_CONVERT_INTEGER: int SQL_CONVERT_INTERVAL_DAY_TIME: int SQL_CONVERT_INTERVAL_YEAR_MONTH: int SQL_CONVERT_LONGVARBINARY: int SQL_CONVERT_LONGVARCHAR: int SQL_CONVERT_NUMERIC: int SQL_CONVERT_REAL: int SQL_CONVERT_SMALLINT: int SQL_CONVERT_TIME: int SQL_CONVERT_TIMESTAMP: int SQL_CONVERT_TINYINT: int SQL_CONVERT_VARBINARY: int SQL_CONVERT_VARCHAR: int SQL_CONVERT_WCHAR: int SQL_CONVERT_WLONGVARCHAR: int SQL_CONVERT_WVARCHAR: int # transaction isolation # ref: https://docs.microsoft.com/en-us/sql/relational-databases/native-client-odbc-cursors/properties/cursor-transaction-isolation-level SQL_TXN_READ_COMMITTED: int SQL_TXN_READ_UNCOMMITTED: int SQL_TXN_REPEATABLE_READ: int SQL_TXN_SERIALIZABLE: int # outer join capabilities SQL_OJ_LEFT: int SQL_OJ_RIGHT: int SQL_OJ_FULL: int SQL_OJ_NESTED: int SQL_OJ_NOT_ORDERED: int SQL_OJ_INNER: int SQL_OJ_ALL_COMPARISON_OPS: int # other ODBC database constants SQL_SCOPE_CURROW: int SQL_SCOPE_TRANSACTION: int SQL_SCOPE_SESSION: int SQL_PC_UNKNOWN: int SQL_PC_NOT_PSEUDO: int SQL_PC_PSEUDO: int # SQL_INDEX_BTREE: int # SQL_INDEX_CLUSTERED: int # SQL_INDEX_CONTENT: int # SQL_INDEX_HASHED: int # SQL_INDEX_OTHER: int # attributes for the ODBC SQLGetInfo function # https://docs.microsoft.com/en-us/sql/odbc/reference/syntax/sqlgetinfo-function SQL_ACCESSIBLE_PROCEDURES: int SQL_ACCESSIBLE_TABLES: int SQL_ACTIVE_ENVIRONMENTS: int SQL_AGGREGATE_FUNCTIONS: int SQL_ALTER_DOMAIN: int SQL_ALTER_TABLE: int SQL_ASYNC_MODE: int SQL_BATCH_ROW_COUNT: int SQL_BATCH_SUPPORT: int SQL_BOOKMARK_PERSISTENCE: int SQL_CATALOG_LOCATION: int SQL_CATALOG_NAME: int SQL_CATALOG_NAME_SEPARATOR: int SQL_CATALOG_TERM: int SQL_CATALOG_USAGE: int SQL_COLLATION_SEQ: int SQL_COLUMN_ALIAS: int SQL_CONCAT_NULL_BEHAVIOR: int SQL_CORRELATION_NAME: int SQL_CREATE_ASSERTION: int SQL_CREATE_CHARACTER_SET: int SQL_CREATE_COLLATION: int SQL_CREATE_DOMAIN: int SQL_CREATE_SCHEMA: int 
SQL_CREATE_TABLE: int SQL_CREATE_TRANSLATION: int SQL_CREATE_VIEW: int SQL_CURSOR_COMMIT_BEHAVIOR: int SQL_CURSOR_ROLLBACK_BEHAVIOR: int # SQL_CURSOR_ROLLBACK_SQL_CURSOR_SENSITIVITY: int SQL_DATABASE_NAME: int SQL_DATA_SOURCE_NAME: int SQL_DATA_SOURCE_READ_ONLY: int SQL_DATETIME_LITERALS: int SQL_DBMS_NAME: int SQL_DBMS_VER: int SQL_DDL_INDEX: int SQL_DEFAULT_TXN_ISOLATION: int SQL_DESCRIBE_PARAMETER: int SQL_DM_VER: int SQL_DRIVER_HDESC: int SQL_DRIVER_HENV: int SQL_DRIVER_HLIB: int SQL_DRIVER_HSTMT: int SQL_DRIVER_NAME: int SQL_DRIVER_ODBC_VER: int SQL_DRIVER_VER: int SQL_DROP_ASSERTION: int SQL_DROP_CHARACTER_SET: int SQL_DROP_COLLATION: int SQL_DROP_DOMAIN: int SQL_DROP_SCHEMA: int SQL_DROP_TABLE: int SQL_DROP_TRANSLATION: int SQL_DROP_VIEW: int SQL_DYNAMIC_CURSOR_ATTRIBUTES1: int SQL_DYNAMIC_CURSOR_ATTRIBUTES2: int SQL_EXPRESSIONS_IN_ORDERBY: int SQL_FILE_USAGE: int SQL_FORWARD_ONLY_CURSOR_ATTRIBUTES1: int SQL_FORWARD_ONLY_CURSOR_ATTRIBUTES2: int SQL_GETDATA_EXTENSIONS: int SQL_GROUP_BY: int SQL_IDENTIFIER_CASE: int SQL_IDENTIFIER_QUOTE_CHAR: int SQL_INDEX_KEYWORDS: int SQL_INFO_SCHEMA_VIEWS: int SQL_INSERT_STATEMENT: int SQL_INTEGRITY: int SQL_KEYSET_CURSOR_ATTRIBUTES1: int SQL_KEYSET_CURSOR_ATTRIBUTES2: int SQL_KEYWORDS: int SQL_LIKE_ESCAPE_CLAUSE: int SQL_MAX_ASYNC_CONCURRENT_STATEMENTS: int SQL_MAX_BINARY_LITERAL_LEN: int SQL_MAX_CATALOG_NAME_LEN: int SQL_MAX_CHAR_LITERAL_LEN: int SQL_MAX_COLUMNS_IN_GROUP_BY: int SQL_MAX_COLUMNS_IN_INDEX: int SQL_MAX_COLUMNS_IN_ORDER_BY: int SQL_MAX_COLUMNS_IN_SELECT: int SQL_MAX_COLUMNS_IN_TABLE: int SQL_MAX_COLUMN_NAME_LEN: int SQL_MAX_CONCURRENT_ACTIVITIES: int SQL_MAX_CURSOR_NAME_LEN: int SQL_MAX_DRIVER_CONNECTIONS: int SQL_MAX_IDENTIFIER_LEN: int SQL_MAX_INDEX_SIZE: int SQL_MAX_PROCEDURE_NAME_LEN: int SQL_MAX_ROW_SIZE: int SQL_MAX_ROW_SIZE_INCLUDES_LONG: int SQL_MAX_SCHEMA_NAME_LEN: int SQL_MAX_STATEMENT_LEN: int SQL_MAX_TABLES_IN_SELECT: int SQL_MAX_TABLE_NAME_LEN: int SQL_MAX_USER_NAME_LEN: int SQL_MULTIPLE_ACTIVE_TXN: int SQL_MULT_RESULT_SETS: int SQL_NEED_LONG_DATA_LEN: int SQL_NON_NULLABLE_COLUMNS: int SQL_NULL_COLLATION: int SQL_NUMERIC_FUNCTIONS: int SQL_ODBC_INTERFACE_CONFORMANCE: int SQL_ODBC_VER: int SQL_OJ_CAPABILITIES: int SQL_ORDER_BY_COLUMNS_IN_SELECT: int SQL_PARAM_ARRAY_ROW_COUNTS: int SQL_PARAM_ARRAY_SELECTS: int SQL_PARAM_TYPE_UNKNOWN: int SQL_PARAM_INPUT: int SQL_PARAM_INPUT_OUTPUT: int SQL_PARAM_OUTPUT: int SQL_RETURN_VALUE: int SQL_RESULT_COL: int SQL_PROCEDURES: int SQL_PROCEDURE_TERM: int SQL_QUOTED_IDENTIFIER_CASE: int SQL_ROW_UPDATES: int SQL_SCHEMA_TERM: int SQL_SCHEMA_USAGE: int SQL_SCROLL_OPTIONS: int SQL_SEARCH_PATTERN_ESCAPE: int SQL_SERVER_NAME: int SQL_SPECIAL_CHARACTERS: int SQL_SQL92_DATETIME_FUNCTIONS: int SQL_SQL92_FOREIGN_KEY_DELETE_RULE: int SQL_SQL92_FOREIGN_KEY_UPDATE_RULE: int SQL_SQL92_GRANT: int SQL_SQL92_NUMERIC_VALUE_FUNCTIONS: int SQL_SQL92_PREDICATES: int SQL_SQL92_RELATIONAL_JOIN_OPERATORS: int SQL_SQL92_REVOKE: int SQL_SQL92_ROW_VALUE_CONSTRUCTOR: int SQL_SQL92_STRING_FUNCTIONS: int SQL_SQL92_VALUE_EXPRESSIONS: int SQL_SQL_CONFORMANCE: int SQL_STANDARD_CLI_CONFORMANCE: int SQL_STATIC_CURSOR_ATTRIBUTES1: int SQL_STATIC_CURSOR_ATTRIBUTES2: int SQL_STRING_FUNCTIONS: int SQL_SUBQUERIES: int SQL_SYSTEM_FUNCTIONS: int SQL_TABLE_TERM: int SQL_TIMEDATE_ADD_INTERVALS: int SQL_TIMEDATE_DIFF_INTERVALS: int SQL_TIMEDATE_FUNCTIONS: int SQL_TXN_CAPABLE: int SQL_TXN_ISOLATION_OPTION: int SQL_UNION: int SQL_USER_NAME: int SQL_XOPEN_CLI_YEAR: int # pyodbc-specific constants BinaryNull: Any # to distinguish 
binary NULL values from char NULL values SQLWCHAR_SIZE: int # module attributes # https://www.python.org/dev/peps/pep-0249/#globals # read-only apilevel: Final[str] = '2.0' paramstyle: Final[str] = 'qmark' threadsafety: Final[int] = 1 version: Final[str] # not pep-0249 # read-write (not pep-0249) lowercase: bool = False native_uuid: bool = False odbcversion: str = '3.X' pooling: bool = True # exceptions # https://www.python.org/dev/peps/pep-0249/#exceptions class Warning(Exception): ... class Error(Exception): ... class InterfaceError(Error): ... class DatabaseError(Error): ... class DataError(DatabaseError): ... class OperationalError(DatabaseError): ... class IntegrityError(DatabaseError): ... class InternalError(DatabaseError): ... class ProgrammingError(DatabaseError): ... class NotSupportedError(DatabaseError): ... class Connection: """The ODBC connection class representing an ODBC connection to a database, for managing database transactions and creating cursors. https://www.python.org/dev/peps/pep-0249/#connection-objects This class should not be instantiated directly, instead call pyodbc.connect() to create a Connection object. """ @property def autocommit(self) -> bool: """Whether the database automatically commits after each successful SQL statement. Default is False. """ ... @autocommit.setter def autocommit(self, value: bool) -> None: ... @property def closed(self) -> bool: """Returns True if the connection is closed, False otherwise.""" ... @property def maxwrite(self) -> int: """The maximum bytes to write before using SQLPutData, default is zero for no maximum.""" ... @maxwrite.setter def maxwrite(self, value: int) -> None: ... @property def searchescape(self) -> str: """The character for escaping search pattern characters like "%" and "_". This is typically the backslash character but can be driver-specific.""" ... @property def timeout(self) -> int: """The timeout in seconds for SQL queries, use zero (the default) for no timeout limit.""" ... @timeout.setter def timeout(self, value: int) -> None: ... # implemented dunder methods def __enter__(self) -> Connection: ... def __exit__(self, exc_type, exc_value, traceback) -> None: ... # functions for defining the text encoding used for data, metadata, sql, parameters, etc. def setencoding(self, encoding: Optional[str] = None, ctype: Optional[int] = None) -> None: """Set the text encoding for SQL statements and textual parameters sent to the database. Args: encoding: Text encoding codec, e.g. "utf-8". ctype: The C data type when passing data - either pyodbc.SQL_CHAR or pyodbc.SQL_WCHAR. More relevant for Python 2.7. """ ... def setdecoding(self, sqltype: int, encoding: Optional[str] = None, ctype: Optional[int] = None) -> None: """Set the text decoding used when reading SQL_CHAR or SQL_WCHAR data from the database. Args: sqltype: pyodbc.SQL_CHAR, pyodbc.SQL_WCHAR, or pyodbc.SQL_WMETADATA. encoding: Text encoding codec, e.g. "utf-8". ctype: The C data type to request from SQLGetData - either pyodbc.SQL_CHAR or pyodbc.SQL_WCHAR. More relevant for Python 2.7. """ ... # functions for getting/setting connection attributes def getinfo(self, infotype: int, /) -> Any: """Retrieve general information about the driver and the data source, via SQLGetInfo. Args: infotype: Id of the information to retrieve. Returns: The value of the requested information. """ ... def set_attr(self, attr_id: int, value: int, /) -> None: """Set an attribute on the connection, via SQLSetConnectAttr.
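For example, to change the transaction isolation level (an illustrative
sketch; `cnxn` is assumed to be an open Connection, and both constants
are exported by this module):

    cnxn.set_attr(pyodbc.SQL_ATTR_TXN_ISOLATION, pyodbc.SQL_TXN_SERIALIZABLE)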
Args: attr_id: Id for the attribute, as defined by ODBC or the driver. value: The value of the attribute. """ ... # functions to handle non-standard database data types def add_output_converter(self, sqltype: int, func: Optional[Callable], /) -> None: """Register an output converter function that will be called whenever a value with the given SQL type is read from the database. See the Wiki for details: https://github.com/mkleehammer/pyodbc/wiki/Using-an-Output-Converter-function Args: sqltype: The SQL type for the values to convert. func: The converter function. """ ... def get_output_converter(self, sqltype: int, /) -> Optional[Callable]: """Retrieve the (previously registered) converter function for the SQL type. Args: sqltype: The SQL type. Returns: The converter function if it exists, None otherwise. """ ... def remove_output_converter(self, sqltype: int, /) -> None: """Delete a previously registered output converter function. Args: sqltype: The SQL type. """ ... def clear_output_converters(self) -> None: """Delete all previously registered converter functions.""" ... # functions for managing database transactions (in typical order of use) def cursor(self) -> Cursor: """Create a new cursor on the connection. Returns: A new cursor. """ ... def execute(self, sql: str, *params: Any) -> Cursor: """A convenience function for running queries directly from a Connection object. Creates a new cursor, runs the SQL query, and returns the new cursor. Args: sql: The SQL query. *params: Any parameter values for the SQL query. Returns: A new cursor. """ ... def commit(self) -> None: """Commit all SQL statements executed on the connection since the last commit/rollback.""" ... def rollback(self) -> None: """Rollback all SQL statements executed on the connection since the last commit/rollback.""" ... def close(self) -> None: """Close the connection. Any uncommitted SQL statements will be rolled back.""" ... class Cursor: """The class representing database cursors. Cursors are vehicles for executing SQL statements and returning their results. https://www.python.org/dev/peps/pep-0249/#cursor-objects This class should not be instantiated directly, instead call cursor() from a Connection object to create a Cursor object. """ @property def arraysize(self) -> int: """The number of rows at a time to fetch with fetchmany(), default is 1.""" ... @arraysize.setter def arraysize(self, value: int) -> None: ... @property def connection(self) -> Connection: """The parent Connection object for the cursor.""" ... @property def description(self) -> Tuple[Tuple[str, Any, int, int, int, int, bool]]: """The metadata for the columns returned in the last SQL SELECT statement, in the form of a list of tuples. Each tuple contains seven fields: 0. name of the column (or column alias) 1. type code, the Python-equivalent class of the column, e.g. str for VARCHAR 2. display size (pyodbc does not set this value) 3. internal size (in bytes) 4. precision 5. scale 6. nullable (True/False) ref: https://peps.python.org/pep-0249/#description """ ... @property def fast_executemany(self) -> bool: """When this cursor property is False (the default), calls to executemany() do nothing more than iterate over the provided list of parameters and call execute() on each set of parameters. This is typically slow. When fast_executemany is True, the parameters are sent to the database in one bundle (with the SQL). This is usually much faster, but there are limitations. Check the Wiki for details.
https://github.com/mkleehammer/pyodbc/wiki/Cursor#executemanysql-params-with-fast_executemanytrue """ ... @fast_executemany.setter def fast_executemany(self, value: bool) -> None: ... @property def messages(self) -> Optional[List[Tuple[str, Union[str, bytes]]]]: """Any descriptive messages returned by the last call to execute(), e.g. PRINT statements, or None.""" ... @property def noscan(self) -> bool: """Whether the driver should scan SQL strings for escape sequences, default is True.""" ... @noscan.setter def noscan(self, value: bool) -> None: ... @property def rowcount(self) -> int: """The number of rows modified by the last SQL statement. Has the value of -1 if the number of rows is unknown or unavailable. """ ... # implemented dunder methods def __enter__(self) -> Cursor: ... def __exit__(self, exc_type, exc_value, traceback) -> None: ... def __iter__(self, /) -> Cursor: ... def __next__(self, /) -> Row: ... # functions for running SQL queries (in rough order of use) def setinputsizes(self, sizes: Optional[Iterable[Tuple[int, int, int]]], /) -> None: """Explicitly declare the types and sizes of the parameters in a query. Set to None to clear any previously registered input sizes. Args: sizes: A list of tuples, one tuple for each query parameter, where each tuple contains: 1. the column datatype 2. the column size (char length or decimal precision) 3. the decimal scale. For example: [(pyodbc.SQL_WVARCHAR, 50, 0), (pyodbc.SQL_DECIMAL, 18, 4)] """ ... def setoutputsize(self) -> None: """Not supported.""" ... def execute(self, sql: str, *params: Any) -> Cursor: """Run a SQL query and return the cursor. Args: sql: The SQL query. *params: Any parameters for the SQL query, as positional arguments or a single iterable. Returns: The cursor, so that calls on the cursor can be chained. """ ... def executemany(self, sql: str, params: Union[Sequence, Iterator, Generator], /) -> None: """Run the SQL query against an iterable of parameters. The behavior of this function depends heavily on the setting of the fast_executemany cursor property. See the Wiki for details. https://github.com/mkleehammer/pyodbc/wiki/Cursor#executemanysql-params-with-fast_executemanyfalse-the-default https://github.com/mkleehammer/pyodbc/wiki/Cursor#executemanysql-params-with-fast_executemanytrue Args: sql: The SQL query. *params: Any parameters for the SQL query, as an iterable of parameter sets. """ ... def fetchone(self) -> Optional[Row]: """Retrieve the next row in the current result set for the query. Returns: A row of results, or None if there is no more data to return. """ ... def fetchmany(self, size: int, /) -> List[Row]: """Retrieve the next rows in the current result set for the query, as a list. Args: size: The number of rows to return. Returns: A list of rows, or an empty list if there is no more data to return. """ ... def fetchall(self) -> List[Row]: """Retrieve all the remaining rows in the current result set for the query, as a list. Returns: A list of rows, or an empty list if there is no more data to return. """ ... def fetchval(self) -> Any: """A convenience function for returning the first column of the first row from the query. Returns: The value in the first column of the first row, or None if there is no data. """ ... def skip(self, count: int, /) -> None: """Skip over rows in the current result set of a query. Args: count: The number of rows to skip. """ ... def nextset(self) -> bool: """Switch to the next result set in the SQL query (e.g. if there are multiple SELECT statements in the SQL script). 
Returns: True if there are more result sets, False otherwise. """ ... def commit(self) -> None: """Commit all SQL statements executed on the parent connection since the last commit/rollback. Note, this affects ALL cursors on the parent connection. Hence, consider calling commit() on the parent Connection object instead. """ ... def rollback(self) -> None: """Rollback all SQL statements executed on the parent connection since the last commit/rollback. Note, this affects ALL cursors on the parent connection. Hence, consider calling rollback() on the parent Connection object instead. """ ... def cancel(self) -> None: """Cancel the processing of the current query. Typically this has to be called from a separate thread. """ ... def close(self) -> None: """Close the cursor, discarding any remaining result sets and/or messages.""" ... # functions to retrieve database metadata def tables(self, table: Optional[str] = None, catalog: Optional[str] = None, schema: Optional[str] = None, tableType: Optional[str] = None) -> Cursor: """Return information about tables in the database, typically from the INFORMATION_SCHEMA.TABLES metadata view. Parameter values can include wildcard characters. Args: table: Name of the database table. catalog: Name of the catalog (database). schema: Name of the table schema. tableType: Kind of table, e.g. "BASE TABLE". Returns: The cursor object, containing table information in the result set. """ ... def columns(self, table: Optional[str] = None, catalog: Optional[str] = None, schema: Optional[str] = None, column: Optional[str] = None) -> Cursor: """Return information about columns in database tables, typically from the INFORMATION_SCHEMA.COLUMNS metadata view. Parameter values can include wildcard characters. Args: table: Name of the database table. catalog: Name of the catalog (database). schema: Name of the table schema. column: Name of the column in the table. Returns: The cursor object, containing column information in the result set. """ ... def statistics(self, table: str, catalog: Optional[str] = None, schema: Optional[str] = None, unique: bool = False, quick: bool = True) -> Cursor: """Return statistical information about database tables. Parameter values can include wildcard characters. Args: table: Name of the database table. catalog: Name of the catalog (database). schema: Name of the table schema. unique: If True, include information about unique indexes only, not all indexes. quick: If True, CARDINALITY and PAGES are returned only if they are readily available, otherwise None is returned for them. Returns: The cursor object, containing statistical information in the result set. """ ... def rowIdColumns(self, table: str, catalog: Optional[str] = None, schema: Optional[str] = None, nullable: bool = True) -> Cursor: """Return the column(s) in a database table that uniquely identify each row (e.g. the primary key column). Parameter values can include wildcard characters. Args: table: Name of the database table. catalog: Name of the catalog (database). schema: Name of the table schema. nullable: If True, include sets of columns that are nullable. Returns: The cursor object, containing the relevant column information in the result set. """ ... def rowVerColumns(self, table: str, catalog: Optional[str] = None, schema: Optional[str] = None, nullable: bool = True) -> Cursor: """Return the column(s) in a database table that are updated whenever the row is updated. Parameter values can include wildcard characters. Args: table: Name of the database table. 
catalog: Name of the catalog (database). schema: Name of the table schema. nullable: If True, include sets of columns that are nullable. Returns: The cursor object, containing the relevant column information in the result set. """ ... def primaryKeys(self, table: str, catalog: Optional[str] = None, schema: Optional[str] = None) -> Cursor: """Return the column(s) in a database table that make up the primary key on the table. Parameter values can include wildcard characters. Args: table: Name of the database table. catalog: Name of the catalog (database). schema: Name of the table schema. Returns: The cursor object, containing primary key information in the result set. """ ... def foreignKeys(self, table: Optional[str] = None, catalog: Optional[str] = None, schema: Optional[str] = None, foreignTable: Optional[str] = None, foreignCatalog: Optional[str] = None, foreignSchema: Optional[str] = None) -> Cursor: """Return the foreign keys in a database table, i.e. any columns that refer to primary key columns on another table. Parameter values can include wildcard characters. Args: table: Name of the database table. catalog: Name of the catalog (database). schema: Name of the table schema. foreignTable: Name of the foreign database table. foreignCatalog: Name of the foreign catalog (database). foreignSchema: Name of the foreign table schema. Returns: The cursor object, containing foreign key information in the result set. """ ... def procedures(self, procedure: Optional[str] = None, catalog: Optional[str] = None, schema: Optional[str] = None) -> Cursor: """Return information about stored procedures. Parameter values can include wildcard characters. Args: procedure: Name of the stored procedure. catalog: Name of the catalog (database). schema: Name of the table schema. Returns: The cursor object, containing stored procedure information in the result set. """ ... def procedureColumns(self, procedure: Optional[str] = None, catalog: Optional[str] = None, schema: Optional[str] = None) -> Cursor: """Return information about the columns used as input/output parameters in stored procedures. Parameter values can include wildcard characters. Args: procedure: Name of the stored procedure. catalog: Name of the catalog (database). schema: Name of the table schema. Returns: The cursor object, containing stored procedure column information in the result set. """ ... def getTypeInfo(self, sqlType: Optional[int] = None, /) -> Cursor: """Return information about data types supported by the data source. Args: sqlType: The SQL data type. Returns: The cursor object, containing information about the SQL data type in the result set. """ ... class Row: """The class representing a single record in the result set from a query. Objects of this class behave somewhat similarly to a NamedTuple. Column values can be accessed by column name (i.e. using dot notation) or by row index. """ @property def cursor_description(self) -> Tuple[Tuple[str, Any, int, int, int, int, bool]]: """The metadata for the columns in this Row, as retrieved from the parent Cursor object.""" ... # implemented dunder methods def __contains__(self, key, /) -> int: ... def __delattr__(self, name, /) -> None: ... def __delitem__(self, key, /) -> None: ... def __eq__(self, value, /) -> bool: ... def __ge__(self, value, /) -> bool: ... def __getattribute__(self, name, /) -> Any: ... def __getitem__(self, key, /) -> Any: ... def __gt__(self, value, /) -> bool: ... def __iter__(self) -> Iterator[Any]: ... def __le__(self, value, /) -> bool: ... 
def __len__(self, /) -> int: ... def __lt__(self, value, /) -> bool: ... def __ne__(self, value, /) -> bool: ... def __reduce__(self) -> Any: ... def __repr__(self, /) -> str: ... def __setattr__(self, name, value, /) -> None: ... def __setitem__(self, key, value, /) -> None: ... # module functions def dataSources() -> Dict[str, str]: """Return all available Data Source Names (DSNs), typically from the odbcinst.ini file or the Windows ODBC Data Source Administrator. Returns: A dictionary of DSNs and their textual descriptions. """ ... def drivers() -> List[str]: """Return the names of all available ODBC drivers, typically from the odbc.ini file or the Windows ODBC Data Source Administrator. Returns: A list of driver names. """ ... def getDecimalSeparator() -> str: """Retrieve the decimal separator character used when parsing NUMERIC/DECIMAL values from the database, e.g. the "." in "1,234.56". Returns: The decimal separator character. """ ... def setDecimalSeparator(sep: str, /) -> None: """Set the decimal separator character used when parsing NUMERIC/DECIMAL values from the database, e.g. the "." in "1,234.56". Args: sep: The decimal separator character. """ ... def connect(connstring: Optional[str] = None, /, *, # only positional parameters before, only named parameters after autocommit: bool = False, encoding: str = 'utf-16le', readonly: bool = False, timeout: int = 0, attrs_before: Optional[Dict[int, Any]] = None, **kwargs: Any) -> Connection: """Create a new ODBC connection to a database. See the Wiki for details: https://github.com/mkleehammer/pyodbc/wiki/The-pyodbc-Module#connect Args: connstring: The connection string, which is passed verbatim to the driver manager. autocommit: If True, instructs the database to commit after each SQL statement. encoding: Encoding codec used when sending textual connection parameters to the database. readonly: To set the connection read-only. Not all drivers and/or databases support this. timeout: Set the connection timeout, in seconds. This is managed by the driver, not pyodbc, and not all drivers support this. attrs_before: Set low-level connection attributes before a connection is attempted. **kwargs: These key/value pairs are used to construct the connection string, or add to it (as "key=value;" combinations). Returns: A new Connection object. """ ... 
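# ---------------------------------------------------------------------------
# A minimal usage sketch of the API annotated above (illustrative only, not
# part of the stubs; the DSN name "mydsn" is hypothetical):
#
#     import pyodbc
#     cnxn = pyodbc.connect("DSN=mydsn")      # or pyodbc.connect(dsn="mydsn")
#     cursor = cnxn.cursor()
#     cursor.execute("SELECT 1 AS answer")
#     row = cursor.fetchone()
#     print(row.answer, row[0])               # Row: access by name or by index
#     cnxn.commit()
#     cnxn.close()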
././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1707151232.0 pyodbc-5.1.0/src/pyodbcdbg.cpp0000644000175100001770000000532314560207600015635 0ustar00runnerdocker #include "pyodbc.h" #include "dbspecific.h" void PrintBytes(void* p, size_t len) { unsigned char* pch = (unsigned char*)p; for (size_t i = 0; i < len; i++) printf("%02x ", (int)pch[i]); printf("\n"); } #define _MAKESTR(n) case n: return #n const char* SqlTypeName(SQLSMALLINT n) { switch (n) { _MAKESTR(SQL_UNKNOWN_TYPE); _MAKESTR(SQL_CHAR); _MAKESTR(SQL_VARCHAR); _MAKESTR(SQL_LONGVARCHAR); _MAKESTR(SQL_NUMERIC); _MAKESTR(SQL_DECIMAL); _MAKESTR(SQL_INTEGER); _MAKESTR(SQL_SMALLINT); _MAKESTR(SQL_FLOAT); _MAKESTR(SQL_REAL); _MAKESTR(SQL_DOUBLE); _MAKESTR(SQL_DATETIME); _MAKESTR(SQL_WCHAR); _MAKESTR(SQL_WVARCHAR); _MAKESTR(SQL_WLONGVARCHAR); _MAKESTR(SQL_TYPE_DATE); _MAKESTR(SQL_TYPE_TIME); _MAKESTR(SQL_TYPE_TIMESTAMP); _MAKESTR(SQL_SS_TIME2); _MAKESTR(SQL_SS_XML); _MAKESTR(SQL_BINARY); _MAKESTR(SQL_VARBINARY); _MAKESTR(SQL_LONGVARBINARY); } return "unknown"; } const char* CTypeName(SQLSMALLINT n) { switch (n) { _MAKESTR(SQL_C_CHAR); _MAKESTR(SQL_C_WCHAR); _MAKESTR(SQL_C_LONG); _MAKESTR(SQL_C_SHORT); _MAKESTR(SQL_C_FLOAT); _MAKESTR(SQL_C_DOUBLE); _MAKESTR(SQL_C_NUMERIC); _MAKESTR(SQL_C_DEFAULT); _MAKESTR(SQL_C_DATE); _MAKESTR(SQL_C_TIME); _MAKESTR(SQL_C_TIMESTAMP); _MAKESTR(SQL_C_TYPE_DATE); _MAKESTR(SQL_C_TYPE_TIME); _MAKESTR(SQL_C_TYPE_TIMESTAMP); _MAKESTR(SQL_C_INTERVAL_YEAR); _MAKESTR(SQL_C_INTERVAL_MONTH); _MAKESTR(SQL_C_INTERVAL_DAY); _MAKESTR(SQL_C_INTERVAL_HOUR); _MAKESTR(SQL_C_INTERVAL_MINUTE); _MAKESTR(SQL_C_INTERVAL_SECOND); _MAKESTR(SQL_C_INTERVAL_YEAR_TO_MONTH); _MAKESTR(SQL_C_INTERVAL_DAY_TO_HOUR); _MAKESTR(SQL_C_INTERVAL_DAY_TO_MINUTE); _MAKESTR(SQL_C_INTERVAL_DAY_TO_SECOND); _MAKESTR(SQL_C_INTERVAL_HOUR_TO_MINUTE); _MAKESTR(SQL_C_INTERVAL_HOUR_TO_SECOND); _MAKESTR(SQL_C_INTERVAL_MINUTE_TO_SECOND); _MAKESTR(SQL_C_BINARY); _MAKESTR(SQL_C_BIT); _MAKESTR(SQL_C_SBIGINT); _MAKESTR(SQL_C_UBIGINT); _MAKESTR(SQL_C_TINYINT); _MAKESTR(SQL_C_SLONG); _MAKESTR(SQL_C_SSHORT); _MAKESTR(SQL_C_STINYINT); _MAKESTR(SQL_C_ULONG); _MAKESTR(SQL_C_USHORT); _MAKESTR(SQL_C_UTINYINT); _MAKESTR(SQL_C_GUID); } return "unknown"; } #ifdef PYODBC_TRACE void DebugTrace(const char* szFmt, ...) { va_list marker; va_start(marker, szFmt); vprintf(szFmt, marker); va_end(marker); } #endif ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1707151232.0 pyodbc-5.1.0/src/pyodbcmodule.cpp0000644000175100001770000013022114560207600016362 0ustar00runnerdocker// Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated // documentation files (the "Software"), to deal in the Software without restriction, including without limitation the // rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to // permit persons to whom the Software is furnished to do so. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE // WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS // OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR // OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
#include "pyodbc.h" #include "wrapper.h" #include "textenc.h" #include "pyodbcmodule.h" #include "connection.h" #include "cursor.h" #include "row.h" #include "errors.h" #include "getdata.h" #include "cnxninfo.h" #include "params.h" #include "dbspecific.h" #include "decimal.h" #include #include #include static PyObject* MakeConnectionString(PyObject* existing, PyObject* parts); PyObject* pModule = 0; static char module_doc[] = "A database module for accessing databases via ODBC.\n" "\n" "This module conforms to the DB API 2.0 specification while providing\n" "non-standard convenience features. Only standard Python data types are used\n" "so additional DLLs are not required.\n" "\n" "Static Variables:\n\n" "version\n" " The module version string. Official builds will have a version in the format\n" " `major.minor.revision`, such as 2.1.7. Beta versions will have -beta appended,\n" " such as 2.1.8-beta03. (This would be a build before the official 2.1.8 release.)\n" " Some special test builds will have a test name (the git branch name) prepended,\n" " such as fixissue90-2.1.8-beta03.\n" "\n" "apilevel\n" " The string constant '2.0' indicating this module supports DB API level 2.0.\n" "\n" "lowercase\n" " A Boolean that controls whether column names in result rows are lowercased.\n" " This can be changed any time and affects queries executed after the change.\n" " The default is False. This can be useful when database columns have\n" " inconsistent capitalization.\n" "\n" "pooling\n" " A Boolean indicating whether connection pooling is enabled. This is a\n" " global (HENV) setting, so it can only be modified before the first\n" " connection is made. The default is True, which enables ODBC connection\n" " pooling.\n" "\n" "threadsafety\n" " The integer 1, indicating that threads may share the module but not\n" " connections. Note that connections and cursors may be used by different\n" " threads, just not at the same time.\n" "\n" "paramstyle\n" " The string constant 'qmark' to indicate parameters are identified using\n" " question marks.\n" "\n" "odbcversion\n" " The ODBC version number as a string, such as '3.X' for ODBC 3.X compatibility.\n" " This is a global (HENV) setting, so it can only be modified before the first\n" " connection is made. Use 3.8 if you are using unixodbc connection pooling and your\n" " drivers are all 3.8 compatible. The default is '3.X'."; PyObject* Error; PyObject* Warning; PyObject* InterfaceError; PyObject* DatabaseError; PyObject* InternalError; PyObject* OperationalError; PyObject* ProgrammingError; PyObject* IntegrityError; PyObject* DataError; PyObject* NotSupportedError; struct ExcInfo { const char* szName; const char* szFullName; PyObject** ppexc; PyObject** ppexcParent; const char* szDoc; }; #define MAKEEXCINFO(name, parent, doc) { #name, "pyodbc." #name, &name, &parent, doc } static ExcInfo aExcInfos[] = { MAKEEXCINFO(Error, PyExc_Exception, "Exception that is the base class of all other error exceptions. 
You can use\n" "this to catch all errors with one single 'except' statement."), MAKEEXCINFO(Warning, PyExc_Exception, "Exception raised for important warnings like data truncations while inserting,\n" " etc."), MAKEEXCINFO(InterfaceError, Error, "Exception raised for errors that are related to the database interface rather\n" "than the database itself."), MAKEEXCINFO(DatabaseError, Error, "Exception raised for errors that are related to the database."), MAKEEXCINFO(DataError, DatabaseError, "Exception raised for errors that are due to problems with the processed data\n" "like division by zero, numeric value out of range, etc."), MAKEEXCINFO(OperationalError, DatabaseError, "Exception raised for errors that are related to the database's operation and\n" "not necessarily under the control of the programmer, e.g. an unexpected\n" "disconnect occurs, the data source name is not found, a transaction could not\n" "be processed, a memory allocation error occurred during processing, etc."), MAKEEXCINFO(IntegrityError, DatabaseError, "Exception raised when the relational integrity of the database is affected,\n" "e.g. a foreign key check fails."), MAKEEXCINFO(InternalError, DatabaseError, "Exception raised when the database encounters an internal error, e.g. the\n" "cursor is not valid anymore, the transaction is out of sync, etc."), MAKEEXCINFO(ProgrammingError, DatabaseError, "Exception raised for programming errors, e.g. table not found or already\n" "exists, syntax error in the SQL statement, wrong number of parameters\n" "specified, etc."), MAKEEXCINFO(NotSupportedError, DatabaseError, "Exception raised in case a method or database API was used which is not\n" "supported by the database, e.g. requesting a .rollback() on a connection that\n" "does not support transactions or has transactions turned off.") }; bool PyMem_Realloc(BYTE** pp, size_t newlen) { // A wrapper around realloc with a safer interface. If it is successful, *pp is updated to the // new pointer value. If not successful, it is not modified. (It is easy to forget and lose // the old pointer value with realloc.) BYTE* pT = (BYTE*)PyMem_Realloc(*pp, newlen); if (pT == 0) return false; *pp = pT; return true; } bool UseNativeUUID() { PyObject* o = PyObject_GetAttrString(pModule, "native_uuid"); // If this fails for some reason, we'll assume false and allow the exception to pop up later. bool b = o && PyObject_IsTrue(o); Py_XDECREF(o); return b; } HENV henv = SQL_NULL_HANDLE; PyObject* GetClassForThread(const char* szModule, const char* szClass) { // Returns the given class, specific to the current thread's interpreter. For performance // these are cached for each thread. // // This is for internal use only, so we'll cache using only the class name. Make sure they // are unique. (That is, don't try to import classes with the same name from two different // modules.) PyObject* dict = PyThreadState_GetDict(); assert(dict); if (dict == 0) { // I don't know why there wouldn't be thread state so I'm going to raise an exception // unless I find more info. return PyErr_Format(PyExc_Exception, "pyodbc: PyThreadState_GetDict returned NULL"); } // Check the cache. GetItemString returns a borrowed reference. PyObject* cls = PyDict_GetItemString(dict, szClass); if (cls) { Py_INCREF(cls); return cls; } // Import the class and cache it. GetAttrString returns a new reference.
PyObject* mod = PyImport_ImportModule(szModule); if (!mod) return 0; cls = PyObject_GetAttrString(mod, szClass); Py_DECREF(mod); if (!cls) return 0; // SetItemString increments the refcount (not documented) PyDict_SetItemString(dict, szClass, cls); return cls; } bool IsInstanceForThread(PyObject* param, const char* szModule, const char* szClass, PyObject** pcls) { // Like PyObject_IsInstance but compares against a class specific to the current thread's // interpreter, for proper subinterpreter support. Uses GetClassForThread. // // If `param` is an instance of the given class, true is returned and a new reference to // the class, specific to the current thread, is returned via pcls. The caller is // responsible for decrementing the class. // // If `param` is not an instance, true is still returned (!) but *pcls will be zero. // // False is only returned when an exception has been raised. (That is, the return value is // not used to indicate whether the instance check matched or not.) if (param == 0) { *pcls = 0; return true; } PyObject* cls = GetClassForThread(szModule, szClass); if (!cls) { *pcls = 0; return false; } int n = PyObject_IsInstance(param, cls); // (The checks below can be compressed into just a few lines, but I was concerned it // wouldn't be clear.) if (n == 1) { // We have a match. *pcls = cls; return true; } Py_DECREF(cls); *pcls = 0; if (n == 0) { // No exception, but not a match. return true; } // n == -1; an exception occurred return false; } static bool import_types() { // Note: We can only import types from C extensions since they are shared among all // interpreters. Other classes are imported per-thread via GetClassForThread. // In Python 2.5 final, PyDateTime_IMPORT no longer works unless the datetime module was previously // imported (among other problems). PyObject* pdt = PyImport_ImportModule("datetime"); if (!pdt) return false; PyDateTime_IMPORT; Cursor_init(); if (!CnxnInfo_init()) return false; GetData_init(); if (!Params_init()) return false; if (!InitializeDecimal()) return false; return true; } static bool AllocateEnv() { PyObject* pooling = PyObject_GetAttrString(pModule, "pooling"); bool bPooling = pooling == Py_True; Py_DECREF(pooling); if (bPooling) { if (!SQL_SUCCEEDED(SQLSetEnvAttr(SQL_NULL_HANDLE, SQL_ATTR_CONNECTION_POOLING, (SQLPOINTER)SQL_CP_ONE_PER_HENV, sizeof(int)))) { PyErr_SetString(PyExc_RuntimeError, "Unable to set SQL_ATTR_CONNECTION_POOLING attribute."); return false; } } if (!SQL_SUCCEEDED(SQLAllocHandle(SQL_HANDLE_ENV, SQL_NULL_HANDLE, &henv))) { PyErr_SetString(PyExc_RuntimeError, "Can't initialize module pyodbc. 
SQLAllocEnv failed."); return false; } SQLPOINTER defaultVersion = (SQLPOINTER)SQL_OV_ODBC3; PyObject* odbcversion = PyObject_GetAttrString(pModule, "odbcversion"); if (PyObject_TypeCheck(odbcversion, &PyUnicode_Type)) { if (PyUnicode_CompareWithASCIIString(odbcversion, "3.8") == 0) { defaultVersion = (SQLPOINTER)SQL_OV_ODBC3_80; } } Py_DECREF(odbcversion); if (!SQL_SUCCEEDED(SQLSetEnvAttr(henv, SQL_ATTR_ODBC_VERSION, defaultVersion, sizeof(int)))) { PyErr_SetString(PyExc_RuntimeError, "Unable to set SQL_ATTR_ODBC_VERSION attribute."); return false; } return true; } static bool CheckAttrsVal(PyObject *val, bool allowSeq) { if (PyLong_Check(val) || PyByteArray_Check(val) || PyBytes_Check(val) || PyUnicode_Check(val)) return true; if (allowSeq && PySequence_Check(val)) { Py_ssize_t len = PySequence_Size(val); for (Py_ssize_t i = 0; i < len; i++) { Object v(PySequence_GetItem(val, i)); if (!CheckAttrsVal(v, false)) return false; } return true; } return PyErr_Format(PyExc_TypeError, "Attribute dictionary attrs must be" " integers, buffers, bytes, %s", allowSeq ? "strings, or sequences" : "or strings") != 0; } static PyObject* _CheckAttrsDict(PyObject* attrs) { // The attrs_before dictionary must have integer keys; the values may be integers, strings, // bytes, byte arrays, or sequences of these. If valid and non-empty, // increment the reference count and return the pointer to indicate the calling code should // keep it. If empty, just return zero which indicates to the calling code it should not // keep the value. If an error occurs, set an error. The calling code must look for this // in the zero case. // We already know this is a dictionary. if (PyDict_Size(attrs) == 0) return 0; Py_ssize_t pos = 0; PyObject* key = 0; PyObject* value = 0; while (PyDict_Next(attrs, &pos, &key, &value)) { if (!PyLong_Check(key)) return PyErr_Format(PyExc_TypeError, "Attribute dictionary keys must be integers"); if (!CheckAttrsVal(value, true)) return 0; } Py_INCREF(attrs); return attrs; } // Map DB API recommended keywords to ODBC keywords. struct keywordmap { const char* oldname; const char* newname; PyObject* newnameObject; // PyString object version of newname, created as needed. }; static keywordmap keywordmaps[] = { { "user", "uid", 0 }, { "password", "pwd", 0 }, { "host", "server", 0 }, }; static PyObject* mod_connect(PyObject* self, PyObject* args, PyObject* kwargs) { UNUSED(self); Object pConnectString; int fAutoCommit = 0; int fReadOnly = 0; long timeout = 0; Object encoding; Object attrs_before; // Optional connect attrs set before connecting Py_ssize_t size = args ? PyTuple_Size(args) : 0; if (size > 1) { PyErr_SetString(PyExc_TypeError, "function takes at most 1 non-keyword argument"); return 0; } if (size == 1) { if (!PyUnicode_Check(PyTuple_GET_ITEM(args, 0))) return PyErr_Format(PyExc_TypeError, "argument 1 must be a string or unicode object"); pConnectString.Attach(PyUnicode_FromObject(PyTuple_GetItem(args, 0))); if (!pConnectString.IsValid()) return 0; } if (kwargs && PyDict_Size(kwargs) > 0) { Object partsdict(PyDict_New()); if (!partsdict.IsValid()) return 0; Py_ssize_t pos = 0; PyObject* key = 0; PyObject* value = 0; Object okey; // in case we need to allocate a new key while (PyDict_Next(kwargs, &pos, &key, &value)) { if (!PyUnicode_Check(key)) return PyErr_Format(PyExc_TypeError, "Dictionary keys passed to connect must be strings"); // // Note: key and value are *borrowed*. // // // Check for the two non-connection string keywords we accept. (If we get many more of these, create something // // table driven.
Are we sure there isn't a Python function to parse keywords but leave those it doesn't know?) // const char* szKey = PyUnicode_AsString(key); if (PyUnicode_CompareWithASCIIString(key, "autocommit") == 0) { fAutoCommit = PyObject_IsTrue(value); continue; } if (PyUnicode_CompareWithASCIIString(key, "timeout") == 0) { timeout = PyLong_AsLong(value); if (PyErr_Occurred()) return 0; continue; } if (PyUnicode_CompareWithASCIIString(key, "readonly") == 0) { fReadOnly = PyObject_IsTrue(value); continue; } if (PyUnicode_CompareWithASCIIString(key, "attrs_before") == 0 && PyDict_Check(value)) { attrs_before = _CheckAttrsDict(value); if (PyErr_Occurred()) return 0; continue; } if (PyUnicode_CompareWithASCIIString(key, "encoding") == 0) { if (!PyUnicode_Check(value)) return PyErr_Format(PyExc_TypeError, "encoding must be a string"); encoding = value; continue; } // Map DB API recommended names to ODBC names (e.g. user --> uid). for (size_t i = 0; i < _countof(keywordmaps); i++) { if (PyUnicode_CompareWithASCIIString(key, keywordmaps[i].oldname) == 0) { if (keywordmaps[i].newnameObject == 0) { keywordmaps[i].newnameObject = PyUnicode_FromString(keywordmaps[i].newname); if (keywordmaps[i].newnameObject == 0) return 0; } key = keywordmaps[i].newnameObject; break; } } PyObject* str = PyObject_Str(value); // convert if necessary if (!str) return 0; if (PyDict_SetItem(partsdict.Get(), key, str) == -1) { Py_XDECREF(str); return 0; } Py_XDECREF(str); } if (PyDict_Size(partsdict.Get())) pConnectString.Attach(MakeConnectionString(pConnectString.Get(), partsdict)); } if (!pConnectString.IsValid()) return PyErr_Format(PyExc_TypeError, "no connection information was passed"); if (henv == SQL_NULL_HANDLE) { if (!AllocateEnv()) return 0; } return (PyObject*)Connection_New(pConnectString.Get(), fAutoCommit != 0, timeout, fReadOnly != 0, attrs_before.Detach(), encoding); } static PyObject* mod_drivers(PyObject* self) { UNUSED(self); if (henv == SQL_NULL_HANDLE && !AllocateEnv()) return 0; Object result(PyList_New(0)); if (!result) return 0; SQLCHAR szDriverDesc[500]; SWORD cbDriverDesc; SWORD cbAttrs; SQLRETURN ret; SQLUSMALLINT nDirection = SQL_FETCH_FIRST; for (;;) { ret = SQLDrivers(henv, nDirection, szDriverDesc, _countof(szDriverDesc), &cbDriverDesc, 0, 0, &cbAttrs); if (!SQL_SUCCEEDED(ret)) break; // REVIEW: This is another reason why we really need a factory that we can use. At this // point we don't have a global text encoding that we can assume for this. Somehow it // seems to be working to use UTF-8, even on Windows. Object name(PyUnicode_FromString((const char*)szDriverDesc)); if (!name) return 0; if (PyList_Append(result, name.Get()) != 0) return 0; name.Detach(); nDirection = SQL_FETCH_NEXT; } if (ret != SQL_NO_DATA) { Py_DECREF(result); return RaiseErrorFromHandle(0, "SQLDrivers", SQL_NULL_HANDLE, SQL_NULL_HANDLE); } return result.Detach(); } static PyObject* mod_datasources(PyObject* self) { UNUSED(self); if (henv == SQL_NULL_HANDLE && !AllocateEnv()) return 0; PyObject* result = PyDict_New(); if (!result) return 0; // Using a buffer larger than SQL_MAX_DSN_LENGTH + 1 for systems that ignore it. 
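// For reference, the Python-visible result is a dict mapping each DSN name to
// its driver description, e.g. (names illustrative only):
//
//     >>> pyodbc.dataSources()
//     {'mydsn': 'PostgreSQL Unicode', 'sales': 'SQL Server'}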
#if defined(WIN32) || defined(_WIN32) || defined(__WIN32__) || defined(__NT__) SQLWCHAR szDSN[500]; SQLWCHAR szDesc[500]; #else SQLCHAR szDSN[500]; SQLCHAR szDesc[500]; #endif SWORD cbDSN; SWORD cbDesc; SQLUSMALLINT nDirection = SQL_FETCH_FIRST; SQLRETURN ret; for (;;) { #if defined(WIN32) || defined(_WIN32) || defined(__WIN32__) || defined(__NT__) // wchar_t and UTF-16 on Windows ret = SQLDataSourcesW(henv, nDirection, szDSN, _countof(szDSN), &cbDSN, szDesc, _countof(szDesc), &cbDesc); if (!SQL_SUCCEEDED(ret)) break; int byteorder = BYTEORDER_NATIVE; PyObject* key = PyUnicode_DecodeUTF16((char*)szDSN, cbDSN * sizeof(wchar_t), "strict", &byteorder); PyObject* val = PyUnicode_DecodeUTF16((char*)szDesc, cbDesc * sizeof(wchar_t), "strict", &byteorder); #else // UTF-8 ret = SQLDataSources(henv, nDirection, szDSN, _countof(szDSN), &cbDSN, szDesc, _countof(szDesc), &cbDesc); if (!SQL_SUCCEEDED(ret)) break; PyObject* key = PyUnicode_FromString((const char*)szDSN); PyObject* val = PyUnicode_FromString((const char*)szDesc); #endif // PyDict_SetItem does not steal the key/value references, so release ours afterwards. if(key && val) PyDict_SetItem(result, key, val); Py_XDECREF(key); Py_XDECREF(val); nDirection = SQL_FETCH_NEXT; } if (ret != SQL_NO_DATA) { Py_DECREF(result); return RaiseErrorFromHandle(0, "SQLDataSources", SQL_NULL_HANDLE, SQL_NULL_HANDLE); } return result; } static PyObject* mod_timefromticks(PyObject* self, PyObject* args) { UNUSED(self); PyObject* num; if (!PyArg_ParseTuple(args, "O", &num)) return 0; if (!PyNumber_Check(num)) return PyErr_Format(PyExc_TypeError, "TimeFromTicks requires a number."); Object l(PyNumber_Long(num)); if (!l) return 0; // Read the converted long (not the original object) so float ticks work too. time_t t = PyLong_AsLong(l.Get()); struct tm* fields = localtime(&t); return PyTime_FromTime(fields->tm_hour, fields->tm_min, fields->tm_sec, 0); } static PyObject* mod_datefromticks(PyObject* self, PyObject* args) { UNUSED(self); return PyDate_FromTimestamp(args); } static PyObject* mod_timestampfromticks(PyObject* self, PyObject* args) { UNUSED(self); return PyDateTime_FromTimestamp(args); } static PyObject* mod_setdecimalsep(PyObject* self, PyObject* args) { UNUSED(self); const char* type = "U"; PyObject* p; if (!PyArg_ParseTuple(args, type, &p)) return 0; if (!SetDecimalPoint(p)) return 0; Py_RETURN_NONE; } static PyObject* mod_getdecimalsep(PyObject* self) { UNUSED(self); return GetDecimalPoint(); } static char connect_doc[] = "connect(str, autocommit=False, timeout=0, **kwargs) --> Connection\n" "\n" "Accepts an ODBC connection string and returns a new Connection object.\n" "\n" "The connection string will be passed to SQLDriverConnect, so a DSN connection\n" "can be created using:\n" "\n" " cnxn = pyodbc.connect('DSN=DataSourceName;UID=user;PWD=password')\n" "\n" "To connect without requiring a DSN, specify the driver and connection\n" "information:\n" "\n" " DRIVER={SQL Server};SERVER=localhost;DATABASE=testdb;UID=user;PWD=password\n" "\n" "Note the use of braces when a value contains spaces. Refer to SQLDriverConnect\n" "documentation or the documentation of your ODBC driver for details.\n" "\n" "The connection string can be passed as the string `str`, as a list of keywords,\n" "or a combination of the two.
Any keywords except autocommit and timeout\n" "(see below) are simply added to the connection string.\n" "\n" " connect('server=localhost;user=me')\n" " connect(server='localhost', user='me')\n" " connect('server=localhost', user='me')\n" "\n" "The DB API recommends the keywords 'user', 'password', and 'host', but these\n" "are not valid ODBC keywords, so these will be converted to 'uid', 'pwd', and\n" "'server'.\n" "\n" "Special Keywords\n" "\n" "The following special keywords are processed by pyodbc and are not added to the\n" "connection string. (If you must use these in your connection string, pass them\n" "as a string, not as keywords.)\n" "\n" " autocommit\n" " If False or zero, the default, transactions are created automatically as\n" " defined in the DB API 2. If True or non-zero, the connection is put into\n" " ODBC autocommit mode and statements are committed automatically.\n" " \n" " timeout\n" " An integer login timeout in seconds, used to set the SQL_ATTR_LOGIN_TIMEOUT\n" " attribute of the connection. The default is 0 which means the database's\n" " default timeout, if any, is used.\n"; static char timefromticks_doc[] = "TimeFromTicks(ticks) --> datetime.time\n" "\n" "Returns a time object initialized from the given ticks value (number of seconds\n" "since the epoch; see the documentation of the standard Python time module for\n" "details)."; static char datefromticks_doc[] = "DateFromTicks(ticks) --> datetime.date\n" \ "\n" \ "Returns a date object initialized from the given ticks value (number of seconds\n" \ "since the epoch; see the documentation of the standard Python time module for\n" \ "details)."; static char timestampfromticks_doc[] = "TimestampFromTicks(ticks) --> datetime.datetime\n" \ "\n" \ "Returns a datetime object initialized from the given ticks value (number of\n" \ "seconds since the epoch; see the documentation of the standard Python time\n" \ "module for details)."; static char drivers_doc[] = "drivers() --> [ DriverName1, DriverName2 ... DriverNameN ]\n" \ "\n" \ "Returns a list of installed drivers."; static char datasources_doc[] = "dataSources() --> { DSN : Description }\n" \ "\n" \ "Returns a dictionary mapping available DSNs to their descriptions."; static char setdecimalsep_doc[] = "setDecimalSeparator(string) -> None\n" \ "\n" \ "Sets the decimal separator character used when parsing NUMERIC from the database."; static char getdecimalsep_doc[] = "getDecimalSeparator() -> string\n" \ "\n" \ "Gets the decimal separator character used when parsing NUMERIC from the database."; static PyMethodDef pyodbc_methods[] = { { "connect", (PyCFunction)mod_connect, METH_VARARGS|METH_KEYWORDS, connect_doc }, { "TimeFromTicks", (PyCFunction)mod_timefromticks, METH_VARARGS, timefromticks_doc }, { "DateFromTicks", (PyCFunction)mod_datefromticks, METH_VARARGS, datefromticks_doc }, { "setDecimalSeparator", (PyCFunction)mod_setdecimalsep, METH_VARARGS, setdecimalsep_doc }, { "getDecimalSeparator", (PyCFunction)mod_getdecimalsep, METH_NOARGS, getdecimalsep_doc }, { "TimestampFromTicks", (PyCFunction)mod_timestampfromticks, METH_VARARGS, timestampfromticks_doc }, { "drivers", (PyCFunction)mod_drivers, METH_NOARGS, drivers_doc }, { "dataSources", (PyCFunction)mod_datasources, METH_NOARGS, datasources_doc }, { 0, 0, 0, 0 } }; static void ErrorInit() { // Called during startup to initialize any variables that will be freed by ErrorCleanup.
Error = 0; Warning = 0; InterfaceError = 0; DatabaseError = 0; InternalError = 0; OperationalError = 0; ProgrammingError = 0; IntegrityError = 0; DataError = 0; NotSupportedError = 0; } static void ErrorCleanup() { // Called when an error occurs during initialization to release any objects we may have accessed. Make sure each // item released was initialized to zero. (Static objects are -- non-statics should be initialized in ErrorInit.) Py_XDECREF(Error); Py_XDECREF(Warning); Py_XDECREF(InterfaceError); Py_XDECREF(DatabaseError); Py_XDECREF(InternalError); Py_XDECREF(OperationalError); Py_XDECREF(ProgrammingError); Py_XDECREF(IntegrityError); Py_XDECREF(DataError); Py_XDECREF(NotSupportedError); } struct ConstantDef { const char* szName; int value; }; #define MAKECONST(v) { #v, v } static const ConstantDef aConstants[] = { MAKECONST(SQL_WMETADATA), MAKECONST(SQL_UNKNOWN_TYPE), MAKECONST(SQL_CHAR), MAKECONST(SQL_VARCHAR), MAKECONST(SQL_LONGVARCHAR), MAKECONST(SQL_WCHAR), MAKECONST(SQL_WVARCHAR), MAKECONST(SQL_WLONGVARCHAR), MAKECONST(SQL_DECIMAL), MAKECONST(SQL_NUMERIC), MAKECONST(SQL_SMALLINT), MAKECONST(SQL_INTEGER), MAKECONST(SQL_REAL), MAKECONST(SQL_FLOAT), MAKECONST(SQL_DOUBLE), MAKECONST(SQL_BIT), MAKECONST(SQL_TINYINT), MAKECONST(SQL_BIGINT), MAKECONST(SQL_BINARY), MAKECONST(SQL_VARBINARY), MAKECONST(SQL_LONGVARBINARY), MAKECONST(SQL_TYPE_DATE), MAKECONST(SQL_TYPE_TIME), MAKECONST(SQL_TYPE_TIMESTAMP), MAKECONST(SQL_SS_TIME2), MAKECONST(SQL_SS_XML), MAKECONST(SQL_INTERVAL_MONTH), MAKECONST(SQL_INTERVAL_YEAR), MAKECONST(SQL_INTERVAL_YEAR_TO_MONTH), MAKECONST(SQL_INTERVAL_DAY), MAKECONST(SQL_INTERVAL_HOUR), MAKECONST(SQL_INTERVAL_MINUTE), MAKECONST(SQL_INTERVAL_SECOND), MAKECONST(SQL_INTERVAL_DAY_TO_HOUR), MAKECONST(SQL_INTERVAL_DAY_TO_MINUTE), MAKECONST(SQL_INTERVAL_DAY_TO_SECOND), MAKECONST(SQL_INTERVAL_HOUR_TO_MINUTE), MAKECONST(SQL_INTERVAL_HOUR_TO_SECOND), MAKECONST(SQL_INTERVAL_MINUTE_TO_SECOND), MAKECONST(SQL_GUID), MAKECONST(SQL_NULLABLE), MAKECONST(SQL_NO_NULLS), MAKECONST(SQL_NULLABLE_UNKNOWN), // MAKECONST(SQL_INDEX_BTREE), // MAKECONST(SQL_INDEX_CLUSTERED), // MAKECONST(SQL_INDEX_CONTENT), // MAKECONST(SQL_INDEX_HASHED), // MAKECONST(SQL_INDEX_OTHER), MAKECONST(SQL_SCOPE_CURROW), MAKECONST(SQL_SCOPE_TRANSACTION), MAKECONST(SQL_SCOPE_SESSION), MAKECONST(SQL_PC_UNKNOWN), MAKECONST(SQL_PC_NOT_PSEUDO), MAKECONST(SQL_PC_PSEUDO), // SQLGetInfo MAKECONST(SQL_ACCESSIBLE_PROCEDURES), MAKECONST(SQL_ACCESSIBLE_TABLES), MAKECONST(SQL_ACTIVE_ENVIRONMENTS), MAKECONST(SQL_AGGREGATE_FUNCTIONS), MAKECONST(SQL_ALTER_DOMAIN), MAKECONST(SQL_ALTER_TABLE), MAKECONST(SQL_ASYNC_MODE), MAKECONST(SQL_BATCH_ROW_COUNT), MAKECONST(SQL_BATCH_SUPPORT), MAKECONST(SQL_BOOKMARK_PERSISTENCE), MAKECONST(SQL_CATALOG_LOCATION), MAKECONST(SQL_CATALOG_NAME), MAKECONST(SQL_CATALOG_NAME_SEPARATOR), MAKECONST(SQL_CATALOG_TERM), MAKECONST(SQL_CATALOG_USAGE), MAKECONST(SQL_COLLATION_SEQ), MAKECONST(SQL_COLUMN_ALIAS), MAKECONST(SQL_CONCAT_NULL_BEHAVIOR), MAKECONST(SQL_CONVERT_VARCHAR), MAKECONST(SQL_CORRELATION_NAME), MAKECONST(SQL_CREATE_ASSERTION), MAKECONST(SQL_CREATE_CHARACTER_SET), MAKECONST(SQL_CREATE_COLLATION), MAKECONST(SQL_CREATE_DOMAIN), MAKECONST(SQL_CREATE_SCHEMA), MAKECONST(SQL_CREATE_TABLE), MAKECONST(SQL_CREATE_TRANSLATION), MAKECONST(SQL_CREATE_VIEW), MAKECONST(SQL_CURSOR_COMMIT_BEHAVIOR), MAKECONST(SQL_CURSOR_ROLLBACK_BEHAVIOR), // MAKECONST(SQL_CURSOR_ROLLBACK_SQL_CURSOR_SENSITIVITY), MAKECONST(SQL_DATABASE_NAME), MAKECONST(SQL_DATA_SOURCE_NAME), MAKECONST(SQL_DATA_SOURCE_READ_ONLY), 
MAKECONST(SQL_DATETIME_LITERALS), MAKECONST(SQL_DBMS_NAME), MAKECONST(SQL_DBMS_VER), MAKECONST(SQL_DDL_INDEX), MAKECONST(SQL_DEFAULT_TXN_ISOLATION), MAKECONST(SQL_DESCRIBE_PARAMETER), MAKECONST(SQL_DM_VER), MAKECONST(SQL_DRIVER_HDESC), MAKECONST(SQL_DRIVER_HENV), MAKECONST(SQL_DRIVER_HLIB), MAKECONST(SQL_DRIVER_HSTMT), MAKECONST(SQL_DRIVER_NAME), MAKECONST(SQL_DRIVER_ODBC_VER), MAKECONST(SQL_DRIVER_VER), MAKECONST(SQL_DROP_ASSERTION), MAKECONST(SQL_DROP_CHARACTER_SET), MAKECONST(SQL_DROP_COLLATION), MAKECONST(SQL_DROP_DOMAIN), MAKECONST(SQL_DROP_SCHEMA), MAKECONST(SQL_DROP_TABLE), MAKECONST(SQL_DROP_TRANSLATION), MAKECONST(SQL_DROP_VIEW), MAKECONST(SQL_DYNAMIC_CURSOR_ATTRIBUTES1), MAKECONST(SQL_DYNAMIC_CURSOR_ATTRIBUTES2), MAKECONST(SQL_EXPRESSIONS_IN_ORDERBY), MAKECONST(SQL_FILE_USAGE), MAKECONST(SQL_FORWARD_ONLY_CURSOR_ATTRIBUTES1), MAKECONST(SQL_FORWARD_ONLY_CURSOR_ATTRIBUTES2), MAKECONST(SQL_GETDATA_EXTENSIONS), MAKECONST(SQL_GROUP_BY), MAKECONST(SQL_IDENTIFIER_CASE), MAKECONST(SQL_IDENTIFIER_QUOTE_CHAR), MAKECONST(SQL_INDEX_KEYWORDS), MAKECONST(SQL_INFO_SCHEMA_VIEWS), MAKECONST(SQL_INSERT_STATEMENT), MAKECONST(SQL_INTEGRITY), MAKECONST(SQL_KEYSET_CURSOR_ATTRIBUTES1), MAKECONST(SQL_KEYSET_CURSOR_ATTRIBUTES2), MAKECONST(SQL_KEYWORDS), MAKECONST(SQL_LIKE_ESCAPE_CLAUSE), MAKECONST(SQL_MAX_ASYNC_CONCURRENT_STATEMENTS), MAKECONST(SQL_MAX_BINARY_LITERAL_LEN), MAKECONST(SQL_MAX_CATALOG_NAME_LEN), MAKECONST(SQL_MAX_CHAR_LITERAL_LEN), MAKECONST(SQL_MAX_COLUMNS_IN_GROUP_BY), MAKECONST(SQL_MAX_COLUMNS_IN_INDEX), MAKECONST(SQL_MAX_COLUMNS_IN_ORDER_BY), MAKECONST(SQL_MAX_COLUMNS_IN_SELECT), MAKECONST(SQL_MAX_COLUMNS_IN_TABLE), MAKECONST(SQL_MAX_COLUMN_NAME_LEN), MAKECONST(SQL_MAX_CONCURRENT_ACTIVITIES), MAKECONST(SQL_MAX_CURSOR_NAME_LEN), MAKECONST(SQL_MAX_DRIVER_CONNECTIONS), MAKECONST(SQL_MAX_IDENTIFIER_LEN), MAKECONST(SQL_MAX_INDEX_SIZE), MAKECONST(SQL_MAX_PROCEDURE_NAME_LEN), MAKECONST(SQL_MAX_ROW_SIZE), MAKECONST(SQL_MAX_ROW_SIZE_INCLUDES_LONG), MAKECONST(SQL_MAX_SCHEMA_NAME_LEN), MAKECONST(SQL_MAX_STATEMENT_LEN), MAKECONST(SQL_MAX_TABLES_IN_SELECT), MAKECONST(SQL_MAX_TABLE_NAME_LEN), MAKECONST(SQL_MAX_USER_NAME_LEN), MAKECONST(SQL_MULTIPLE_ACTIVE_TXN), MAKECONST(SQL_MULT_RESULT_SETS), MAKECONST(SQL_NEED_LONG_DATA_LEN), MAKECONST(SQL_NON_NULLABLE_COLUMNS), MAKECONST(SQL_NULL_COLLATION), MAKECONST(SQL_NUMERIC_FUNCTIONS), MAKECONST(SQL_ODBC_INTERFACE_CONFORMANCE), MAKECONST(SQL_ODBC_VER), MAKECONST(SQL_OJ_CAPABILITIES), MAKECONST(SQL_ORDER_BY_COLUMNS_IN_SELECT), MAKECONST(SQL_PARAM_ARRAY_ROW_COUNTS), MAKECONST(SQL_PARAM_ARRAY_SELECTS), MAKECONST(SQL_PARAM_TYPE_UNKNOWN), MAKECONST(SQL_PARAM_INPUT), MAKECONST(SQL_PARAM_INPUT_OUTPUT), MAKECONST(SQL_PARAM_OUTPUT), MAKECONST(SQL_RETURN_VALUE), MAKECONST(SQL_RESULT_COL), MAKECONST(SQL_PROCEDURES), MAKECONST(SQL_PROCEDURE_TERM), MAKECONST(SQL_QUOTED_IDENTIFIER_CASE), MAKECONST(SQL_ROW_UPDATES), MAKECONST(SQL_SCHEMA_TERM), MAKECONST(SQL_SCHEMA_USAGE), MAKECONST(SQL_SCROLL_OPTIONS), MAKECONST(SQL_SEARCH_PATTERN_ESCAPE), MAKECONST(SQL_SERVER_NAME), MAKECONST(SQL_SPECIAL_CHARACTERS), MAKECONST(SQL_SQL92_DATETIME_FUNCTIONS), MAKECONST(SQL_SQL92_FOREIGN_KEY_DELETE_RULE), MAKECONST(SQL_SQL92_FOREIGN_KEY_UPDATE_RULE), MAKECONST(SQL_SQL92_GRANT), MAKECONST(SQL_SQL92_NUMERIC_VALUE_FUNCTIONS), MAKECONST(SQL_SQL92_PREDICATES), MAKECONST(SQL_SQL92_RELATIONAL_JOIN_OPERATORS), MAKECONST(SQL_SQL92_REVOKE), MAKECONST(SQL_SQL92_ROW_VALUE_CONSTRUCTOR), MAKECONST(SQL_SQL92_STRING_FUNCTIONS), MAKECONST(SQL_SQL92_VALUE_EXPRESSIONS), MAKECONST(SQL_SQL_CONFORMANCE), 
MAKECONST(SQL_STANDARD_CLI_CONFORMANCE), MAKECONST(SQL_STATIC_CURSOR_ATTRIBUTES1), MAKECONST(SQL_STATIC_CURSOR_ATTRIBUTES2), MAKECONST(SQL_STRING_FUNCTIONS), MAKECONST(SQL_SUBQUERIES), MAKECONST(SQL_SYSTEM_FUNCTIONS), MAKECONST(SQL_TABLE_TERM), MAKECONST(SQL_TIMEDATE_ADD_INTERVALS), MAKECONST(SQL_TIMEDATE_DIFF_INTERVALS), MAKECONST(SQL_TIMEDATE_FUNCTIONS), MAKECONST(SQL_TXN_CAPABLE), MAKECONST(SQL_TXN_ISOLATION_OPTION), MAKECONST(SQL_UNION), MAKECONST(SQL_USER_NAME), MAKECONST(SQL_XOPEN_CLI_YEAR), // Connection Attributes MAKECONST(SQL_ACCESS_MODE), MAKECONST(SQL_ATTR_ACCESS_MODE), MAKECONST(SQL_AUTOCOMMIT), MAKECONST(SQL_ATTR_AUTOCOMMIT), MAKECONST(SQL_LOGIN_TIMEOUT), MAKECONST(SQL_ATTR_LOGIN_TIMEOUT), MAKECONST(SQL_OPT_TRACE), MAKECONST(SQL_ATTR_TRACE), MAKECONST(SQL_OPT_TRACEFILE), MAKECONST(SQL_ATTR_TRACEFILE), MAKECONST(SQL_TRANSLATE_DLL), MAKECONST(SQL_ATTR_TRANSLATE_LIB), MAKECONST(SQL_TRANSLATE_OPTION), MAKECONST(SQL_ATTR_TRANSLATE_OPTION), MAKECONST(SQL_TXN_ISOLATION), MAKECONST(SQL_ATTR_TXN_ISOLATION), MAKECONST(SQL_CURRENT_QUALIFIER), MAKECONST(SQL_ATTR_CURRENT_CATALOG), MAKECONST(SQL_ODBC_CURSORS), MAKECONST(SQL_ATTR_ODBC_CURSORS), MAKECONST(SQL_QUIET_MODE), MAKECONST(SQL_ATTR_QUIET_MODE), MAKECONST(SQL_PACKET_SIZE), MAKECONST(SQL_ATTR_ANSI_APP), // SQL_CONVERT_X MAKECONST(SQL_CONVERT_FUNCTIONS), MAKECONST(SQL_CONVERT_BIGINT), MAKECONST(SQL_CONVERT_BINARY), MAKECONST(SQL_CONVERT_BIT), MAKECONST(SQL_CONVERT_CHAR), MAKECONST(SQL_CONVERT_DATE), MAKECONST(SQL_CONVERT_DECIMAL), MAKECONST(SQL_CONVERT_DOUBLE), MAKECONST(SQL_CONVERT_FLOAT), MAKECONST(SQL_CONVERT_GUID), MAKECONST(SQL_CONVERT_INTEGER), MAKECONST(SQL_CONVERT_INTERVAL_DAY_TIME), MAKECONST(SQL_CONVERT_INTERVAL_YEAR_MONTH), MAKECONST(SQL_CONVERT_LONGVARBINARY), MAKECONST(SQL_CONVERT_LONGVARCHAR), MAKECONST(SQL_CONVERT_NUMERIC), MAKECONST(SQL_CONVERT_REAL), MAKECONST(SQL_CONVERT_SMALLINT), MAKECONST(SQL_CONVERT_TIME), MAKECONST(SQL_CONVERT_TIMESTAMP), MAKECONST(SQL_CONVERT_TINYINT), MAKECONST(SQL_CONVERT_VARBINARY), MAKECONST(SQL_CONVERT_VARCHAR), MAKECONST(SQL_CONVERT_WCHAR), MAKECONST(SQL_CONVERT_WLONGVARCHAR), MAKECONST(SQL_CONVERT_WVARCHAR), // SQLSetConnectAttr transaction isolation MAKECONST(SQL_ATTR_TXN_ISOLATION), MAKECONST(SQL_TXN_READ_UNCOMMITTED), MAKECONST(SQL_TXN_READ_COMMITTED), MAKECONST(SQL_TXN_REPEATABLE_READ), MAKECONST(SQL_TXN_SERIALIZABLE), // Outer Join Capabilities MAKECONST(SQL_OJ_LEFT), MAKECONST(SQL_OJ_RIGHT), MAKECONST(SQL_OJ_FULL), MAKECONST(SQL_OJ_NESTED), MAKECONST(SQL_OJ_NOT_ORDERED), MAKECONST(SQL_OJ_INNER), MAKECONST(SQL_OJ_ALL_COMPARISON_OPS), }; static bool CreateExceptions() { for (unsigned int i = 0; i < _countof(aExcInfos); i++) { ExcInfo& info = aExcInfos[i]; PyObject* classdict = PyDict_New(); if (!classdict) return false; PyObject* doc = PyUnicode_FromString(info.szDoc); if (!doc) { Py_DECREF(classdict); return false; } PyDict_SetItemString(classdict, "__doc__", doc); Py_DECREF(doc); *info.ppexc = PyErr_NewException((char*)info.szFullName, *info.ppexcParent, classdict); if (*info.ppexc == 0) { Py_DECREF(classdict); return false; } // Keep a reference for our internal (C++) use. 
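        // PyModule_AddObject (below) steals a reference to the object on success, so the
        // extra Py_INCREF taken first is what keeps our C++-side pointer to the exception
        // class valid after the module takes ownership.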
Py_INCREF(*info.ppexc); PyModule_AddObject(pModule, (char*)info.szName, *info.ppexc); } return true; } static struct PyModuleDef moduledef = { PyModuleDef_HEAD_INIT, "pyodbc", // m_name module_doc, -1, // m_size pyodbc_methods, // m_methods 0, // m_reload 0, // m_traverse 0, // m_clear 0, // m_free }; PyMODINIT_FUNC PyInit_pyodbc() { ErrorInit(); if (PyType_Ready(&ConnectionType) < 0 || PyType_Ready(&CursorType) < 0 || PyType_Ready(&RowType) < 0 || PyType_Ready(&CnxnInfoType) < 0) return 0; Object module; module.Attach(PyModule_Create(&moduledef)); pModule = module.Get(); if (!module || !import_types() || !CreateExceptions()) return 0; const char* szVersion = TOSTRING(PYODBC_VERSION); PyModule_AddStringConstant(module, "version", (char*)szVersion); PyModule_AddIntConstant(module, "threadsafety", 1); PyModule_AddStringConstant(module, "apilevel", "2.0"); PyModule_AddStringConstant(module, "paramstyle", "qmark"); PyModule_AddStringConstant(module, "odbcversion", "3.X"); PyModule_AddObject(module, "pooling", Py_True); Py_INCREF(Py_True); PyModule_AddObject(module, "lowercase", Py_False); Py_INCREF(Py_False); PyModule_AddObject(module, "native_uuid", Py_False); Py_INCREF(Py_False); PyModule_AddObject(module, "Connection", (PyObject*)&ConnectionType); Py_INCREF((PyObject*)&ConnectionType); PyModule_AddObject(module, "Cursor", (PyObject*)&CursorType); Py_INCREF((PyObject*)&CursorType); PyModule_AddObject(module, "Row", (PyObject*)&RowType); Py_INCREF((PyObject*)&RowType); // Add the SQL_XXX defines from ODBC. for (unsigned int i = 0; i < _countof(aConstants); i++) PyModule_AddIntConstant(module, (char*)aConstants[i].szName, aConstants[i].value); PyModule_AddObject(module, "Date", (PyObject*)PyDateTimeAPI->DateType); Py_INCREF((PyObject*)PyDateTimeAPI->DateType); PyModule_AddObject(module, "Time", (PyObject*)PyDateTimeAPI->TimeType); Py_INCREF((PyObject*)PyDateTimeAPI->TimeType); PyModule_AddObject(module, "Timestamp", (PyObject*)PyDateTimeAPI->DateTimeType); Py_INCREF((PyObject*)PyDateTimeAPI->DateTimeType); PyModule_AddObject(module, "DATETIME", (PyObject*)PyDateTimeAPI->DateTimeType); Py_INCREF((PyObject*)PyDateTimeAPI->DateTimeType); PyModule_AddObject(module, "STRING", (PyObject*)&PyUnicode_Type); Py_INCREF((PyObject*)&PyUnicode_Type); PyModule_AddObject(module, "NUMBER", (PyObject*)&PyFloat_Type); Py_INCREF((PyObject*)&PyFloat_Type); PyModule_AddObject(module, "ROWID", (PyObject*)&PyLong_Type); Py_INCREF((PyObject*)&PyLong_Type); PyObject* binary_type; binary_type = (PyObject*)&PyByteArray_Type; PyModule_AddObject(module, "BINARY", binary_type); Py_INCREF(binary_type); PyModule_AddObject(module, "Binary", binary_type); Py_INCREF(binary_type); assert(null_binary != 0); // must be initialized first PyModule_AddObject(module, "BinaryNull", null_binary); PyModule_AddIntConstant(module, "SQLWCHAR_SIZE", sizeof(SQLWCHAR)); if (!PyErr_Occurred()) { module.Detach(); } else { ErrorCleanup(); } return pModule; } #ifdef WINVER BOOL WINAPI DllMain(HINSTANCE hMod, DWORD fdwReason, LPVOID lpvReserved) { UNUSED(hMod, fdwReason, lpvReserved); return TRUE; } #endif static PyObject* MakeConnectionString(PyObject* existing, PyObject* parts) { // Creates a connection string from an optional existing connection string plus a dictionary of keyword value // pairs. // // existing // Optional Unicode connection string we will be appending to. 
Used when a partial connection string is passed // in, followed by keyword parameters: // // connect("driver={x};database={y}", user='z') // // parts // A dictionary of text keywords and text values that will be appended. assert(PyUnicode_Check(existing)); Py_ssize_t pos = 0; PyObject* key = 0; PyObject* value = 0; Py_ssize_t length = 0; // length in *characters* int result_kind = PyUnicode_1BYTE_KIND; if (existing) { length = PyUnicode_GET_LENGTH(existing) + 1; // + 1 to add a trailing semicolon int kind = PyUnicode_KIND(existing); if (result_kind < kind) result_kind = kind; } while (PyDict_Next(parts, &pos, &key, &value)) { // key=value; length += PyUnicode_GET_LENGTH(key) + 1; length += PyUnicode_GET_LENGTH(value) + 1; int kind = PyUnicode_KIND(key); if (result_kind < kind) result_kind = kind; kind = PyUnicode_KIND(value); if (result_kind < kind) result_kind = kind; } Py_UCS4 maxchar = 0x10ffff; if (result_kind == PyUnicode_2BYTE_KIND) maxchar = 0xffff; else if (result_kind == PyUnicode_1BYTE_KIND) maxchar = 0xff; PyObject* result = PyUnicode_New(length, maxchar); if (!result) return 0; Py_ssize_t offset = 0; if (existing) { Py_ssize_t count = PyUnicode_GET_LENGTH(existing); Py_ssize_t n = PyUnicode_CopyCharacters(result, offset, existing, 0, count); if (n < 0) return 0; offset += count; PyUnicode_WriteChar(result, offset++, (Py_UCS4)';'); } pos = 0; while (PyDict_Next(parts, &pos, &key, &value)) { Py_ssize_t count = PyUnicode_GET_LENGTH(key); Py_ssize_t n = PyUnicode_CopyCharacters(result, offset, key, 0, count); if (n < 0) return 0; offset += count; PyUnicode_WriteChar(result, offset++, (Py_UCS4)'='); count = PyUnicode_GET_LENGTH(value); n = PyUnicode_CopyCharacters(result, offset, value, 0, count); if (n < 0) return 0; offset += count; PyUnicode_WriteChar(result, offset++, (Py_UCS4)';'); } assert(offset == length); return result; } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1707151232.0 pyodbc-5.1.0/src/pyodbcmodule.h0000644000175100001770000000451414560207600016034 0ustar00runnerdocker/* * Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated * documentation files (the "Software"), to deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to * permit persons to whom the Software is furnished to do so. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE * WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS * OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #ifndef _PYPGMODULE_H #define _PYPGMODULE_H #define SQL_WMETADATA -888 // This is a custom constant that can be passed to Connection.setencoding. Pick a value that // is very different from SQL_CHAR and SQL_WCHAR and similar items. 
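// For illustration (hedged; see the pyodbc wiki for the authoritative description), this
// constant is used from Python with Connection.setdecoding to control how metadata such
// as column names is decoded, e.g.:
//
//     cnxn.setdecoding(pyodbc.SQL_WMETADATA, encoding='utf-16le')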
extern PyObject* Error; extern PyObject* Warning; extern PyObject* InterfaceError; extern PyObject* DatabaseError; extern PyObject* InternalError; extern PyObject* OperationalError; extern PyObject* ProgrammingError; extern PyObject* IntegrityError; extern PyObject* DataError; extern PyObject* NotSupportedError;
/* Returns the given class, specific to the current thread's interpreter. For performance these are cached for each thread. This is for internal use only, so we'll cache using only the class name. Make sure they are unique. (That is, don't try to import classes with the same name from two different modules.) */ PyObject* GetClassForThread(const char* szModule, const char* szClass); bool IsInstanceForThread(PyObject* param, const char* szModule, const char* szClass, PyObject** pcls);
extern PyObject* null_binary; extern HENV henv; extern PyTypeObject RowType; extern PyTypeObject CursorType; extern PyTypeObject ConnectionType;
// The pyodbc module. extern PyObject* pModule;
inline bool lowercase() { return PyObject_GetAttrString(pModule, "lowercase") == Py_True; }
bool UseNativeUUID(); // Returns True if pyodbc.native_uuid is true, meaning uuid.UUID objects should be returned.
#endif // _PYPGMODULE_H ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1707151232.0 pyodbc-5.1.0/src/resource.h0000644000175100001770000000060214560207600015167 0ustar00runnerdocker//{{NO_DEPENDENCIES}} // Microsoft Visual C++ generated include file. // Used by pyodbc.rc // Next default values for new objects // #ifdef APSTUDIO_INVOKED #ifndef APSTUDIO_READONLY_SYMBOLS #define _APS_NEXT_RESOURCE_VALUE 101 #define _APS_NEXT_COMMAND_VALUE 40001 #define _APS_NEXT_CONTROL_VALUE 1001 #define _APS_NEXT_SYMED_VALUE 101 #endif #endif ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1707151232.0 pyodbc-5.1.0/src/row.cpp0000644000175100001770000003762714560207600014513 0ustar00runnerdocker // Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated // documentation files (the "Software"), to deal in the Software without restriction, including without limitation the // rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to // permit persons to whom the Software is furnished to do so. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE // WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS // OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR // OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #include "pyodbc.h" #include "pyodbcmodule.h" #include "row.h" #include "wrapper.h" struct Row { // A Row must act like a sequence (a tuple of results) to meet the DB API specification, but we also allow values // to be accessed via lowercased column names. We also supply a `columns` attribute which returns the list of // column names. PyObject_HEAD // cursor.description, accessed as _description PyObject* description; // A Python dictionary mapping from column name to a PyInteger, used to access columns by name. PyObject* map_name_to_index; // The number of values in apValues. Py_ssize_t cValues; // The column values, stored as an array.
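    // (There are cValues entries; the Row owns a reference to each and releases them
    // through FreeRowValues below.)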
PyObject** apValues; }; #define Row_Check(op) PyObject_TypeCheck(op, &RowType) #define Row_CheckExact(op) (Py_TYPE(op) == &RowType) void FreeRowValues(Py_ssize_t cValues, PyObject** apValues) { // Frees each pointer in the apValues buffer *and* the buffer itself. if (apValues) { for (Py_ssize_t i = 0; i < cValues; i++) Py_XDECREF(apValues[i]); PyMem_Free(apValues); } } static void Row_dealloc(PyObject* o) { // Note: Now that __newobj__ is available, our variables could be zero... Row* self = (Row*)o; Py_XDECREF(self->description); Py_XDECREF(self->map_name_to_index); FreeRowValues(self->cValues, self->apValues); PyObject_Del(self); } static PyObject* Row_getstate(PyObject* self) { // Returns a tuple containing the saved state. We don't really support empty rows, but unfortunately they can be // created now by the new constructor which was necessary for implementing pickling. In that case (everything is // zero), an empty tuple is returned. // Not exposed. Row* row = (Row*)self; if (row->description == 0) return PyTuple_New(0); Object state(PyTuple_New(2 + row->cValues)); if (!state.IsValid()) return 0; PyTuple_SET_ITEM(state, 0, row->description); PyTuple_SET_ITEM(state, 1, row->map_name_to_index); for (int i = 0; i < row->cValues; i++) PyTuple_SET_ITEM(state, i+2, row->apValues[i]); for (int i = 0; i < PyTuple_GET_SIZE(state); i++) Py_XINCREF(PyTuple_GET_ITEM(state, i)); return state.Detach(); } static PyObject* new_check(PyObject* args) { // We don't support a normal constructor, so only allow this for unpickling. There should be a single arg that was // returned by Row_reduce. Make sure the sizes match. The desc and map should have one entry per column, which // should equal the number of remaining items. if (PyTuple_GET_SIZE(args) < 3) return 0; PyObject* desc = PyTuple_GET_ITEM(args, 0); PyObject* map = PyTuple_GET_ITEM(args, 1); if (!PyTuple_CheckExact(desc) || !PyDict_CheckExact(map)) return 0; Py_ssize_t cols = PyTuple_GET_SIZE(desc); if (PyDict_Size(map) != cols || PyTuple_GET_SIZE(args) - 2 != cols) return 0; PyObject** apValues = (PyObject**)PyMem_Malloc(sizeof(PyObject*) * cols); if (!apValues) return 0; for (int i = 0; i < cols; i++) { apValues[i] = PyTuple_GET_ITEM(args, i+2); Py_INCREF(apValues[i]); } // Row_Internal will incref desc and map. If something goes wrong, it will free apValues. return (PyObject*)Row_InternalNew(desc, map, cols, apValues); } static PyObject* Row_new(PyTypeObject* type, PyObject* args, PyObject* kwargs) { UNUSED(kwargs); PyObject* row = new_check(args); if (row == 0) PyErr_SetString(PyExc_TypeError, "cannot create 'pyodbc.Row' instances"); return row; } Row* Row_InternalNew(PyObject* description, PyObject* map_name_to_index, Py_ssize_t cValues, PyObject** apValues) { // Called by other modules to create rows. Takes ownership of apValues. #ifdef _MSC_VER #pragma warning(disable : 4365) #endif Row* row = PyObject_NEW(Row, &RowType); #ifdef _MSC_VER #pragma warning(default : 4365) #endif if (row) { Py_INCREF(description); row->description = description; Py_INCREF(map_name_to_index); row->map_name_to_index = map_name_to_index; row->apValues = apValues; row->cValues = cValues; } else { FreeRowValues(cValues, apValues); } return row; } static PyObject* Row_getattro(PyObject* o, PyObject* name) { // Called to handle 'row.colname'. 
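    // For example, after cursor.execute("select customer_id from t1"), the expression
    // row.customer_id looks 'customer_id' up in map_name_to_index and returns the
    // corresponding entry of apValues.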
Row* self = (Row*)o; PyObject* index = PyDict_GetItem(self->map_name_to_index, name); if (index) { Py_ssize_t i = PyNumber_AsSsize_t(index, 0); Py_INCREF(self->apValues[i]); return self->apValues[i]; } return PyObject_GenericGetAttr(o, name); } static Py_ssize_t Row_length(PyObject* self) { return ((Row*)self)->cValues; } static int Row_contains(PyObject* o, PyObject* el) { // Implementation of contains. The documentation is not good (non-existent?), so I copied the following from the // PySequence_Contains documentation: Return -1 if error; 1 if ob in seq; 0 if ob not in seq. Row* self = (Row*)o; int cmp = 0; for (Py_ssize_t i = 0, c = self->cValues ; cmp == 0 && i < c; ++i) cmp = PyObject_RichCompareBool(el, self->apValues[i], Py_EQ); return cmp; } PyObject* Row_item(PyObject* o, Py_ssize_t i) { // Apparently, negative indexes are handled by magic ;) -- they never make it here. Row* self = (Row*)o; if (i < 0 || i >= self->cValues) { PyErr_SetString(PyExc_IndexError, "tuple index out of range"); return NULL; } Py_INCREF(self->apValues[i]); return self->apValues[i]; } static int Row_ass_item(PyObject* o, Py_ssize_t i, PyObject* v) { // Implements row[i] = value. Row* self = (Row*)o; if (i < 0 || i >= self->cValues) { PyErr_SetString(PyExc_IndexError, "Row assignment index out of range"); return -1; } Py_XDECREF(self->apValues[i]); Py_INCREF(v); self->apValues[i] = v; return 0; } static int Row_setattro(PyObject* o, PyObject *name, PyObject* v) { Row* self = (Row*)o; PyObject* index = PyDict_GetItem(self->map_name_to_index, name); if (index) return Row_ass_item(o, PyNumber_AsSsize_t(index, 0), v); return PyObject_GenericSetAttr(o, name, v); } static PyObject* Row_repr(PyObject* o) { // We want to return the same representation as a tuple. The easiest way is to create a // temporary tuple. I do not consider this something normally used in high performance // areas. Row* self = (Row*)o; Object t(PyTuple_New(self->cValues)); if (!t) return 0; for (Py_ssize_t i = 0; i < self->cValues; i++) { Py_INCREF(self->apValues[i]); PyTuple_SET_ITEM(t.Get(), i, self->apValues[i]); } return PyObject_Repr(t); } static PyObject* Row_richcompare(PyObject* olhs, PyObject* orhs, int op) { if (!Row_Check(olhs) || !Row_Check(orhs)) { Py_INCREF(Py_NotImplemented); return Py_NotImplemented; } Row* lhs = (Row*)olhs; Row* rhs = (Row*)orhs; if (lhs->cValues != rhs->cValues) { // Different sizes, so use the same rules as the tuple class. bool result; switch (op) { case Py_EQ: result = (lhs->cValues == rhs->cValues); break; case Py_GE: result = (lhs->cValues >= rhs->cValues); break; case Py_GT: result = (lhs->cValues > rhs->cValues); break; case Py_LE: result = (lhs->cValues <= rhs->cValues); break; case Py_LT: result = (lhs->cValues < rhs->cValues); break; case Py_NE: result = (lhs->cValues != rhs->cValues); break; default: // Can't get here, but don't have a cross-compiler way to silence this. result = false; } PyObject* p = result ? Py_True : Py_False; Py_INCREF(p); return p; } for (Py_ssize_t i = 0, c = lhs->cValues; i < c; i++) if (!PyObject_RichCompareBool(lhs->apValues[i], rhs->apValues[i], Py_EQ)) return PyObject_RichCompare(lhs->apValues[i], rhs->apValues[i], op); // All items are equal. 
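    // With every compared element equal, the outcome now depends only on the operator,
    // mirroring tuple semantics for equal-length sequences: ==, >=, and <= are true,
    // while !=, >, and < are false.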
switch (op) { case Py_EQ: case Py_GE: case Py_LE: Py_RETURN_TRUE; case Py_GT: case Py_LT: case Py_NE: break; } Py_RETURN_FALSE; }
static PyObject* Row_subscript(PyObject* o, PyObject* key) { Row* row = (Row*)o; if (PyIndex_Check(key)) { Py_ssize_t i = PyNumber_AsSsize_t(key, PyExc_IndexError); if (i == -1 && PyErr_Occurred()) return 0; if (i < 0) i += row->cValues; if (i < 0 || i >= row->cValues) return PyErr_Format(PyExc_IndexError, "row index out of range index=%d len=%d", (int)i, (int)row->cValues); Py_INCREF(row->apValues[i]); return row->apValues[i]; } if (PySlice_Check(key)) { Py_ssize_t start, stop, step, slicelength; if (PySlice_GetIndicesEx(key, row->cValues, &start, &stop, &step, &slicelength) < 0) return 0; if (slicelength <= 0) return PyTuple_New(0); if (start == 0 && step == 1 && slicelength == row->cValues) { Py_INCREF(o); return o; } Object result(PyTuple_New(slicelength)); if (!result) return 0; for (Py_ssize_t i = 0, index = start; i < slicelength; i++, index += step) { PyTuple_SET_ITEM(result.Get(), i, row->apValues[index]); Py_INCREF(row->apValues[index]); } return result.Detach(); } return PyErr_Format(PyExc_TypeError, "row indices must be integers, not %.200s", Py_TYPE(key)->tp_name); }
static PySequenceMethods row_as_sequence = { Row_length, // sq_length 0, // sq_concat 0, // sq_repeat Row_item, // sq_item 0, // was_sq_slice Row_ass_item, // sq_ass_item 0, // sq_ass_slice Row_contains, // sq_contains };
static PyMappingMethods row_as_mapping = { Row_length, // mp_length Row_subscript, // mp_subscript 0, // mp_ass_subscript };
static char description_doc[] = "The Cursor.description sequence from the Cursor that created this row.";
static PyMemberDef Row_members[] = { { "cursor_description", T_OBJECT_EX, offsetof(Row, description), READONLY, description_doc }, { 0 } };
static PyObject* Row_reduce(PyObject* self, PyObject* args) { PyObject* state = Row_getstate(self); if (!state) return 0; return Py_BuildValue("ON", Py_TYPE(self), state); }
static PyMethodDef Row_methods[] = { { "__reduce__", (PyCFunction)Row_reduce, METH_NOARGS, 0 }, { 0, 0, 0, 0 } };
static char row_doc[] = "Row objects are sequence objects that hold query results.\n" "\n" "They are similar to tuples in that they cannot be resized and new attributes\n" "cannot be added, but individual elements can be replaced. This allows data to\n" "be \"fixed up\" after being fetched. (For example, datetimes may be replaced by\n" "those with time zones attached.)\n" "\n" " row[0] = row[0].replace(tzinfo=timezone)\n" " print(row[0])\n" "\n" "Additionally, individual values can optionally be accessed or replaced by\n" "name.
Non-alphanumeric characters are replaced with an underscore.\n" "\n" " cursor.execute(\"select customer_id, [Name With Spaces] from tmp\")\n" " row = cursor.fetchone()\n" " print(row.customer_id, row.Name_With_Spaces)\n" "\n" "If using this non-standard feature, it is often convenient to specify the name\n" "using the SQL 'as' keyword:\n" "\n" " cursor.execute(\"select count(*) as total from tmp\")\n" " row = cursor.fetchone()\n" " print(row.total)";
PyTypeObject RowType = { PyVarObject_HEAD_INIT(NULL, 0) "pyodbc.Row", // tp_name sizeof(Row), // tp_basicsize 0, // tp_itemsize Row_dealloc, // tp_dealloc 0, // tp_print 0, // tp_getattr 0, // tp_setattr 0, // tp_compare Row_repr, // tp_repr 0, // tp_as_number &row_as_sequence, // tp_as_sequence &row_as_mapping, // tp_as_mapping 0, // tp_hash 0, // tp_call 0, // tp_str Row_getattro, // tp_getattro Row_setattro, // tp_setattro 0, // tp_as_buffer Py_TPFLAGS_DEFAULT, // tp_flags row_doc, // tp_doc 0, // tp_traverse 0, // tp_clear Row_richcompare, // tp_richcompare 0, // tp_weaklistoffset 0, // tp_iter 0, // tp_iternext Row_methods, // tp_methods Row_members, // tp_members 0, // tp_getset 0, // tp_base 0, // tp_dict 0, // tp_descr_get 0, // tp_descr_set 0, // tp_dictoffset 0, // tp_init 0, // tp_alloc Row_new, // tp_new 0, // tp_free 0, // tp_is_gc 0, // tp_bases 0, // tp_mro 0, // tp_cache 0, // tp_subclasses 0, // tp_weaklist }; ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1707151232.0 pyodbc-5.1.0/src/row.h0000644000175100001770000000307214560207600014153 0ustar00runnerdocker /* * Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated * documentation files (the "Software"), to deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to * permit persons to whom the Software is furnished to do so. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE * WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS * OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #ifndef ROW_H #define ROW_H struct Row; /* * Used to make a new row from an array of column values. */ Row* Row_InternalNew(PyObject* description, PyObject* map_name_to_index, Py_ssize_t cValues, PyObject** apValues); /* * Dereferences each object in apValues and frees apValues. This is the internal format used by rows. * * cValues: The number of items to free in apValues. * * apValues: The array of values. This can be NULL. */ void FreeRowValues(Py_ssize_t cValues, PyObject** apValues); PyObject* Row_item(PyObject* o, Py_ssize_t i); extern PyTypeObject RowType; #define Row_Check(op) PyObject_TypeCheck(op, &RowType) #define Row_CheckExact(op) (Py_TYPE(op) == &RowType) #endif ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1707151232.0 pyodbc-5.1.0/src/textenc.cpp0000644000175100001770000000743214560207600015355 0ustar00runnerdocker #include "pyodbc.h" #include "wrapper.h" #include "textenc.h" void SQLWChar::init(PyObject* src, const TextEnc& enc) { // Initialization code common to all of the constructors. // // Convert `src` to SQLWCHAR.
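    // (Added note) On the optimized UTF-8 path below, psz ends up pointing at UTF-8 bytes
    // despite its SQLWCHAR* type; callers presumably rely on the TextEnc's ctype field to
    // describe the buffer to ODBC correctly.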
static PyObject* nulls = NULL; if (src == 0 || src == Py_None) { psz = 0; isNone = true; return; } isNone = false; // If there are optimized encodings that don't require a temporary object, use them. if (enc.optenc == OPTENC_UTF8 && PyUnicode_Check(src)) { psz = (SQLWCHAR*)PyUnicode_AsUTF8(src); return; } PyObject* pb = 0; if (!pb && PyUnicode_Check(src)) pb = PyUnicode_AsEncodedString(src, enc.name, "strict"); if (pb) { // Careful: Some encodings don't return bytes. if (!PyBytes_Check(pb)) { // REVIEW: Error or just return null? psz = 0; Py_DECREF(pb); return; } if(!nulls) nulls = PyBytes_FromStringAndSize("\0\0\0\0", 4); PyBytes_Concat(&pb, nulls); if (!pb) { psz = 0; return; } } else { // If the encoding failed (possibly due to "strict"), it will generate an exception, but // we're going to continue. PyErr_Clear(); psz = 0; } if (pb) { bytes.Attach(pb); psz = (SQLWCHAR*)PyBytes_AS_STRING(pb); } } PyObject* TextEnc::Encode(PyObject* obj) const { PyObject* bytes = PyCodec_Encode(obj, name, "strict"); if (bytes && PyErr_Occurred()) { // REVIEW: Issue #206. I am not sure what is going on here, but PyCodec_Encode // sometimes returns bytes but *also* sets an exception saying "'ascii' codec can't // encode characters...". I assume the ascii is from my sys encoding, but it seems to // be a superfluous error. Since Cursor.fetchall() looks for exceptions this extraneous // error causes us to throw an exception. // // I'm putting in a work around but we should track down the root cause and report it // to the Python project if it is not ours. PyErr_Clear(); } return bytes; } PyObject* TextBufferToObject(const TextEnc& enc, const byte* pbData, Py_ssize_t cbData) { // cbData // The length of data in bytes (cb == 'count of bytes'). // NB: In each branch we make a check for a zero length string and handle it specially // since PyUnicode_Decode may (will?) fail if we pass a zero-length string. Issue #172 // first pointed this out with shift_jis. I'm not sure if it is a fault in the // implementation of this codec or if others will have it also. // PyObject* str; if (cbData == 0) return PyUnicode_FromStringAndSize("", 0); switch (enc.optenc) { case OPTENC_UTF8: return PyUnicode_DecodeUTF8((char*)pbData, cbData, "strict"); case OPTENC_UTF16: { int byteorder = BYTEORDER_NATIVE; return PyUnicode_DecodeUTF16((char*)pbData, cbData, "strict", &byteorder); } case OPTENC_UTF16LE: { int byteorder = BYTEORDER_LE; return PyUnicode_DecodeUTF16((char*)pbData, cbData, "strict", &byteorder); } case OPTENC_UTF16BE: { int byteorder = BYTEORDER_BE; return PyUnicode_DecodeUTF16((char*)pbData, cbData, "strict", &byteorder); } case OPTENC_LATIN1: return PyUnicode_DecodeLatin1((char*)pbData, cbData, "strict"); } // The user set an encoding by name. 
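    // Fall back to Python's general codec machinery for encodings without an optimized path.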
return PyUnicode_Decode((char*)pbData, cbData, enc.name, "strict"); } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1707151232.0 pyodbc-5.1.0/src/textenc.h0000644000175100001770000001021014560207600015006 0ustar00runnerdocker#ifndef _TEXTENC_H #define _TEXTENC_H enum { BYTEORDER_LE = -1, BYTEORDER_NATIVE = 0, BYTEORDER_BE = 1, OPTENC_NONE = 0, // No optimized encoding - use the named encoding OPTENC_UTF8 = 1, OPTENC_UTF16 = 2, // "Native", so check for BOM and default to BE OPTENC_UTF16BE = 3, OPTENC_UTF16LE = 4, OPTENC_LATIN1 = 5, OPTENC_UTF32 = 6, OPTENC_UTF32LE = 7, OPTENC_UTF32BE = 8, }; #ifdef WORDS_BIGENDIAN # define OPTENC_UTF16NE OPTENC_UTF16BE # define ENCSTR_UTF16NE "utf-16be" #else # define OPTENC_UTF16NE OPTENC_UTF16LE # define ENCSTR_UTF16NE "utf-16le" #endif struct TextEnc { // Holds encoding information for reading or writing text. Since some drivers / databases // are not easy to configure efficiently, a separate instance of this structure is // configured for: // // * reading SQL_CHAR // * reading SQL_WCHAR // * writing unicode strings // * reading metadata like column names // // I would have expected the metadata to follow the SQLCHAR / SQLWCHAR based on whether the // ANSI or wide API was called, but it does not. int optenc; // Set to one of the OPTENC constants to indicate whether an optimized encoding is to be // used or a custom one. If OPTENC_NONE, no optimized encoding is set and `name` should be // used. const char* name; // The name of the encoding. This must be freed using `free`. SQLSMALLINT ctype; // The C type to use, SQL_C_CHAR or SQL_C_WCHAR. Normally this matches the SQL type of the // column (SQL_C_CHAR is used for SQL_CHAR, etc.). At least one database reports it has // SQL_WCHAR data even when configured for UTF-8 which is better suited for SQL_C_CHAR. PyObject* Encode(PyObject*) const; // Given a string, return a bytes object encoded. This is used for encoding a Python // object for passing to a function expecting SQLCHAR* or SQLWCHAR*. }; class SQLWChar { // A convenience object that encodes a Unicode string to a given encoding. It can be cast // to a SQLWCHAR* to return the pointer. // // This is designed to be created on the stack, perform the conversion, and cleanup any // temporary objects in the destructor. // // The SQLWCHAR pointer is *only* valid during the lifetime of this object. It may point // into a temporary `bytes` object that is deleted by the constructor. public: SQLWChar() { psz = 0; isNone = true; } SQLWChar(PyObject* src, const char* szEncoding) { psz = 0; isNone = true; set(src, szEncoding); } SQLWChar(PyObject* src, const TextEnc* penc) { init(src, *penc); } SQLWChar(PyObject* src, const TextEnc& enc) { init(src, enc); } bool isValidOrNone() { // Returns true if this object is a valid string *or* None. return isNone || (psz != 0); } bool isValid() { return psz != 0; } void set(PyObject* src, const char* szEncoding) { bytes.Attach(0); // free old, if any psz = 0; isNone = true; TextEnc enc; enc.name = szEncoding; enc.ctype = SQL_C_WCHAR; enc.optenc = OPTENC_NONE; init(src, enc); } SQLWCHAR* get() { return psz; } operator SQLWCHAR*() { return psz; } private: SQLWCHAR* psz; bool isNone; Object bytes; // A temporary object holding the decoded bytes if we can't use a pointer into the original // object. 
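    // Keeping that bytes object alive here is what makes the SQLWCHAR* returned by get()
    // valid for the lifetime of this wrapper, as promised in the class comment above.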
void init(PyObject* src, const TextEnc& enc); SQLWChar(const SQLWChar&) {} void operator=(const SQLWChar&) {} }; PyObject* TextBufferToObject(const TextEnc& enc, const byte* p, Py_ssize_t len); // Convert a text buffer to a Python object using the given encoding. // // - pbData :: The buffer, which is an array of SQLCHAR or SQLWCHAR. We treat it as bytes here // since the encoding `enc` tells us how to treat it. // - cbData :: The length of `pbData` in *bytes*. #endif // _TEXTENC_H ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1707151232.0 pyodbc-5.1.0/src/wrapper.h0000644000175100001770000000337514560207600015032 0ustar00runnerdocker#ifndef _WRAPPER_H_ #define _WRAPPER_H_ class Object { // This is a simple wrapper around PyObject pointers to release them when this object goes // out of scope. Note that it does *not* increment the reference count on acquisition but // it *does* decrement the count if you don't use Detach. // // It also does not have a copy constructor and doesn't try to manage passing pointers // around. This is simply used to simplify functions by allowing early exits. Object(const Object& illegal) { } void operator=(const Object& illegal) { } protected: PyObject* p; public: Object(PyObject* _p = 0) { p = _p; } ~Object() { Py_XDECREF(p); } Object& operator=(PyObject* pNew) { Py_XDECREF(p); p = pNew; return *this; } bool IsValid() const { return p != 0; } bool Attach(PyObject* _p) { // Returns true if the new pointer is non-zero. Py_XDECREF(p); p = _p; return (_p != 0); } PyObject* Detach() { PyObject* pT = p; p = 0; return pT; } operator PyObject*() { return p; } operator PyTupleObject*() { // This is a bit weird. I'm surprised the PyTuple_ functions and macros don't just use // PyObject. return (PyTupleObject*)p; } operator PyVarObject*() { return (PyVarObject*)p; } operator const bool() { return p != 0; } PyObject* Get() { return p; } }; #ifdef WINVER struct RegKey { HKEY hkey; RegKey() { hkey = 0; } ~RegKey() { if (hkey != 0) RegCloseKey(hkey); } operator HKEY() { return hkey; } }; #endif #endif // _WRAPPER_H_ ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1707151235.2462423 pyodbc-5.1.0/tests/0000755000175100001770000000000014560207603013547 5ustar00runnerdocker././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1707151232.0 pyodbc-5.1.0/tests/__init__.py0000644000175100001770000000031614560207600015655 0ustar00runnerdocker# This file is required to simplify running pytest. # # Build pyodbc into the project root by running: python setup.py build_ext --inplace # # Then run pytest from the root: pytest tests/postgresql_test.py ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1707151232.0 pyodbc-5.1.0/tests/conftest.py0000644000175100001770000000422114560207600015742 0ustar00runnerdocker# If pyodbc has not been installed in this virtual environment, add the appropriate build # directory. This allows us to compile and run pytest without an install step in between # slowing things down. If you are going to use this, do not install pyodbc. If you already # have, uninstall it. # # This is useful for me for very fast testing, but I realize some people may not like it, so # I'll only enable it if PYODBC_TESTLOCAL is set. 
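# Example workflow (an assumption based on the logic below, not an official recipe):
#
#   python setup.py build
#   PYODBC_TESTLOCAL=1 python -m pytest tests/postgresql_test.py
#
# _add_to_path() then prepends the newest pyodbc extension found under ./build.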
import os, sys import importlib.machinery from datetime import datetime from os.path import join, dirname, abspath, getmtime def pytest_configure(config): if os.environ.get('PYODBC_TESTLOCAL') != '1': return try: import pyodbc return except ImportError: _add_to_path() def _add_to_path(): """ Prepends the build directory to the path so that newly built pyodbc libraries are used, allowing it to be tested without pip-installing it. """ # look for the suffixes used in the build filenames, e.g. ".cp38-win_amd64.pyd", # ".cpython-38-darwin.so", ".cpython-38-x86_64-linux-gnu.so", etc. library_exts = [ext for ext in importlib.machinery.EXTENSION_SUFFIXES if ext != '.pyd'] # generate the name of the pyodbc build file(s) library_names = [f'pyodbc{ext}' for ext in library_exts] # the build directory is assumed to be one directory up from this file build_dir = join(dirname(dirname(abspath(__file__))), 'build') # find all the relevant pyodbc build files, and get their modified dates file_info = [ (getmtime(join(dirpath, file)), join(dirpath, file)) for dirpath, dirs, files in os.walk(build_dir) for file in files if file in library_names ] if file_info: file_info.sort() # put them in chronological order library_modified_dt, library_path = file_info[-1] # use the latest one # add the build directory to the Python path sys.path.insert(0, dirname(library_path)) print(f'Library: {library_path} (last modified {datetime.fromtimestamp(library_modified_dt)})') else: print('Did not find the pyodbc library in the build directory. Will use the installed version.') ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1707151232.0 pyodbc-5.1.0/tests/mysql_test.py0000644000175100001770000003440614560207600016331 0ustar00runnerdocker""" pytest unit tests for MySQL. Uses a DSN named 'mysql' and uses UTF-8 """ # -*- coding: utf-8 -*- import os from decimal import Decimal from datetime import date, datetime from functools import lru_cache from typing import Iterator import pyodbc, pytest CNXNSTR = os.environ.get('PYODBC_MYSQL', 'DSN=mysql;charset=utf8mb4') def connect(autocommit=False, attrs_before=None): c = pyodbc.connect(CNXNSTR, autocommit=autocommit, attrs_before=attrs_before) # As of libmyodbc5w 5.3 SQLGetTypeInfo returns absurdly small sizes # leading to slow writes. Override them: c.maxwrite = 1024 * 1024 * 1024 # My MySQL configuration (and I think the default) sends *everything* # in UTF-8. The pyodbc default is to send Unicode as UTF-16 and to # decode WCHAR via UTF-16. Change them both to UTF-8.
c.setdecoding(pyodbc.SQL_CHAR, encoding='utf-8') c.setdecoding(pyodbc.SQL_WCHAR, encoding='utf-8') c.setencoding(encoding='utf-8') return c @pytest.fixture() def cursor() -> Iterator[pyodbc.Cursor]: cnxn = connect() cur = cnxn.cursor() cur.execute("drop table if exists t1") cur.execute("drop table if exists t2") cur.execute("drop table if exists t3") cnxn.commit() yield cur if not cnxn.closed: cur.close() cnxn.close() def test_text(cursor: pyodbc.Cursor): _test_vartype(cursor, 'text') def test_varchar(cursor: pyodbc.Cursor): _test_vartype(cursor, 'varchar') def test_varbinary(cursor: pyodbc.Cursor): _test_vartype(cursor, 'varbinary') def test_blob(cursor: pyodbc.Cursor): _test_vartype(cursor, 'blob') def _test_vartype(cursor, datatype): cursor.execute(f"create table t1(c1 {datatype}(4000))") for length in [None, 0, 100, 1000, 4000]: cursor.execute("delete from t1") encoding = (datatype in ('blob', 'varbinary')) and 'utf8' or None value = _generate_str(length, encoding=encoding) cursor.execute("insert into t1 values(?)", value) v = cursor.execute("select * from t1").fetchone()[0] assert v == value def test_char(cursor: pyodbc.Cursor): value = "testing" cursor.execute("create table t1(s char(7))") cursor.execute("insert into t1 values(?)", "testing") v = cursor.execute("select * from t1").fetchone()[0] assert v == value def test_int(cursor: pyodbc.Cursor): _test_scalar(cursor, 'int', [None, -1, 0, 1, 12345678]) def test_bigint(cursor: pyodbc.Cursor): _test_scalar(cursor, 'bigint', [None, -1, 0, 1, 0x123456789, 0x7FFFFFFF, 0xFFFFFFFF, 0x123456789]) def test_float(cursor: pyodbc.Cursor): _test_scalar(cursor, 'float', [None, -1, 0, 1, 1234.5, -200]) def _test_scalar(cursor: pyodbc.Cursor, datatype, values): cursor.execute(f"create table t1(c1 {datatype})") for value in values: cursor.execute("delete from t1") cursor.execute("insert into t1 values (?)", value) v = cursor.execute("select c1 from t1").fetchone()[0] assert v == value def test_decimal(cursor: pyodbc.Cursor): tests = [ ('100010', '19'), # The ODBC docs tell us how the bytes should look in the C struct ('1000.10', '20,6'), ('-10.0010', '19,4') ] for value, prec in tests: value = Decimal(value) cursor.execute("drop table if exists t1") cursor.execute(f"create table t1(c1 numeric({prec}))") cursor.execute("insert into t1 values (?)", value) v = cursor.execute("select c1 from t1").fetchone()[0] assert v == value def test_multiple_bindings(cursor: pyodbc.Cursor): "More than one bind and select on a cursor" cursor.execute("create table t1(n int)") cursor.execute("insert into t1 values (?)", 1) cursor.execute("insert into t1 values (?)", 2) cursor.execute("insert into t1 values (?)", 3) for i in range(3): cursor.execute("select n from t1 where n < ?", 10) cursor.execute("select n from t1 where n < 3") def test_different_bindings(cursor: pyodbc.Cursor): cursor.execute("create table t1(n int)") cursor.execute("create table t2(d datetime)") cursor.execute("insert into t1 values (?)", 1) cursor.execute("insert into t2 values (?)", datetime.now()) def test_drivers(): p = pyodbc.drivers() assert isinstance(p, list) def test_datasources(): p = pyodbc.dataSources() assert isinstance(p, dict) def test_getinfo_string(): cnxn = connect() value = cnxn.getinfo(pyodbc.SQL_CATALOG_NAME_SEPARATOR) assert isinstance(value, str) def test_getinfo_bool(): cnxn = connect() value = cnxn.getinfo(pyodbc.SQL_ACCESSIBLE_TABLES) assert isinstance(value, bool) def test_getinfo_int(): cnxn = connect() value = cnxn.getinfo(pyodbc.SQL_DEFAULT_TXN_ISOLATION) assert 
isinstance(value, int) def test_getinfo_smallint(): cnxn = connect() value = cnxn.getinfo(pyodbc.SQL_CONCAT_NULL_BEHAVIOR) assert isinstance(value, int) def test_subquery_params(cursor: pyodbc.Cursor): """Ensure parameter markers work in a subquery""" cursor.execute("create table t1(id integer, s varchar(20))") cursor.execute("insert into t1 values (?,?)", 1, 'test') row = cursor.execute(""" select x.id from ( select id from t1 where s = ? and id between ? and ? ) x """, 'test', 1, 10).fetchone() assert row[0] == 1 def test_close_cnxn(): """Make sure using a Cursor after closing its connection doesn't crash.""" cnxn = connect() cursor = cnxn.cursor() cursor.execute("drop table if exists t1") cursor.execute("create table t1(id integer, s varchar(20))") cursor.execute("insert into t1 values (?,?)", 1, 'test') cursor.execute("select * from t1") cnxn.close() # Now that the connection is closed, we expect an exception. (If the code attempts to use # the HSTMT, we'll get an access violation instead.) with pytest.raises(pyodbc.ProgrammingError): cursor.execute("select * from t1") def test_negative_row_index(cursor: pyodbc.Cursor): cursor.execute("create table t1(s varchar(20))") cursor.execute("insert into t1 values(?)", "1") row = cursor.execute("select * from t1").fetchone() assert row[0] == "1" assert row[-1] == "1" def test_version(): assert 3 == len(pyodbc.version.split('.')) # 1.3.1 etc. def test_date(cursor: pyodbc.Cursor): value = date(2001, 1, 1) cursor.execute("create table t1(dt date)") cursor.execute("insert into t1 values (?)", value) result = cursor.execute("select dt from t1").fetchone()[0] assert type(result) == type(value) assert result == value def test_time(cursor: pyodbc.Cursor): value = datetime.now().time() # We aren't yet writing values using the new extended time type so the value written to the # database is only down to the second. value = value.replace(microsecond=0) cursor.execute("create table t1(t time)") cursor.execute("insert into t1 values (?)", value) result = cursor.execute("select t from t1").fetchone()[0] assert value == result def test_datetime(cursor: pyodbc.Cursor): value = datetime(2007, 1, 15, 3, 4, 5) cursor.execute("create table t1(dt datetime)") cursor.execute("insert into t1 values (?)", value) result = cursor.execute("select dt from t1").fetchone()[0] assert value == result def test_rowcount_delete(cursor: pyodbc.Cursor): cursor.execute("create table t1(i int)") count = 4 for i in range(count): cursor.execute("insert into t1 values (?)", i) cursor.execute("delete from t1") assert cursor.rowcount == count def test_rowcount_nodata(cursor: pyodbc.Cursor): """ This represents a different code path than a delete that deleted something. The return value is SQL_NO_DATA and code after it was causing an error. We could use SQL_NO_DATA to step over the code that errors out and drop down to the same SQLRowCount code. On the other hand, we could hardcode a zero return value. """ cursor.execute("create table t1(i int)") # This is a different code path internally. cursor.execute("delete from t1") assert cursor.rowcount == 0 def test_rowcount_select(cursor: pyodbc.Cursor): """ Ensure Cursor.rowcount is set properly after a select statement. pyodbc calls SQLRowCount after each execute and sets Cursor.rowcount. Databases can return the actual rowcount or they can return -1 if it would help performance. MySQL seems to always return the correct rowcount. 
""" cursor.execute("create table t1(i int)") count = 4 for i in range(count): cursor.execute("insert into t1 values (?)", i) cursor.execute("select * from t1") assert cursor.rowcount == count rows = cursor.fetchall() assert len(rows) == count assert cursor.rowcount == count def test_rowcount_reset(cursor: pyodbc.Cursor): "Ensure rowcount is reset to -1" # The Python DB API says that rowcount should be set to -1 and most ODBC drivers let us # know there are no records. MySQL always returns 0, however. Without parsing the SQL # (which we are not going to do), I'm not sure how we can tell the difference and set the # value to -1. For now, I'll have this test check for 0. cursor.execute("create table t1(i int)") count = 4 for i in range(count): cursor.execute("insert into t1 values (?)", i) assert cursor.rowcount == 1 cursor.execute("create table t2(i int)") assert cursor.rowcount == 0 def test_lower_case(): "Ensure pyodbc.lowercase forces returned column names to lowercase." # Has to be set before creating the cursor cnxn = connect() pyodbc.lowercase = True cursor = cnxn.cursor() cursor.execute("drop table if exists t1") cursor.execute("create table t1(Abc int, dEf int)") cursor.execute("select * from t1") names = [t[0] for t in cursor.description] names.sort() assert names == ["abc", "def"] # Put it back so other tests don't fail. pyodbc.lowercase = False def test_row_description(cursor: pyodbc.Cursor): """ Ensure Cursor.description is accessible as Row.cursor_description. """ cursor.execute("create table t1(a int, b char(3))") cursor.execute("insert into t1 values(1, 'abc')") row = cursor.execute("select * from t1").fetchone() assert cursor.description == row.cursor_description def test_executemany(cursor: pyodbc.Cursor): cursor.execute("create table t1(a int, b varchar(10))") params = [(i, str(i)) for i in range(1, 6)] cursor.executemany("insert into t1(a, b) values (?,?)", params) count = cursor.execute("select count(*) from t1").fetchone()[0] assert count == len(params) cursor.execute("select a, b from t1 order by a") rows = cursor.fetchall() assert count == len(rows) for param, row in zip(params, rows): assert param[0] == row[0] assert param[1] == row[1] def test_executemany_one(cursor: pyodbc.Cursor): "Pass executemany a single sequence" cursor.execute("create table t1(a int, b varchar(10))") params = [(1, "test")] cursor.executemany("insert into t1(a, b) values (?,?)", params) count = cursor.execute("select count(*) from t1").fetchone()[0] assert count == len(params) cursor.execute("select a, b from t1 order by a") rows = cursor.fetchall() assert count == len(rows) for param, row in zip(params, rows): assert param[0] == row[0] assert param[1] == row[1] def test_row_slicing(cursor: pyodbc.Cursor): cursor.execute("create table t1(a int, b int, c int, d int)") cursor.execute("insert into t1 values(1,2,3,4)") row = cursor.execute("select * from t1").fetchone() result = row[:] assert result is row result = row[:-1] assert result == (1, 2, 3) result = row[0:4] assert result is row def test_row_repr(cursor: pyodbc.Cursor): cursor.execute("create table t1(a int, b int, c int, d int)") cursor.execute("insert into t1 values(1,2,3,4)") row = cursor.execute("select * from t1").fetchone() result = str(row) assert result == "(1, 2, 3, 4)" result = str(row[:-1]) assert result == "(1, 2, 3)" result = str(row[:1]) assert result == "(1,)" def test_emoticons_as_parameter(cursor: pyodbc.Cursor): # https://github.com/mkleehammer/pyodbc/issues/423 # # When sending a varchar parameter, pyodbc is supposed to 
set ColumnSize to the number # of characters. Ensure it works even with 4-byte characters. # # http://www.fileformat.info/info/unicode/char/1f31c/index.htm v = "x \U0001F31C z" cursor.execute("CREATE TABLE t1(s varchar(100)) DEFAULT CHARSET=utf8mb4") cursor.execute("insert into t1 values (?)", v) result = cursor.execute("select s from t1").fetchone()[0] assert result == v def test_emoticons_as_literal(cursor: pyodbc.Cursor): # https://github.com/mkleehammer/pyodbc/issues/630 v = "x \U0001F31C z" cursor.execute("CREATE TABLE t1(s varchar(100)) DEFAULT CHARSET=utf8mb4") cursor.execute("insert into t1 values ('%s')" % v) result = cursor.execute("select s from t1").fetchone()[0] assert result == v @lru_cache def _generate_str(length, encoding=None): """ Returns either a string or bytes, depending on whether encoding is provided, that is `length` elements long. If length is None, None is returned. This simplifies the tests by letting us put None into an array of other lengths and pass them here, moving the special case check into one place. """ if length is None: return None # Put non-ASCII characters at the front so we don't end up chopping one in half in a # multi-byte encoding like UTF-8. v = 'á' remaining = max(0, length - len(v)) if remaining: seed = '0123456789-abcdefghijklmnopqrstuvwxyz-' if remaining <= len(seed): v += seed else: c = (remaining + len(seed) - 1) // len(seed) v += seed * c if encoding: v = v.encode(encoding) # We chop *after* encoding because if we are encoding then we want bytes. v = v[:length] return v ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1707151232.0 pyodbc-5.1.0/tests/postgresql_test.py0000644000175100001770000004655514560207600017373 0ustar00runnerdocker""" Unit tests for PostgreSQL """ # -*- coding: utf-8 -*- import os, uuid from decimal import Decimal from typing import Iterator import pyodbc, pytest CNXNSTR = os.environ.get('PYODBC_POSTGRESQL', 'DSN=pyodbc-postgres') def connect(autocommit=False, attrs_before=None): return pyodbc.connect(CNXNSTR, autocommit=autocommit, attrs_before=attrs_before) @pytest.fixture() def cursor() -> Iterator[pyodbc.Cursor]: cnxn = connect() cur = cnxn.cursor() cur.execute("drop table if exists t1") cur.execute("drop table if exists t2") cur.execute("drop table if exists t3") cnxn.commit() yield cur if not cnxn.closed: cur.close() cnxn.close() def _generate_str(length, encoding=None): """ Returns either a string or bytes, depending on whether encoding is provided, that is `length` elements long. If length is None, None is returned. This simplifies the tests by letting us put None into an array of other lengths and pass them here, moving the special case check into one place. """ if length is None: return None seed = '0123456789-abcdefghijklmnopqrstuvwxyz-' if length <= len(seed): v = seed else: c = (length + len(seed) - 1) // len(seed) v = seed * c v = v[:length] if encoding: v = v.encode(encoding) return v def test_text(cursor: pyodbc.Cursor): cursor.execute("create table t1(col text)") # Two different read code paths exist based on the length. Using 100 and 4000 will ensure # both are tested. for length in [None, 0, 100, 1000, 4000]: cursor.execute("truncate table t1") param = _generate_str(length) cursor.execute("insert into t1 values (?)", param) result = cursor.execute("select col from t1").fetchval() assert result == param def test_text_many(cursor: pyodbc.Cursor): # This shouldn't make a difference, but we'll ensure we can read and write from multiple # columns at the same time.
cursor.execute("create table t1(col1 text, col2 text, col3 text)") v1 = 'ABCDEFGHIJ' * 30 v2 = '0123456789' * 30 v3 = '9876543210' * 30 cursor.execute("insert into t1(col1, col2, col3) values (?,?,?)", v1, v2, v3) row = cursor.execute("select col1, col2, col3 from t1").fetchone() assert v1 == row.col1 assert v2 == row.col2 assert v3 == row.col3 def test_chinese(cursor: pyodbc.Cursor): v = '我的' row = cursor.execute("SELECT N'我的' AS name").fetchone() assert row[0] == v rows = cursor.execute("SELECT N'我的' AS name").fetchall() assert rows[0][0] == v def test_bytea(cursor: pyodbc.Cursor): cursor.execute("create table t1(col bytea)") for length in [None, 0, 100, 1000, 4000]: cursor.execute("truncate table t1") param = _generate_str(length, 'utf8') cursor.execute("insert into t1 values (?)", param) result = cursor.execute("select col from t1").fetchval() assert result == param def test_bytearray(cursor: pyodbc.Cursor): """ We will accept a bytearray and treat it like bytes, but when reading we'll still get bytes back. """ cursor.execute("create table t1(col bytea)") # Two different read code paths exist based on the length. Using 100 and 4000 will ensure # both are tested. for length in [0, 100, 1000, 4000]: cursor.execute("truncate table t1") bytes = _generate_str(length, 'utf8') param = bytearray(bytes) cursor.execute("insert into t1 values (?)", param) result = cursor.execute("select col from t1").fetchval() assert result == bytes def test_int(cursor: pyodbc.Cursor): cursor.execute("create table t1(col int)") for param in [None, -1, 0, 1, 0x7FFFFFFF]: cursor.execute("truncate table t1") cursor.execute("insert into t1 values (?)", param) result = cursor.execute("select col from t1").fetchval() assert result == param def test_bigint(cursor: pyodbc.Cursor): cursor.execute("create table t1(col bigint)") for param in [None, -1, 0, 1, 0x7FFFFFFF, 0xFFFFFFFF, 0x123456789]: cursor.execute("truncate table t1") cursor.execute("insert into t1 values (?)", param) result = cursor.execute("select col from t1").fetchval() assert result == param def test_float(cursor: pyodbc.Cursor): cursor.execute("create table t1(col float)") for param in [None, -1, 0, 1, -200, 20000]: cursor.execute("truncate table t1") cursor.execute("insert into t1 values (?)", param) result = cursor.execute("select col from t1").fetchval() assert result == param def test_decimal(cursor: pyodbc.Cursor): cursor.execute("create table t1(col decimal(20,6))") # Note: Use strings to initialize the decimals to eliminate floating point rounding. # # Also, the ODBC docs show the value 100010 in the C struct, so I've included it here, # along with a couple of shifted versions. params = [Decimal(n) for n in "-1000.10 -1234.56 -1 0 1 1000.10 1234.56 100010 123456789.21".split()] params.append(None) for param in params: cursor.execute("truncate table t1") cursor.execute("insert into t1 values (?)", param) result = cursor.execute("select col from t1").fetchval() assert result == param def test_numeric(cursor: pyodbc.Cursor): cursor.execute("create table t1(col numeric(20,6))") # Note: Use strings to initialize the decimals to eliminate floating point rounding. 
params = [Decimal(n) for n in "-1234.56 -1 0 1 1234.56 123456789.21".split()] params.append(None) for param in params: cursor.execute("truncate table t1") cursor.execute("insert into t1 values (?)", param) result = cursor.execute("select col from t1").fetchval() assert result == param def test_maxwrite(cursor: pyodbc.Cursor): # If we write more than `maxwrite` bytes, pyodbc will switch from binding the data all at # once to providing it at execute time with SQLPutData. The default maxwrite is 1GB so # this is rarely needed in PostgreSQL but I need to test the functionality somewhere. cursor.connection.maxwrite = 300 cursor.execute("create table t1(col text)") param = _generate_str(400) cursor.execute("insert into t1 values (?)", param) result = cursor.execute("select col from t1").fetchval() assert result == param def test_nonnative_uuid(cursor: pyodbc.Cursor): pyodbc.native_uuid = False param = uuid.uuid4() cursor.execute("create table t1(n uuid)") cursor.execute("insert into t1 values (?)", param) result = cursor.execute("select n from t1").fetchval() assert isinstance(result, str) assert result == str(param).upper() def test_native_uuid(cursor: pyodbc.Cursor): pyodbc.native_uuid = True # When true, we should return a uuid.UUID object. param = uuid.uuid4() cursor.execute("create table t1(n uuid)") cursor.execute("insert into t1 values (?)", param) result = cursor.execute("select n from t1").fetchval() assert isinstance(result, uuid.UUID) assert param == result def test_close_cnxn(cursor: pyodbc.Cursor): """Make sure using a Cursor after closing its connection doesn't crash.""" cursor.execute("create table t1(id integer, s varchar(20))") cursor.execute("insert into t1 values (?,?)", 1, 'test') cursor.execute("select * from t1") cursor.connection.close() # Now that the connection is closed, we expect an exception. (If the code attempts to use # the HSTMT, we'll get an access violation instead.) with pytest.raises(pyodbc.ProgrammingError): cursor.execute("select * from t1") def test_version(): assert len(pyodbc.version.split('.')) == 3 def test_rowcount(cursor: pyodbc.Cursor): assert cursor.rowcount == -1 # The spec says it should be -1 when not in use. cursor.execute("create table t1(col int)") count = 4 for i in range(count): cursor.execute("insert into t1 values (?)", i) cursor.execute("select * from t1") assert cursor.rowcount == count cursor.execute("update t1 set col=col+1") assert cursor.rowcount == count cursor.execute("delete from t1") assert cursor.rowcount == count # This is a different code path - the value internally is SQL_NO_DATA instead of an empty # result set. Just make sure it doesn't crash. cursor.execute("delete from t1") assert cursor.rowcount == 0 # IMPORTANT: The ODBC spec says it should be -1 after the create table, but the PostgreSQL # driver is telling pyodbc the rowcount is 0. Since we have no way of knowing when to # override it, we'll just update the test to ensure it is consistently zero. cursor.execute("create table t2(i int)") assert cursor.rowcount == 0 def test_row_description(cursor: pyodbc.Cursor): """ Ensure Cursor.description is accessible as Row.cursor_description. """ cursor.execute("create table t1(col1 int, col2 char(3))") cursor.execute("insert into t1 values(1, 'abc')") row = cursor.execute("select col1, col2 from t1").fetchone() assert row.cursor_description == cursor.description def test_lower_case(cursor: pyodbc.Cursor): "Ensure pyodbc.lowercase forces returned column names to lowercase." 
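# (pyodbc.lowercase is module-wide state rather than per-connection, which is why the
# test below sets it inside try/finally and restores the default for the rest of the
# suite.)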
try: pyodbc.lowercase = True cursor.execute("create table t1(Abc int, dEf int)") cursor.execute("select * from t1") names = {t[0] for t in cursor.description} assert names == {'abc', 'def'} finally: pyodbc.lowercase = False def test_executemany(cursor: pyodbc.Cursor): cursor.execute("create table t1(col1 int, col2 varchar(10))") params = [(i, str(i)) for i in range(1, 6)] # Without fast_executemany cursor.executemany("insert into t1(col1, col2) values (?,?)", params) cursor.execute("select col1, col2 from t1 order by col1") results = [tuple(row) for row in cursor] assert results == params # With fast_executemany try: pyodbc.fast_executemany = True cursor.execute("truncate table t1") cursor.executemany("insert into t1(col1, col2) values (?,?)", params) cursor.execute("select col1, col2 from t1 order by col1") results = [tuple(row) for row in cursor] assert results == params finally: pyodbc.fast_executemany = False def test_executemany_failure(cursor: pyodbc.Cursor): """ Ensure that an exception is raised if one query in an executemany fails. """ cursor.execute("create table t1(a int, b varchar(10))") params = [ (1, 'good'), ('error', 'not an int'), (3, 'good') ] with pytest.raises(pyodbc.Error): cursor.executemany("insert into t1(a, b) values (?, ?)", params) def test_row_slicing(cursor: pyodbc.Cursor): cursor.execute("create table t1(a int, b int, c int, d int)") cursor.execute("insert into t1 values(1,2,3,4)") row = cursor.execute("select * from t1").fetchone() result = row[:] assert result is row # returned as is result = row[:-1] assert result == (1, 2, 3) # returned as tuple result = row[0:4] assert result is row def test_drivers(): p = pyodbc.drivers() assert isinstance(p, list) def test_datasources(): p = pyodbc.dataSources() assert isinstance(p, dict) def test_getinfo_string(cursor: pyodbc.Cursor): value = cursor.connection.getinfo(pyodbc.SQL_CATALOG_NAME_SEPARATOR) assert isinstance(value, str) def test_getinfo_bool(cursor: pyodbc.Cursor): value = cursor.connection.getinfo(pyodbc.SQL_ACCESSIBLE_TABLES) assert isinstance(value, bool) def test_getinfo_int(cursor: pyodbc.Cursor): value = cursor.connection.getinfo(pyodbc.SQL_DEFAULT_TXN_ISOLATION) assert isinstance(value, int) def test_getinfo_smallint(cursor: pyodbc.Cursor): value = cursor.connection.getinfo(pyodbc.SQL_CONCAT_NULL_BEHAVIOR) assert isinstance(value, int) def test_cnxn_execute_error(cursor: pyodbc.Cursor): """ Make sure that Connection.execute (not Cursor) errors are not "eaten". GitHub issue #74 """ cursor.execute("create table t1(a int primary key)") cursor.execute("insert into t1 values (1)") with pytest.raises(pyodbc.Error): cursor.connection.execute("insert into t1 values (1)") def test_row_repr(cursor: pyodbc.Cursor): cursor.execute("create table t1(a int, b int, c int, d int)") cursor.execute("insert into t1 values(1,2,3,4)") row = cursor.execute("select * from t1").fetchone() result = str(row) assert result == "(1, 2, 3, 4)" result = str(row[:-1]) assert result == "(1, 2, 3)" result = str(row[:1]) assert result == "(1,)" def test_autocommit(cursor: pyodbc.Cursor): assert cursor.connection.autocommit is False othercnxn = connect(autocommit=True) assert othercnxn.autocommit is True othercnxn.autocommit = False assert othercnxn.autocommit is False def test_exc_integrity(cursor: pyodbc.Cursor): "Make sure an IntegrityError is raised" # This is really making sure we are properly encoding and comparing the SQLSTATEs.
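# (Per the DB API 2.0 exception hierarchy, integrity-constraint violations -- SQLSTATE
# class '23xxx' -- should surface as IntegrityError rather than the generic Error, so a
# mis-decoded SQLSTATE would show up here as the wrong exception class.)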
cursor.execute("create table t1(s1 varchar(10) primary key)") cursor.execute("insert into t1 values ('one')") with pytest.raises(pyodbc.IntegrityError): cursor.execute("insert into t1 values ('one')") def test_cnxn_set_attr_before(): # I don't have a getattr right now since I don't have a table telling me what kind of # value to expect. For now just make sure it doesn't crash. # From the unixODBC sqlext.h header file. SQL_ATTR_PACKET_SIZE = 112 _cnxn = connect(attrs_before={ SQL_ATTR_PACKET_SIZE : 1024 * 32 }) def test_cnxn_set_attr(cursor: pyodbc.Cursor): # I don't have a getattr right now since I don't have a table telling me what kind of # value to expect. For now just make sure it doesn't crash. # From the unixODBC sqlext.h header file. SQL_ATTR_ACCESS_MODE = 101 SQL_MODE_READ_ONLY = 1 cursor.connection.set_attr(SQL_ATTR_ACCESS_MODE, SQL_MODE_READ_ONLY) def test_columns(cursor: pyodbc.Cursor): driver_version = tuple( int(x) for x in cursor.connection.getinfo(pyodbc.SQL_DRIVER_VER).split(".") ) def _get_column_size(row): # the driver changed the name of the returned columns in version 13.02. # see https://odbc.postgresql.org/docs/release.html, release 13.02.0000, change 6. return row.column_size if driver_version >= (13, 2, 0) else row.precision # When using aiohttp, `await cursor.primaryKeys('t1')` was raising the error # # Error: TypeError: argument 2 must be str, not None # # I'm not sure why, but PyArg_ParseTupleAndKeywords fails if you use "|s" for an # optional string keyword when calling indirectly. cursor.execute("create table t1(a int, b varchar(3), xΏz varchar(4))") cursor.columns('t1') results = {row.column_name: row for row in cursor} row = results['a'] assert row.type_name == 'int4', row.type_name row = results['b'] assert row.type_name == 'varchar' assert _get_column_size(row) == 3, _get_column_size(row) row = results['xΏz'] assert row.type_name == 'varchar' assert _get_column_size(row) == 4, _get_column_size(row) # Now do the same, but specifically pass in None to one of the keywords. Old versions # were parsing arguments incorrectly and would raise an error. (This crops up when # calling indirectly like columns(*args, **kwargs) which aiodbc does.) cursor.columns('t1', schema=None, catalog=None) results = {row.column_name: row for row in cursor} row = results['a'] assert row.type_name == 'int4', row.type_name row = results['b'] assert row.type_name == 'varchar' assert _get_column_size(row) == 3 def test_cancel(cursor: pyodbc.Cursor): # I'm not sure how to reliably cause a hang to cancel, so for now we'll settle with # making sure SQLCancel is called correctly. cursor.execute("select 1") cursor.cancel() def test_emoticons_as_parameter(cursor: pyodbc.Cursor): # https://github.com/mkleehammer/pyodbc/issues/423 # # When sending a varchar parameter, pyodbc is supposed to set ColumnSize to the number # of characters. Ensure it works even with 4-byte characters. 
# # http://www.fileformat.info/info/unicode/char/1f31c/index.htm v = "x \U0001F31C z" cursor.execute("CREATE TABLE t1(s varchar(100))") cursor.execute("insert into t1 values (?)", v) result = cursor.execute("select s from t1").fetchone()[0] assert result == v def test_emoticons_as_literal(cursor: pyodbc.Cursor): # https://github.com/mkleehammer/pyodbc/issues/630 v = "x \U0001F31C z" cursor.execute("CREATE TABLE t1(s varchar(100))") cursor.execute(f"insert into t1 values ('{v}')") result = cursor.execute("select s from t1").fetchone()[0] assert result == v def test_cursor_messages(cursor: pyodbc.Cursor): """ Test the Cursor.messages attribute. """ # Using the INFO message level because INFO messages are always sent to the client regardless of # client_min_messages: https://www.postgresql.org/docs/11/runtime-config-client.html for msg in ('hello world', 'ABCDEFGHIJ' * 800): cursor.execute(f""" CREATE OR REPLACE PROCEDURE test_cursor_messages() LANGUAGE plpgsql AS $$ BEGIN RAISE INFO '{msg}' USING ERRCODE = '01000'; END; $$; """) cursor.execute("CALL test_cursor_messages();") messages = cursor.messages # There is a maximum size for these so the second msg will actually generate a bunch of # messages. To make it easier to compare, we'll stitch them back together. if len(messages) > 1: concat = ''.join(t[1] for t in messages) messages = [(messages[0][0], concat)] assert messages == [('[01000] (-1)', f'INFO: {msg}')] def test_output_conversion(cursor: pyodbc.Cursor): # Note the use of SQL_WVARCHAR, not SQL_VARCHAR. def convert(value): # The value is the raw bytes (as a bytes object) read from the # database. We'll simply add an X at the beginning and the end. return 'X' + value.decode('latin1') + 'X' cursor.execute("create table t1(n int, v varchar(10))") cursor.execute("insert into t1 values (1, '123.45')") cursor.connection.add_output_converter(pyodbc.SQL_WVARCHAR, convert) value = cursor.execute("select v from t1").fetchone()[0] assert value == 'X123.45X' # Clear all conversions and try again. There should be no Xs this time. cursor.connection.clear_output_converters() value = cursor.execute("select v from t1").fetchone()[0] assert value == '123.45' # Same but clear using remove_output_converter. cursor.connection.add_output_converter(pyodbc.SQL_WVARCHAR, convert) value = cursor.execute("select v from t1").fetchone()[0] assert value == 'X123.45X' cursor.connection.remove_output_converter(pyodbc.SQL_WVARCHAR) value = cursor.execute("select v from t1").fetchone()[0] assert value == '123.45' # And lastly, clear by passing None for the converter. cursor.connection.add_output_converter(pyodbc.SQL_WVARCHAR, convert) value = cursor.execute("select v from t1").fetchone()[0] assert value == 'X123.45X' cursor.connection.add_output_converter(pyodbc.SQL_WVARCHAR, None) value = cursor.execute("select v from t1").fetchone()[0] assert value == '123.45' ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1707151232.0 pyodbc-5.1.0/tests/sqlserver_test.py0000755000175100001770000015205614560207600017217 0ustar00runnerdocker#!/usr/bin/python import os, uuid, re, sys from decimal import Decimal from datetime import date, time, datetime from functools import lru_cache from typing import Iterator import pyodbc, pytest # WARNING: Wow Microsoft always manages to do the stupidest thing possible, always trying to be # smarter than everyone. I have worked with their APIs since before "OLE" and it has always # been a nanny state. They won't read the UID and PWD from odbc.ini because it isn't secure.
# Really? Less secure than what? The next hack someone is going to use? Do the straightforward # thing and explain how to secure it. It isn't their business how I deploy and secure. # # For every other DB we use a single default DSN but you can pass your own via an environment # variable. For SS, we can't just use a default DSN unless you want to go trusted. (Which is # more secure? No.) It'll be put into .bashrc most likely. Way to go. Now I'll go rename # all of the others to DB-specific names instead of PYODBC_CNXNSTR. Hot garbage as usual. CNXNSTR = os.environ.get('PYODBC_SQLSERVER', 'DSN=pyodbc-sqlserver') def connect(autocommit=False, attrs_before=None): return pyodbc.connect(CNXNSTR, autocommit=autocommit, attrs_before=attrs_before) DRIVER = connect().getinfo(pyodbc.SQL_DRIVER_NAME) IS_FREEDTS = bool(re.search('tsodbc', DRIVER, flags=re.IGNORECASE)) IS_MSODBCSQL = bool(re.search(r'(msodbcsql|sqlncli|sqlsrv32\.dll)', DRIVER, re.IGNORECASE)) def _get_sqlserver_year(): """ Returns the release year of the current version of SQL Server, used to skip tests for features that are not supported. If the current DB is not SQL Server, 0 is returned. """ # We used to use the major version, but most documentation on the web refers to the year # (e.g. SQL Server 2019) so we'll use that for skipping tests that do not apply. if not IS_MSODBCSQL: return 0 cnxn = connect() cursor = cnxn.cursor() row = cursor.execute("exec master..xp_msver 'ProductVersion'").fetchone() major = row.Character_Value.split('.', 1)[0] return { # https://sqlserverbuilds.blogspot.com/ '8': 2000, '9': 2005, '10': 2008, '11': 2012, '12': 2014, '13': 2016, '14': 2017, '15': 2019, '16': 2022 }[major] SQLSERVER_YEAR = _get_sqlserver_year() @pytest.fixture() def cursor() -> Iterator[pyodbc.Cursor]: cnxn = connect() cur = cnxn.cursor() cur.execute("drop table if exists t1") cur.execute("drop table if exists t2") cur.execute("drop table if exists t3") cnxn.commit() yield cur if not cnxn.closed: cur.close() cnxn.close() def test_text(cursor: pyodbc.Cursor): _test_vartype(cursor, 'text') def test_varchar(cursor: pyodbc.Cursor): _test_vartype(cursor, 'varchar') def test_nvarchar(cursor: pyodbc.Cursor): _test_vartype(cursor, 'nvarchar') def test_varbinary(cursor: pyodbc.Cursor): _test_vartype(cursor, 'varbinary') @pytest.mark.skipif(SQLSERVER_YEAR < 2005, reason='(max) not supported until 2005') def test_unicode_longmax(cursor: pyodbc.Cursor): # Issue 188: Segfault when fetching NVARCHAR(MAX) data over 511 bytes cursor.execute("select cast(replicate(N'x', 512) as nvarchar(max))") def test_char(cursor: pyodbc.Cursor): value = "testing" cursor.execute("create table t1(s char(7))") cursor.execute("insert into t1 values(?)", "testing") v = cursor.execute("select * from t1").fetchone()[0] assert v == value def test_int(cursor: pyodbc.Cursor): _test_scalar(cursor, 'int', [None, -1, 0, 1, 12345678]) def test_bigint(cursor: pyodbc.Cursor): _test_scalar(cursor, 'bigint', [None, -1, 0, 1, 0x123456789, 0x7FFFFFFF, 0xFFFFFFFF, 0x123456789]) def test_overflow_int(cursor: pyodbc.Cursor): # Python allows integers of any size, bigger than an 8-byte int can contain input = 9999999999999999999999999999999999999 cursor.execute("create table t1(d bigint)") with pytest.raises(OverflowError): cursor.execute("insert into t1 values (?)", input) result = cursor.execute("select * from t1").fetchall() assert result == [] def test_float(cursor: pyodbc.Cursor): _test_scalar(cursor, 'float', [None, -200, -1, 0, 1, 1234.5, -200, .00012345]) def 
test_non_numeric_float(cursor: pyodbc.Cursor): cursor.execute("create table t1(d float)") for input in (float('+Infinity'), float('-Infinity'), float('NaN')): with pytest.raises(pyodbc.ProgrammingError): cursor.execute("insert into t1 values (?)", input) def test_drivers(): p = pyodbc.drivers() assert isinstance(p, list) def test_datasources(): p = pyodbc.dataSources() assert isinstance(p, dict) def test_getinfo_string(): cnxn = connect() value = cnxn.getinfo(pyodbc.SQL_CATALOG_NAME_SEPARATOR) assert isinstance(value, str) def test_getinfo_bool(): cnxn = connect() value = cnxn.getinfo(pyodbc.SQL_ACCESSIBLE_TABLES) assert isinstance(value, bool) def test_getinfo_int(): cnxn = connect() value = cnxn.getinfo(pyodbc.SQL_DEFAULT_TXN_ISOLATION) assert isinstance(value, int) def test_getinfo_smallint(): cnxn = connect() value = cnxn.getinfo(pyodbc.SQL_CONCAT_NULL_BEHAVIOR) assert isinstance(value, int) def test_no_fetch(cursor: pyodbc.Cursor): # Issue 89 with FreeTDS: Multiple selects (or catalog functions that issue selects) without # fetches seem to confuse the driver. cursor.execute('select 1') cursor.execute('select 1') cursor.execute('select 1') def test_decode_meta(cursor: pyodbc.Cursor): """ Ensure column names with non-ASCII characters are converted using the configured encodings. """ # This is from GitHub issue #190 cursor.execute("create table t1(a int)") cursor.execute("insert into t1 values (1)") cursor.execute('select a as "Tipología" from t1') assert cursor.description[0][0] == "Tipología" def test_exc_integrity(cursor: pyodbc.Cursor): "Make sure an IntegrityError is raised" # This is really making sure we are properly encoding and comparing the SQLSTATEs. cursor.execute("create table t1(s1 varchar(10) primary key)") cursor.execute("insert into t1 values ('one')") with pytest.raises(pyodbc.IntegrityError): cursor.execute("insert into t1 values ('one')") def test_multiple_bindings(cursor: pyodbc.Cursor): "More than one bind and select on a cursor" cursor.execute("create table t1(n int)") cursor.execute("insert into t1 values (?)", 1) cursor.execute("insert into t1 values (?)", 2) cursor.execute("insert into t1 values (?)", 3) for _ in range(3): cursor.execute("select n from t1 where n < ?", 10) cursor.execute("select n from t1 where n < 3") def test_different_bindings(cursor: pyodbc.Cursor): cursor.execute("create table t1(n int)") cursor.execute("create table t2(d datetime)") cursor.execute("insert into t1 values (?)", 1) cursor.execute("insert into t2 values (?)", datetime.now()) SMALL_FENCEPOST_SIZES = [None, 0, 1, 255, 256, 510, 511, 512, 1023, 1024, 2047, 2048, 4000] LARGE_FENCEPOST_SIZES = SMALL_FENCEPOST_SIZES + [4095, 4096, 4097, 10 * 1024, 20 * 1024] def _test_vartype(cursor: pyodbc.Cursor, datatype): if datatype == 'text': lengths = LARGE_FENCEPOST_SIZES else: lengths = SMALL_FENCEPOST_SIZES if datatype == 'text': cursor.execute(f"create table t1(c1 {datatype})") else: maxlen = lengths[-1] cursor.execute(f"create table t1(c1 {datatype}({maxlen}))") for length in lengths: cursor.execute("delete from t1") encoding = (datatype in ('blob', 'varbinary')) and 'utf8' or None value = _generate_str(length, encoding=encoding) try: cursor.execute("insert into t1 values(?)", value) except pyodbc.Error as ex: msg = f'{datatype} insert failed: length={length} len={len(value)}' raise Exception(msg) from ex v = cursor.execute("select * from t1").fetchone()[0] assert v == value def _test_scalar(cursor: pyodbc.Cursor, datatype, values): """ A simple test wrapper for types that are identical 
when written and read. """ cursor.execute(f"create table t1(c1 {datatype})") for value in values: cursor.execute("delete from t1") cursor.execute("insert into t1 values (?)", value) v = cursor.execute("select c1 from t1").fetchone()[0] assert v == value def test_noscan(cursor: pyodbc.Cursor): assert cursor.noscan is False cursor.noscan = True assert cursor.noscan is True def test_nonnative_uuid(cursor: pyodbc.Cursor): # The default is False meaning we should return a string. Note that # SQL Server seems to always return uppercase. value = uuid.uuid4() cursor.execute("create table t1(n uniqueidentifier)") cursor.execute("insert into t1 values (?)", value) pyodbc.native_uuid = False result = cursor.execute("select n from t1").fetchval() assert isinstance(result, str) assert result == str(value).upper() pyodbc.native_uuid = True def test_native_uuid(cursor: pyodbc.Cursor): # When true, we should return a uuid.UUID object. value = uuid.uuid4() cursor.execute("create table t1(n uniqueidentifier)") cursor.execute("insert into t1 values (?)", value) pyodbc.native_uuid = True result = cursor.execute("select n from t1").fetchval() assert isinstance(result, uuid.UUID) assert value == result def test_nextset(cursor: pyodbc.Cursor): cursor.execute("create table t1(i int)") for i in range(4): cursor.execute("insert into t1(i) values(?)", i) cursor.execute( """ select i from t1 where i < 2 order by i; select i from t1 where i >= 2 order by i """) for i, row in enumerate(cursor): assert i == row.i assert cursor.nextset() for i, row in enumerate(cursor): assert i + 2 == row.i @pytest.mark.skipif(IS_FREEDTS, reason='https://github.com/FreeTDS/freetds/issues/230') def test_nextset_with_raiserror(cursor: pyodbc.Cursor): cursor.execute("select i = 1; RAISERROR('c', 16, 1);") row = next(cursor) assert 1 == row.i with pytest.raises(pyodbc.ProgrammingError): cursor.nextset() def test_fixed_unicode(cursor: pyodbc.Cursor): value = "t\xebsting" cursor.execute("create table t1(s nchar(7))") cursor.execute("insert into t1 values(?)", "t\xebsting") v = cursor.execute("select * from t1").fetchone()[0] assert isinstance(v, str) assert len(v) == len(value) # If we alloc'd wrong, the test below might work because of an embedded NULL assert v == value def test_chinese(cursor: pyodbc.Cursor): v = '我的' cursor.execute("SELECT N'我的' AS [Name]") row = cursor.fetchone() assert row[0] == v cursor.execute("SELECT N'我的' AS [Name]") rows = cursor.fetchall() assert rows[0][0] == v def test_bit(cursor: pyodbc.Cursor): value = True cursor.execute("create table t1(b bit)") cursor.execute("insert into t1 values (?)", value) v = cursor.execute("select b from t1").fetchone()[0] assert isinstance(v, bool) assert v == value def test_decimal(cursor: pyodbc.Cursor): # From test provided by planders (thanks!) in Issue 91 for (precision, scale, negative) in [ (1, 0, False), (1, 0, True), (6, 0, False), (6, 2, False), (6, 4, True), (6, 6, True), (38, 0, False), (38, 10, False), (38, 38, False), (38, 0, True), (38, 10, True), (38, 38, True)]: try: cursor.execute("drop table t1") except: pass cursor.execute(f"create table t1(d decimal({precision}, {scale}))") # Construct a decimal that uses the maximum precision and scale. sign = negative and '-' or '' before = '9' * (precision - scale) after = scale and ('.' 
+ '9' * scale) or '' decStr = f'{sign}{before}{after}' value = Decimal(decStr) cursor.execute("insert into t1 values(?)", value) v = cursor.execute("select d from t1").fetchone()[0] assert v == value def test_decimal_e(cursor: pyodbc.Cursor): """Ensure exponential notation decimals are properly handled""" value = Decimal((0, (1, 2, 3), 5)) # prints as 1.23E+7 cursor.execute("create table t1(d decimal(10, 2))") cursor.execute("insert into t1 values (?)", value) result = cursor.execute("select * from t1").fetchone()[0] assert result == value def test_subquery_params(cursor: pyodbc.Cursor): """Ensure parameter markers work in a subquery""" cursor.execute("create table t1(id integer, s varchar(20))") cursor.execute("insert into t1 values (?,?)", 1, 'test') row = cursor.execute(""" select x.id from ( select id from t1 where s = ? and id between ? and ? ) x """, 'test', 1, 10).fetchone() assert row is not None assert row[0] == 1 def test_close_cnxn(): """Make sure using a Cursor after closing its connection doesn't crash.""" cnxn = connect() cursor = cnxn.cursor() cursor.execute("drop table if exists t1") cursor.execute("create table t1(id integer, s varchar(20))") cursor.execute("insert into t1 values (?,?)", 1, 'test') cursor.execute("select * from t1") cnxn.close() # Now that the connection is closed, we expect an exception. (If the code attempts to use # the HSTMT, we'll get an access violation instead.) with pytest.raises(pyodbc.ProgrammingError): cursor.execute("select * from t1") def test_empty_string(cursor: pyodbc.Cursor): cursor.execute("create table t1(s varchar(20))") cursor.execute("insert into t1 values(?)", "") def test_empty_string_encoding(): cnxn = connect() cnxn.setdecoding(pyodbc.SQL_CHAR, encoding='shift_jis') value = "" cursor = cnxn.cursor() cursor.execute("create table t1(s varchar(20))") cursor.execute("insert into t1 values(?)", value) v = cursor.execute("select * from t1").fetchone()[0] assert v == value def test_fixed_str(cursor: pyodbc.Cursor): value = "testing" cursor.execute("create table t1(s char(7))") cursor.execute("insert into t1 values(?)", value) v = cursor.execute("select * from t1").fetchone()[0] assert isinstance(v, str) assert len(v) == len(value) # If we alloc'd wrong, the test below might work because of an embedded NULL assert v == value def test_empty_unicode(cursor: pyodbc.Cursor): cursor.execute("create table t1(s nvarchar(20))") cursor.execute("insert into t1 values(?)", "") def test_empty_unicode_encoding(): cnxn = connect() cnxn.setdecoding(pyodbc.SQL_CHAR, encoding='shift_jis') value = "" cursor = cnxn.cursor() cursor.execute("create table t1(s nvarchar(20))") cursor.execute("insert into t1 values(?)", value) v = cursor.execute("select * from t1").fetchone()[0] assert v == value def test_negative_row_index(cursor: pyodbc.Cursor): cursor.execute("create table t1(s varchar(20))") cursor.execute("insert into t1 values(?)", "1") row = cursor.execute("select * from t1").fetchone() assert row[0] == "1" assert row[-1] == "1" def test_version(): assert 3 == len(pyodbc.version.split('.')) # 1.3.1 etc. 
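# The year- and version-based gating used throughout this file depends on parsing version
# strings (see _get_sqlserver_year above). A minimal sketch of the same idea for the ODBC
# driver version, using only getinfo() as already seen in these tests; the helper name
# _driver_version_tuple is ours, not part of the suite:
def _driver_version_tuple(cnxn: pyodbc.Connection) -> tuple:
    """Parse SQL_DRIVER_VER (e.g. '17.10.0001') into a comparable tuple of ints."""
    return tuple(int(part) for part in cnxn.getinfo(pyodbc.SQL_DRIVER_VER).split('.'))
# Usage sketch: skip a driver-specific test unless _driver_version_tuple(connect()) >= (17, 0, 0).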
@pytest.mark.skipif(IS_MSODBCSQL and SQLSERVER_YEAR < 2008, reason='Date not supported until 2008?') def test_date(cursor: pyodbc.Cursor): value = date.today() cursor.execute("create table t1(d date)") cursor.execute("insert into t1 values (?)", value) result = cursor.execute("select d from t1").fetchone()[0] assert isinstance(result, date) assert value == result @pytest.mark.skipif(IS_MSODBCSQL and SQLSERVER_YEAR < 2008, reason='Time not supported until 2008?') def test_time(cursor: pyodbc.Cursor): value = datetime.now().time() # We aren't yet writing values using the new extended time type so the value written to the # database is only down to the second. value = value.replace(microsecond=0) cursor.execute("create table t1(t time)") cursor.execute("insert into t1 values (?)", value) result = cursor.execute("select t from t1").fetchone()[0] assert isinstance(result, time) assert value == result def test_datetime(cursor: pyodbc.Cursor): value = datetime(2007, 1, 15, 3, 4, 5) cursor.execute("create table t1(dt datetime)") cursor.execute("insert into t1 values (?)", value) result = cursor.execute("select dt from t1").fetchone()[0] assert isinstance(result, datetime) assert value == result def test_datetime_fraction(cursor: pyodbc.Cursor): # SQL Server supports milliseconds, but Python's datetime supports nanoseconds, so the most # granular datetime supported is xxx000. value = datetime(2007, 1, 15, 3, 4, 5, 123000) cursor.execute("create table t1(dt datetime)") cursor.execute("insert into t1 values (?)", value) result = cursor.execute("select dt from t1").fetchone()[0] assert isinstance(result, datetime) assert value == result def test_datetime_fraction_rounded(cursor: pyodbc.Cursor): # SQL Server supports milliseconds, but Python's datetime supports nanoseconds. pyodbc # rounds down to what the database supports. full = datetime(2007, 1, 15, 3, 4, 5, 123456) rounded = datetime(2007, 1, 15, 3, 4, 5, 123000) cursor.execute("create table t1(dt datetime)") cursor.execute("insert into t1 values (?)", full) result = cursor.execute("select dt from t1").fetchone()[0] assert isinstance(result, datetime) assert rounded == result def test_datetime2(cursor: pyodbc.Cursor): value = datetime(2007, 1, 15, 3, 4, 5) cursor.execute("create table t1(dt datetime2)") cursor.execute("insert into t1 values (?)", value) result = cursor.execute("select dt from t1").fetchone()[0] assert isinstance(result, datetime) assert value == result def test_sp_results(cursor: pyodbc.Cursor): cursor.execute( """ Create procedure proc1 AS select top 10 name, id, xtype, refdate from sysobjects """) rows = cursor.execute("exec proc1").fetchall() assert isinstance(rows, list) assert len(rows) == 10 # there has to be at least 10 items in sysobjects assert isinstance(rows[0].refdate, datetime) def test_sp_results_from_temp(cursor: pyodbc.Cursor): # Note: I've used "set nocount on" so that we don't get the number of rows deleted from # #tmptable. If you don't do this, you'd need to call nextset() once to skip it. 
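# (Sketch of the alternative had "set nocount on" been omitted: the rows-affected count
# arrives as an extra result, so it would need to be stepped over first:
#
#     cursor.execute("exec proc1")
#     cursor.nextset()    # skip the "N rows affected" result
#     rows = cursor.fetchall()
# )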
cursor.execute( """ Create procedure proc1 AS set nocount on select top 10 name, id, xtype, refdate into #tmptable from sysobjects select * from #tmptable """) cursor.execute("exec proc1") assert cursor.description is not None assert len(cursor.description) == 4 rows = cursor.fetchall() assert isinstance(rows, list) assert len(rows) == 10 # there has to be at least 10 items in sysobjects assert isinstance(rows[0].refdate, datetime) def test_sp_results_from_vartbl(cursor: pyodbc.Cursor): cursor.execute( """ Create procedure proc1 AS set nocount on declare @tmptbl table(name varchar(100), id int, xtype varchar(4), refdate datetime) insert into @tmptbl select top 10 name, id, xtype, refdate from sysobjects select * from @tmptbl """) cursor.execute("exec proc1") rows = cursor.fetchall() assert isinstance(rows, list) assert len(rows) == 10 # there has to be at least 10 items in sysobjects assert isinstance(rows[0].refdate, datetime) def test_sp_with_dates(cursor: pyodbc.Cursor): # Reported in the forums that passing two datetimes to a stored procedure doesn't work. cursor.execute( """ if exists (select * from dbo.sysobjects where id = object_id(N'[test_sp]') and OBJECTPROPERTY(id, N'IsProcedure') = 1) drop procedure [dbo].[test_sp] """) cursor.execute( """ create procedure test_sp(@d1 datetime, @d2 datetime) AS declare @d as int set @d = datediff(year, @d1, @d2) select @d """) cursor.execute("exec test_sp ?, ?", datetime.now(), datetime.now()) rows = cursor.fetchall() assert rows is not None assert rows[0][0] == 0 # 0 years apart def test_sp_with_none(cursor: pyodbc.Cursor): # Reported in the forums that passing None caused an error. cursor.execute( """ if exists (select * from dbo.sysobjects where id = object_id(N'[test_sp]') and OBJECTPROPERTY(id, N'IsProcedure') = 1) drop procedure [dbo].[test_sp] """) cursor.execute( """ create procedure test_sp(@x varchar(20)) AS declare @y varchar(20) set @y = @x select @y """) cursor.execute("exec test_sp ?", None) rows = cursor.fetchall() assert rows is not None assert rows[0][0] is None # 0 years apart # # rowcount # def test_rowcount_delete(cursor: pyodbc.Cursor): assert cursor.rowcount == -1 cursor.execute("create table t1(i int)") count = 4 for i in range(count): cursor.execute("insert into t1 values (?)", i) cursor.execute("delete from t1") assert cursor.rowcount == count def test_rowcount_nodata(cursor: pyodbc.Cursor): """ This represents a different code path than a delete that deleted something. The return value is SQL_NO_DATA and code after it was causing an error. We could use SQL_NO_DATA to step over the code that errors out and drop down to the same SQLRowCount code. On the other hand, we could hardcode a zero return value. """ cursor.execute("create table t1(i int)") # This is a different code path internally. cursor.execute("delete from t1") assert cursor.rowcount == 0 def test_rowcount_select(cursor: pyodbc.Cursor): """ Ensure Cursor.rowcount is set properly after a select statement. pyodbc calls SQLRowCount after each execute and sets Cursor.rowcount, but SQL Server 2005 returns -1 after a select statement, so we'll test for that behavior. This is valid behavior according to the DB API specification, but people don't seem to like it. 
""" cursor.execute("create table t1(i int)") count = 4 for i in range(count): cursor.execute("insert into t1 values (?)", i) cursor.execute("select * from t1") assert cursor.rowcount == -1 rows = cursor.fetchall() assert len(rows) == count assert cursor.rowcount == -1 def test_rowcount_reset(cursor: pyodbc.Cursor): "Ensure rowcount is reset after DDL" cursor.execute("create table t1(i int)") count = 4 for i in range(count): cursor.execute("insert into t1 values (?)", i) assert cursor.rowcount == 1 cursor.execute("create table t2(i int)") ddl_rowcount = (0 if IS_FREEDTS else -1) assert cursor.rowcount == ddl_rowcount def test_retcursor_delete(cursor: pyodbc.Cursor): cursor.execute("create table t1(i int)") cursor.execute("insert into t1 values (1)") v = cursor.execute("delete from t1") assert v == cursor def test_retcursor_nodata(cursor: pyodbc.Cursor): """ This represents a different code path than a delete that deleted something. The return value is SQL_NO_DATA and code after it was causing an error. We could use SQL_NO_DATA to step over the code that errors out and drop down to the same SQLRowCount code. """ cursor.execute("create table t1(i int)") # This is a different code path internally. v = cursor.execute("delete from t1") assert v == cursor def test_retcursor_select(cursor: pyodbc.Cursor): cursor.execute("create table t1(i int)") cursor.execute("insert into t1 values (1)") v = cursor.execute("select * from t1") assert v == cursor def table_with_spaces(cursor: pyodbc.Cursor): "Ensure we can select using [x z] syntax" try: cursor.execute("create table [test one](int n)") cursor.execute("insert into [test one] values(1)") cursor.execute("select * from [test one]") v = cursor.fetchone()[0] assert v == 1 finally: cursor.rollback() def test_lower_case(): "Ensure pyodbc.lowercase forces returned column names to lowercase." try: pyodbc.lowercase = True cnxn = connect() cursor = cnxn.cursor() cursor.execute("create table t1(Abc int, dEf int)") cursor.execute("select * from t1") names = [t[0] for t in cursor.description] names.sort() assert names == ["abc", "def"] finally: # Put it back so other tests don't fail. pyodbc.lowercase = False def test_row_description(cursor: pyodbc.Cursor): """ Ensure Cursor.description is accessible as Row.cursor_description. """ cursor.execute("create table t1(a int, b char(3))") cursor.execute("insert into t1 values(1, 'abc')") row = cursor.execute("select * from t1").fetchone() assert cursor.description == row.cursor_description def test_temp_select(cursor: pyodbc.Cursor): # A project was failing to create temporary tables via select into. 
cursor.execute("create table t1(s char(7))") cursor.execute("insert into t1 values(?)", "testing") v = cursor.execute("select * from t1").fetchone()[0] assert isinstance(v, str) assert v == "testing" cursor.execute("select s into t2 from t1") v = cursor.execute("select * from t1").fetchone()[0] assert isinstance(v, str) assert v == "testing" def test_executemany(cursor: pyodbc.Cursor): cursor.execute("create table t1(a int, b varchar(10))") params = [(i, str(i)) for i in range(1, 6)] cursor.executemany("insert into t1(a, b) values (?,?)", params) count = cursor.execute("select count(*) from t1").fetchone()[0] assert count == len(params) cursor.execute("select a, b from t1 order by a") rows = cursor.fetchall() assert count == len(rows) for param, row in zip(params, rows): assert param[0] == row[0] assert param[1] == row[1] def test_executemany_one(cursor: pyodbc.Cursor): "Pass executemany a single sequence" cursor.execute("create table t1(a int, b varchar(10))") params = [(1, "test")] cursor.executemany("insert into t1(a, b) values (?,?)", params) count = cursor.execute("select count(*) from t1").fetchone()[0] assert count == len(params) cursor.execute("select a, b from t1 order by a") rows = cursor.fetchall() assert count == len(rows) for param, row in zip(params, rows): assert param[0] == row[0] assert param[1] == row[1] def test_executemany_dae_0(cursor: pyodbc.Cursor): """ DAE for 0-length value """ cursor.execute("create table t1(a nvarchar(max))") cursor.fast_executemany = True cursor.executemany("insert into t1(a) values(?)", [['']]) assert cursor.execute("select a from t1").fetchone()[0] == '' cursor.fast_executemany = False def test_executemany_failure(cursor: pyodbc.Cursor): """ Ensure that an exception is raised if one query in an executemany fails. """ cursor.execute("create table t1(a int, b varchar(10))") params = [(1, 'good'), ('error', 'not an int'), (3, 'good')] with pytest.raises(pyodbc.Error): cursor.executemany("insert into t1(a, b) value (?, ?)", params) def test_row_slicing(cursor: pyodbc.Cursor): cursor.execute("create table t1(a int, b int, c int, d int)") cursor.execute("insert into t1 values(1,2,3,4)") row = cursor.execute("select * from t1").fetchone() result = row[:] assert result is row result = row[:-1] assert result == (1, 2, 3) result = row[0:4] assert result is row def test_row_repr(cursor: pyodbc.Cursor): cursor.execute("create table t1(a int, b int, c int, d varchar(50))") cursor.execute("insert into t1 values(1,2,3,'four')") row = cursor.execute("select * from t1").fetchone() result = str(row) assert result == "(1, 2, 3, 'four')" result = str(row[:-1]) assert result == "(1, 2, 3)" result = str(row[:1]) assert result == "(1,)" def test_concatenation(cursor: pyodbc.Cursor): v2 = '0123456789' * 30 v3 = '9876543210' * 30 cursor.execute("create table t1(c1 int identity(1, 1), c2 varchar(300), c3 varchar(300))") cursor.execute("insert into t1(c2, c3) values (?,?)", v2, v3) row = cursor.execute("select c2, c3, c2 + c3 as both from t1").fetchone() assert row.both == v2 + v3 def test_view_select(cursor: pyodbc.Cursor): # Reported in forum: Can't select from a view? I think I do this a lot, but another test # never hurts. # Create a table (t1) with 3 rows and a view (t2) into it. 
cursor.execute("create table t1(c1 int identity(1, 1), c2 varchar(50))") for i in range(3): cursor.execute("insert into t1(c2) values (?)", f"string{i}") cursor.execute("create view t2 as select * from t1") # Select from the view cursor.execute("select * from t2") rows = cursor.fetchall() assert rows is not None assert len(rows) == 3 def test_autocommit(): cnxn = connect() assert cnxn.autocommit is False cnxn = None cnxn = connect(autocommit=True) assert cnxn.autocommit is True cnxn.autocommit = False assert cnxn.autocommit is False def test_sqlserver_callproc(cursor: pyodbc.Cursor): try: cursor.execute("drop procedure pyodbctest") cursor.commit() except: pass cursor.execute("create table t1(s varchar(10))") cursor.execute("insert into t1 values(?)", "testing") cursor.execute(""" create procedure pyodbctest @var1 varchar(32) as begin select s from t1 return end """) cursor.execute("exec pyodbctest 'hi'") def test_skip(cursor: pyodbc.Cursor): # Insert 1, 2, and 3. Fetch 1, skip 2, fetch 3. cursor.execute("create table t1(id int)") for i in range(1, 5): cursor.execute("insert into t1 values(?)", i) cursor.execute("select id from t1 order by id") assert cursor.fetchone()[0] == 1 cursor.skip(2) assert cursor.fetchone()[0] == 4 def test_timeout(): cnxn = connect() assert cnxn.timeout == 0 # defaults to zero (off) cnxn.timeout = 30 assert cnxn.timeout == 30 cnxn.timeout = 0 assert cnxn.timeout == 0 def test_sets_execute(cursor: pyodbc.Cursor): # Only lists and tuples are allowed. cursor.execute("create table t1 (word varchar (100))") words = {'a', 'b', 'c'} with pytest.raises(pyodbc.ProgrammingError): cursor.execute("insert into t1 (word) values (?)", words) with pytest.raises(pyodbc.ProgrammingError): cursor.executemany("insert into t1 (word) values (?)", words) def test_row_execute(cursor: pyodbc.Cursor): "Ensure we can use a Row object as a parameter to execute" cursor.execute("create table t1(n int, s varchar(10))") cursor.execute("insert into t1 values (1, 'a')") row = cursor.execute("select n, s from t1").fetchone() assert row cursor.execute("create table t2(n int, s varchar(10))") cursor.execute("insert into t2 values (?, ?)", row) def test_row_executemany(cursor: pyodbc.Cursor): "Ensure we can use a Row object as a parameter to executemany" cursor.execute("create table t1(n int, s varchar(10))") for i in range(3): cursor.execute("insert into t1 values (?, ?)", i, chr(ord('a') + i)) rows = cursor.execute("select n, s from t1").fetchall() assert len(rows) != 0 cursor.execute("create table t2(n int, s varchar(10))") cursor.executemany("insert into t2 values (?, ?)", rows) def test_description(cursor: pyodbc.Cursor): "Ensure cursor.description is correct" cursor.execute("create table t1(n int, s varchar(8), d decimal(5,2))") cursor.execute("insert into t1 values (1, 'abc', '1.23')") cursor.execute("select * from t1") # (I'm not sure the precision of an int is constant across different versions, bits, so I'm # hand checking the items I do know. 
# int t = cursor.description[0] assert t[0] == 'n' assert t[1] == int assert t[5] == 0 # scale assert t[6] is True # nullable # varchar(8) t = cursor.description[1] assert t[0] == 's' assert t[1] == str assert t[4] == 8 # precision assert t[5] == 0 # scale assert t[6] is True # nullable # decimal(5, 2) t = cursor.description[2] assert t[0] == 'd' assert t[1] == Decimal assert t[4] == 5 # precision assert t[5] == 2 # scale assert t[6] is True # nullable def test_cursor_messages_with_print(cursor: pyodbc.Cursor): """ Ensure the Cursor.messages attribute is handled correctly with a simple PRINT statement. """ assert not cursor.messages # ascii / extended ascii / unicode / beyond BMP unicode for msg in ('hello world', 'a \xeb a', 'b \u0394 b', 'c \U0001F31C c'): cursor.execute(f"PRINT N'{msg}'") # note, unicode literal messages = cursor.messages assert isinstance(messages, list) assert len(messages) == 1 assert isinstance(messages[0], tuple) assert len(messages[0]) == 2 assert isinstance(messages[0][0], str) assert isinstance(messages[0][1], str) assert '[01000] (0)' == messages[0][0] assert messages[0][1].endswith(msg) # maximum size message # SQL Server PRINT statements are never more than 8000 characters # https://docs.microsoft.com/en-us/sql/t-sql/language-elements/print-transact-sql#remarks msg = 'ABCDEFGH' * 1000 cursor.execute(f"PRINT '{msg}'") # note, plain ascii literal messages = cursor.messages assert len(messages) == 1 assert messages[0][1].endswith(msg) def test_cursor_messages_with_fast_executemany(cursor: pyodbc.Cursor): """ Ensure the Cursor.messages attribute is set with fast_executemany=True. """ cursor.execute("create table t2(id1 int, id2 int)") cursor.commit() cursor.fast_executemany = True cursor.executemany( "print 'hello';insert into t2(id1, id2) values (?, ?)", [(10, 11), (20, 21)], ) assert len(cursor.messages) == 2 assert all(m[1].endswith('hello') for m in cursor.messages) def test_cursor_messages_with_stored_proc(cursor: pyodbc.Cursor): """ Complex scenario to test the Cursor.messages attribute. 
""" cursor.execute(""" create or alter procedure test_cursor_messages as begin set nocount on; print 'Message 1a'; print 'Message 1b'; select N'Field 1a' AS F UNION ALL SELECT N'Field 1b'; select N'Field 2a' AS F UNION ALL SELECT N'Field 2b'; print 'Message 2a'; print 'Message 2b'; end """) # The messages will look like: # # [Microsoft][ODBC Driver 18 for SQL Server][SQL Server]Message 1a # result set 1: messages, rows cursor.execute("exec test_cursor_messages") vals = [row[0] for row in cursor.fetchall()] assert vals == ['Field 1a', 'Field 1b'] msgs = [ re.search(r'Message \d[ab]$', m[1]).group(0) for m in cursor.messages ] assert msgs == ['Message 1a', 'Message 1b'] # result set 2: rows, no messages assert cursor.nextset() vals = [row[0] for row in cursor.fetchall()] assert vals == ['Field 2a', 'Field 2b'] assert not cursor.messages # result set 3: messages, no rows assert cursor.nextset() with pytest.raises(pyodbc.ProgrammingError): cursor.fetchall() msgs = [ re.search(r'Message \d[ab]$', m[1]).group(0) for m in cursor.messages ] assert msgs == ['Message 2a', 'Message 2b'] # result set 4: no rows, no messages assert not cursor.nextset() with pytest.raises(pyodbc.ProgrammingError): cursor.fetchall() assert not cursor.messages def test_none_param(cursor: pyodbc.Cursor): "Ensure None can be used for params other than the first" # Some driver/db versions would fail if NULL was not the first parameter because # SQLDescribeParam (only used with NULL) could not be used after the first call to # SQLBindParameter. This means None always worked for the first column, but did not work # for later columns. # # If SQLDescribeParam doesn't work, pyodbc would use VARCHAR which almost always worked. # However, binary/varbinary won't allow an implicit conversion. cursor.execute("create table t1(n int, blob varbinary(max))") cursor.execute("insert into t1 values (1, newid())") row = cursor.execute("select * from t1").fetchone() assert row.n == 1 assert isinstance(row.blob, bytes) sql = "update t1 set n=?, blob=?" try: cursor.execute(sql, 2, None) except pyodbc.DataError: if IS_FREEDTS: # cnxn.getinfo(pyodbc.SQL_DESCRIBE_PARAMETER) returns False for FreeTDS, so pyodbc # can't call SQLDescribeParam to get the correct parameter type. This can lead to # errors being returned from SQL Server when sp_prepexec is called, e.g., "Implicit # conversion from data type varchar to varbinary(max) is not allowed." # # So at least verify that the user can manually specify the parameter type cursor.setinputsizes([(), (pyodbc.SQL_VARBINARY, None, None)]) cursor.execute(sql, 2, None) else: raise row = cursor.execute("select * from t1").fetchone() assert row.n == 2 assert row.blob is None def test_output_conversion(): def convert1(value): # The value is the raw bytes (as a bytes object) read from the # database. We'll simply add an X at the beginning at the end. return 'X' + value.decode('latin1') + 'X' def convert2(value): # Same as above, but add a Y at the beginning at the end. return 'Y' + value.decode('latin1') + 'Y' cnxn = connect() cursor = cnxn.cursor() cursor.execute("create table t1(n int, v varchar(10))") cursor.execute("insert into t1 values (1, '123.45')") cnxn.add_output_converter(pyodbc.SQL_VARCHAR, convert1) value = cursor.execute("select v from t1").fetchone()[0] assert value == 'X123.45X' # Clear all conversions and try again. There should be no Xs this time. 
cnxn.clear_output_converters() value = cursor.execute("select v from t1").fetchone()[0] assert value == '123.45' # Same but clear using remove_output_converter. cnxn.add_output_converter(pyodbc.SQL_VARCHAR, convert1) value = cursor.execute("select v from t1").fetchone()[0] assert value == 'X123.45X' cnxn.remove_output_converter(pyodbc.SQL_VARCHAR) value = cursor.execute("select v from t1").fetchone()[0] assert value == '123.45' # Clear via add_output_converter, passing None for the converter function. cnxn.add_output_converter(pyodbc.SQL_VARCHAR, convert1) value = cursor.execute("select v from t1").fetchone()[0] assert value == 'X123.45X' cnxn.add_output_converter(pyodbc.SQL_VARCHAR, None) value = cursor.execute("select v from t1").fetchone()[0] assert value == '123.45' # retrieve and temporarily replace converter (get_output_converter) # # case_1: converter already registered cnxn.add_output_converter(pyodbc.SQL_VARCHAR, convert1) value = cursor.execute("select v from t1").fetchone()[0] assert value == 'X123.45X' prev_converter = cnxn.get_output_converter(pyodbc.SQL_VARCHAR) assert prev_converter is not None cnxn.add_output_converter(pyodbc.SQL_VARCHAR, convert2) value = cursor.execute("select v from t1").fetchone()[0] assert value == 'Y123.45Y' cnxn.add_output_converter(pyodbc.SQL_VARCHAR, prev_converter) value = cursor.execute("select v from t1").fetchone()[0] assert value == 'X123.45X' # # case_2: no converter already registered cnxn.clear_output_converters() value = cursor.execute("select v from t1").fetchone()[0] assert value == '123.45' prev_converter = cnxn.get_output_converter(pyodbc.SQL_VARCHAR) assert prev_converter is None cnxn.add_output_converter(pyodbc.SQL_VARCHAR, convert2) value = cursor.execute("select v from t1").fetchone()[0] assert value == 'Y123.45Y' cnxn.add_output_converter(pyodbc.SQL_VARCHAR, prev_converter) value = cursor.execute("select v from t1").fetchone()[0] assert value == '123.45' def test_too_large(cursor: pyodbc.Cursor): """Ensure error raised if insert fails due to truncation""" value = 'x' * 1000 cursor.execute("create table t1(s varchar(800))") with pytest.raises(pyodbc.Error): cursor.execute("insert into t1 values (?)", value) def test_row_equal(cursor: pyodbc.Cursor): cursor.execute("create table t1(n int, s varchar(20))") cursor.execute("insert into t1 values (1, 'test')") row1 = cursor.execute("select n, s from t1").fetchone() row2 = cursor.execute("select n, s from t1").fetchone() assert row1 == row2 def test_row_gtlt(cursor: pyodbc.Cursor): cursor.execute("create table t1(n int, s varchar(20))") cursor.execute("insert into t1 values (1, 'test1')") cursor.execute("insert into t1 values (1, 'test2')") rows = cursor.execute("select n, s from t1 order by s").fetchall() assert rows[0] < rows[1] assert rows[0] <= rows[1] assert rows[1] > rows[0] assert rows[1] >= rows[0] assert rows[0] != rows[1] rows = list(rows) rows.sort() # uses < def test_context_manager_success(): "Ensure `with` commits if an exception is not raised" cnxn = connect() cursor = cnxn.cursor() cursor.execute("create table t1(n int)") cnxn.commit() with cnxn: cursor.execute("insert into t1 values (1)") rows = cursor.execute("select n from t1").fetchall() assert len(rows) == 1 assert rows[0][0] == 1 def test_context_manager_failure(cursor: pyodbc.Cursor): "Ensure `with` rolls back if an exception is raised" cnxn = connect() cursor = cnxn.cursor() # We'll insert a row and commit it. Then we'll insert another row followed by an # exception. 
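# (Note: pyodbc's Connection context manager commits on a clean exit and rolls back when
# the block raises; unlike a file object, it does not close the connection.)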
cursor.execute("create table t1(n int)") cursor.execute("insert into t1 values (1)") cnxn.commit() with pytest.raises(pyodbc.Error): with cnxn: cursor.execute("insert into t1 values (2)") cursor.execute("delete from bogus") cursor.execute("select max(n) from t1") val = cursor.fetchval() assert val == 1 def test_untyped_none(cursor: pyodbc.Cursor): # From issue 129 value = cursor.execute("select ?", None).fetchone()[0] assert value is None def test_large_update_nodata(cursor: pyodbc.Cursor): cursor.execute('create table t1(a varbinary(max))') hundredkb = b'x' * 100 * 1024 cursor.execute('update t1 set a=? where 1=0', (hundredkb,)) def test_func_param(cursor: pyodbc.Cursor): try: cursor.execute("drop function func1") except: pass cursor.execute(""" create function func1 (@testparam varchar(4)) returns @rettest table (param varchar(4)) as begin insert @rettest select @testparam return end """) cursor.commit() value = cursor.execute("select * from func1(?)", 'test').fetchone()[0] assert value == 'test' def test_columns(cursor: pyodbc.Cursor): # When using aiohttp, `await cursor.primaryKeys('t1')` was raising the error # # Error: TypeError: argument 2 must be str, not None # # I'm not sure why, but PyArg_ParseTupleAndKeywords fails if you use "|s" for an # optional string keyword when calling indirectly. cursor.execute("create table t1(a int, b varchar(3), xΏz varchar(4))") cursor.columns('t1') results = {row.column_name: row for row in cursor} row = results['a'] assert row.type_name == 'int', row.type_name row = results['b'] assert row.type_name == 'varchar' assert row.column_size == 3 # Now do the same, but specifically pass in None to one of the keywords. Old versions # were parsing arguments incorrectly and would raise an error. (This crops up when # calling indirectly like columns(*args, **kwargs) which aiodbc does.) cursor.columns('t1', schema=None, catalog=None) results = {row.column_name: row for row in cursor} row = results['a'] assert row.type_name == 'int', row.type_name row = results['b'] assert row.type_name == 'varchar' assert row.column_size == 3 row = results['xΏz'] assert row.type_name == 'varchar' assert row.column_size == 4, row.column_size for i in range(8, 16): table_name = 'pyodbc_89abcdef'[:i] cursor.execute(f""" IF OBJECT_ID (N'{table_name}', N'U') IS NOT NULL DROP TABLE {table_name}; CREATE TABLE {table_name} (id INT PRIMARY KEY); """) col_count = len([col.column_name for col in cursor.columns(table_name)]) assert col_count == 1 cursor.execute(f"drop table {table_name}") def test_cancel(cursor: pyodbc.Cursor): # I'm not sure how to reliably cause a hang to cancel, so for now we'll settle with # making sure SQLCancel is called correctly. cursor.execute("select 1") cursor.cancel() def test_emoticons_as_parameter(cursor: pyodbc.Cursor): # https://github.com/mkleehammer/pyodbc/issues/423 # # When sending a varchar parameter, pyodbc is supposed to set ColumnSize to the number # of characters. Ensure it works even with 4-byte characters. 
# # http://www.fileformat.info/info/unicode/char/1f31c/index.htm v = "x \U0001F31C z" cursor.execute("create table t1(s nvarchar(100))") cursor.execute("insert into t1 values (?)", v) result = cursor.execute("select s from t1").fetchone()[0] assert result == v def test_emoticons_as_literal(cursor: pyodbc.Cursor): # similar to `test_emoticons_as_parameter`, above, except for Unicode literal # # http://www.fileformat.info/info/unicode/char/1f31c/index.htm # FreeTDS ODBC issue fixed in version 1.1.23 # https://github.com/FreeTDS/freetds/issues/317 v = "x \U0001F31C z" cursor.execute("create table t1(s nvarchar(100))") cursor.execute(f"insert into t1 values (N'{v}')") result = cursor.execute("select s from t1").fetchone()[0] assert result == v def _test_tvp(cursor: pyodbc.Cursor, diff_schema): # Test table value parameters (TVP). I like the explanation here: # # https://www.mssqltips.com/sqlservertip/1483/using-table-valued-parameters-tvp-in-sql-server/ # # "At a high level the TVP allows you to populate a table declared as a T-SQL variable, # then pass that table as a parameter to a stored procedure or function." # # "The TVP must be declared READONLY. You cannot perform any DML (i.e. INSERT, UPDATE, # DELETE) against the TVP; you can only reference it in a SELECT statement." # # In this test we'll create a table, pass it to a stored procedure, and have the stored # procedure simply return the rows from the TVP. # # Apparently the way pyodbc knows something is a TVP is because it is in a sequence. I'm # not sure I like that as it is very generic and specific to SQL Server. It would be wiser # to define a wrapper pyodbc.TVP or pyodbc.Table object, similar to the DB APIs `Binary` # object. pyodbc.native_uuid = True # This is the default, but we'll reset it in case a previous test fails to. procname = 'SelectTVP' typename = 'TestTVP' if diff_schema: schemaname = 'myschema' procname = schemaname + '.' + procname typenameonly = typename typename = schemaname + '.' + typename # (Don't use "if exists" since older SQL Servers don't support it.) try: cursor.execute("drop procedure " + procname) except: pass try: cursor.execute("drop type " + typename) except: pass if diff_schema: try: cursor.execute("drop schema " + schemaname) except: pass cursor.commit() if diff_schema: cursor.execute("CREATE SCHEMA myschema") cursor.commit() cursor.execute( f""" CREATE TYPE {typename} AS TABLE( c01 VARCHAR(255), c02 VARCHAR(MAX), c03 VARBINARY(255), c04 VARBINARY(MAX), c05 BIT, c06 DATE, c07 TIME, c08 DATETIME2(5), c09 BIGINT, c10 FLOAT, c11 NUMERIC(38, 24), c12 UNIQUEIDENTIFIER) """) cursor.commit() cursor.execute( f""" CREATE PROCEDURE {procname} @TVP {typename} READONLY AS SELECT * FROM @TVP; """) cursor.commit() # The values aren't exactly VERY_LONG_LEN but close enough and *significantly* faster than # the loop we had before. VERY_LONG_LEN = 2000000 long_string = ''.join(chr(i) for i in range(32, 127)) # printable characters long_bytearray = bytes(list(range(255))) very_long_string = long_string * (VERY_LONG_LEN // len(long_string)) very_long_bytearray = long_bytearray * (VERY_LONG_LEN // len(long_bytearray)) params = [ # Three rows with all of the types in the table defined above. 
( 'abc', 'abc', bytes([0xD1, 0xCE, 0xFA, 0xCE]), bytes([0x0F, 0xF1, 0xCE, 0xCA, 0xFE]), True, date(1997, 8, 29), time(9, 13, 39), datetime(2018, 11, 13, 13, 33, 26, 298420), 1234567, 3.14, Decimal('31234567890123.141243449787580175325274'), uuid.UUID('4fe34a93-e574-04cc-200a-353f0d1770b1'), ), ( '', '', bytes([0x00, 0x01, 0x02, 0x03, 0x04]), bytes([0x00, 0x01, 0x02, 0x03, 0x04, 0x05]), False, date(1, 1, 1), time(0, 0, 0), datetime(1, 1, 1, 0, 0, 0, 0), -9223372036854775808, -1.79E+308, Decimal('0.000000000000000000000001'), uuid.UUID('33f7504c-2bac-1b83-01d1-7434a7ba6a17'), ), ( long_string, very_long_string, bytes(long_bytearray), bytes(very_long_bytearray), True, date(9999, 12, 31), time(23, 59, 59), datetime(9999, 12, 31, 23, 59, 59, 999990), 9223372036854775807, 1.79E+308, Decimal('99999999999999.999999999999999999999999'), uuid.UUID('ffffffff-ffff-ffff-ffff-ffffffffffff'), ) ] if diff_schema: p1 = [[typenameonly, schemaname] + params] else: p1 = [params] result_array = [tuple(row) for row in cursor.execute(f"exec {procname} ?", p1).fetchall()] # The values make it very difficult to troubleshoot if something is wrong, so instead of # asserting they are the same, we'll walk them if there is a problem to identify which is # wrong. for row, param in zip(result_array, params): if row != param: for r, p in zip(row, param): assert r == p # Now test with zero rows. params = [] if diff_schema: p1 = [[typenameonly, schemaname] + params] else: p1 = [params] result_array = cursor.execute(f"exec {procname} ?", p1).fetchall() assert result_array == params @pytest.mark.skipif(IS_FREEDTS, reason='FreeTDS does not support TVP') def test_tvp(cursor: pyodbc.Cursor): _test_tvp(cursor, False) @pytest.mark.skipif(IS_FREEDTS, reason='FreeTDS does not support TVP') def test_tvp_diffschema(cursor: pyodbc.Cursor): _test_tvp(cursor, True) def get_sqlserver_version(cursor: pyodbc.Cursor): """ Returns the major version: 8-->2000, 9-->2005, 10-->2008 """ cursor.execute("exec master..xp_msver 'ProductVersion'") row = cursor.fetchone() return int(row.Character_Value.split('.', 1)[0]) @lru_cache def _generate_str(length, encoding=None): """ Returns either a string or bytes, depending on whether encoding is provided, that is `length` elements long. If length is None, None is returned. This simplifies the tests by letting us put None into an array of other lengths and pass them here, moving the special case check into one place. """ if length is None: return None # Put non-ASCII characters at the front so we don't end up chopping one in half in a # multi-byte encoding like UTF-8. v = 'á' remaining = max(0, length - len(v)) if remaining: seed = '0123456789-abcdefghijklmnopqrstuvwxyz-' if remaining <= len(seed): v += seed else: c = (remaining + len(seed) - 1) // len(seed) v += seed * c if encoding: v = v.encode(encoding) # We chop *after* encoding because if we are encoding then we want bytes. v = v[:length] return v
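# A quick REPL-style sanity sketch of the helper above (not part of the suite; the
# outputs follow from the chop-after-encoding behavior described in the comments):
#
#     >>> _generate_str(None) is None
#     True
#     >>> _generate_str(10)             # 10 characters, non-ASCII first
#     'á012345678'
#     >>> _generate_str(10, 'utf8')     # 10 *bytes*; 'á' encodes to two of them
#     b'\xc3\xa101234567'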