pyodbc-3.0.7/0000775000175000017500000000000012611462502011454 5ustar dokodokopyodbc-3.0.7/LICENSE.txt0000666000175000017500000000154412031131304013272 0ustar dokodokoPermission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. pyodbc-3.0.7/PKG-INFO0000666000175000017500000000164512146222252012560 0ustar dokodokoMetadata-Version: 1.1 Name: pyodbc Version: 3.0.7 Summary: DB API Module for ODBC Home-page: http://code.google.com/p/pyodbc Author: Michael Kleehammer Author-email: michael@kleehammer.com License: MIT Download-URL: http://code.google.com/p/pyodbc/downloads/list Description: A Python DB API 2 module for ODBC. This project provides an up-to-date, convenient interface to ODBC using native data types like datetime and decimal. 
Platform: UNKNOWN Classifier: Development Status :: 5 - Production/Stable Classifier: Intended Audience :: Developers Classifier: Intended Audience :: System Administrators Classifier: License :: OSI Approved :: MIT License Classifier: Operating System :: Microsoft :: Windows Classifier: Operating System :: POSIX Classifier: Programming Language :: Python Classifier: Programming Language :: Python :: 2 Classifier: Programming Language :: Python :: 3 Classifier: Topic :: Database pyodbc-3.0.7/MANIFEST.in0000666000175000017500000000024012031131304013175 0ustar dokodokoinclude src/*.h include src/*.cpp include tests/* include README.rst include LICENSE.txt # Include this file, needed for bdist_rpm include MANIFEST.in pyodbc-3.0.7/src/0000775000175000017500000000000012611462502012243 5ustar dokodokopyodbc-3.0.7/src/connection.cpp0000666000175000017500000007727412146217440015133 0ustar dokodoko // Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated // documentation files (the "Software"), to deal in the Software without restriction, including without limitation the // rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to // permit persons to whom the Software is furnished to do so. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE // WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS // OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR // OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
#include "pyodbc.h" #include "connection.h" #include "cursor.h" #include "pyodbcmodule.h" #include "errors.h" #include "wrapper.h" #include "cnxninfo.h" #include "sqlwchar.h" static char connection_doc[] = "Connection objects manage connections to the database.\n" "\n" "Each manages a single ODBC HDBC."; static Connection* Connection_Validate(PyObject* self) { Connection* cnxn; if (self == 0 || !Connection_Check(self)) { PyErr_SetString(PyExc_TypeError, "Connection object required"); return 0; } cnxn = (Connection*)self; if (cnxn->hdbc == SQL_NULL_HANDLE) { PyErr_SetString(ProgrammingError, "Attempt to use a closed connection."); return 0; } return cnxn; } static bool Connect(PyObject* pConnectString, HDBC hdbc, bool fAnsi, long timeout) { // This should have been checked by the global connect function. I(PyString_Check(pConnectString) || PyUnicode_Check(pConnectString)); const int cchMax = 600; if (PySequence_Length(pConnectString) >= cchMax) { PyErr_SetString(PyExc_TypeError, "connection string too long"); return false; } // The driver manager determines if the app is a Unicode app based on whether we call SQLDriverConnectA or // SQLDriverConnectW. Some drivers, notably Microsoft Access/Jet, change their behavior based on this, so we try // the Unicode version first. (The Access driver only supports Unicode text, but SQLDescribeCol returns SQL_CHAR // instead of SQL_WCHAR if we connect with the ANSI version. Obviously this causes lots of errors since we believe // what it tells us (SQL_CHAR).) // Python supports only UCS-2 and UCS-4, so we shouldn't need to worry about receiving surrogate pairs. However, // Windows does use UCS-16, so it is possible something would be misinterpreted as one. We may need to examine // this more. 
SQLRETURN ret; if (timeout > 0) { Py_BEGIN_ALLOW_THREADS ret = SQLSetConnectAttr(hdbc, SQL_ATTR_LOGIN_TIMEOUT, (SQLPOINTER)timeout, SQL_IS_UINTEGER); Py_END_ALLOW_THREADS if (!SQL_SUCCEEDED(ret)) RaiseErrorFromHandle("SQLSetConnectAttr(SQL_ATTR_LOGIN_TIMEOUT)", hdbc, SQL_NULL_HANDLE); } if (!fAnsi) { SQLWChar connectString(pConnectString); Py_BEGIN_ALLOW_THREADS ret = SQLDriverConnectW(hdbc, 0, connectString, (SQLSMALLINT)connectString.size(), 0, 0, 0, SQL_DRIVER_NOPROMPT); Py_END_ALLOW_THREADS if (SQL_SUCCEEDED(ret)) return true; // The Unicode function failed. If the error is that the driver doesn't have a Unicode version (IM001), continue // to the ANSI version. // // I've commented this out since a number of common drivers are returning different errors. The MySQL 5 // driver, for example, returns IM002 "Data source name not found...". // // PyObject* error = GetErrorFromHandle("SQLDriverConnectW", hdbc, SQL_NULL_HANDLE); // if (!HasSqlState(error, "IM001")) // { // RaiseErrorFromException(error); // return false; // } // Py_XDECREF(error); } SQLCHAR szConnect[cchMax]; if (PyUnicode_Check(pConnectString)) { Py_UNICODE* p = PyUnicode_AS_UNICODE(pConnectString); for (Py_ssize_t i = 0, c = PyUnicode_GET_SIZE(pConnectString); i <= c; i++) { if (p[i] > 0xFF) { PyErr_SetString(PyExc_TypeError, "A Unicode connection string was supplied but the driver does " "not have a Unicode connect function"); return false; } szConnect[i] = (SQLCHAR)p[i]; } } else { #if PY_MAJOR_VERSION < 3 const char* p = PyString_AS_STRING(pConnectString); memcpy(szConnect, p, (size_t)(PyString_GET_SIZE(pConnectString) + 1)); #else PyErr_SetString(PyExc_TypeError, "Connection strings must be Unicode"); return false; #endif } Py_BEGIN_ALLOW_THREADS ret = SQLDriverConnect(hdbc, 0, szConnect, SQL_NTS, 0, 0, 0, SQL_DRIVER_NOPROMPT); Py_END_ALLOW_THREADS if (SQL_SUCCEEDED(ret)) return true; RaiseErrorFromHandle("SQLDriverConnect", hdbc, SQL_NULL_HANDLE); return false; } PyObject* 
Connection_New(PyObject* pConnectString, bool fAutoCommit, bool fAnsi, bool fUnicodeResults, long timeout, bool fReadOnly) { // pConnectString // A string or unicode object. (This must be checked by the caller.) // // fAnsi // If true, do not attempt a Unicode connection. // // fUnicodeResults // If true, return strings in rows as unicode objects. // // Allocate HDBC and connect // HDBC hdbc = SQL_NULL_HANDLE; SQLRETURN ret; Py_BEGIN_ALLOW_THREADS ret = SQLAllocHandle(SQL_HANDLE_DBC, henv, &hdbc); Py_END_ALLOW_THREADS if (!SQL_SUCCEEDED(ret)) return RaiseErrorFromHandle("SQLAllocHandle", SQL_NULL_HANDLE, SQL_NULL_HANDLE); if (!Connect(pConnectString, hdbc, fAnsi, timeout)) { // Connect has already set an exception. Py_BEGIN_ALLOW_THREADS SQLFreeHandle(SQL_HANDLE_DBC, hdbc); Py_END_ALLOW_THREADS return 0; } // // Connected, so allocate the Connection object. // // Set all variables to something valid, so we don't crash in dealloc if this function fails. #ifdef _MSC_VER #pragma warning(disable : 4365) #endif Connection* cnxn = PyObject_NEW(Connection, &ConnectionType); #ifdef _MSC_VER #pragma warning(default : 4365) #endif if (cnxn == 0) { Py_BEGIN_ALLOW_THREADS SQLFreeHandle(SQL_HANDLE_DBC, hdbc); Py_END_ALLOW_THREADS return 0; } cnxn->hdbc = hdbc; cnxn->nAutoCommit = fAutoCommit ? SQL_AUTOCOMMIT_ON : SQL_AUTOCOMMIT_OFF; cnxn->searchescape = 0; cnxn->timeout = 0; cnxn->unicode_results = fUnicodeResults; cnxn->conv_count = 0; cnxn->conv_types = 0; cnxn->conv_funcs = 0; // // Initialize autocommit mode. // // The DB API says we have to default to manual-commit, but ODBC defaults to auto-commit. We also provide a // keyword parameter that allows the user to override the DB API and force us to start in auto-commit (in which // case we don't have to do anything). 
if (fAutoCommit == false) { SQLRETURN ret; Py_BEGIN_ALLOW_THREADS ret = SQLSetConnectAttr(cnxn->hdbc, SQL_ATTR_AUTOCOMMIT, (SQLPOINTER)cnxn->nAutoCommit, SQL_IS_UINTEGER); Py_END_ALLOW_THREADS if (!SQL_SUCCEEDED(ret)) { RaiseErrorFromHandle("SQLSetConnnectAttr(SQL_ATTR_AUTOCOMMIT)", cnxn->hdbc, SQL_NULL_HANDLE); Py_DECREF(cnxn); return 0; } } if (fReadOnly) { SQLRETURN ret; Py_BEGIN_ALLOW_THREADS ret = SQLSetConnectAttr(cnxn->hdbc, SQL_ATTR_ACCESS_MODE, (SQLPOINTER)SQL_MODE_READ_ONLY, 0); Py_END_ALLOW_THREADS if (!SQL_SUCCEEDED(ret)) { RaiseErrorFromHandle("SQLSetConnnectAttr(SQL_ATTR_ACCESS_MODE)", cnxn->hdbc, SQL_NULL_HANDLE); Py_DECREF(cnxn); return 0; } } TRACE("cnxn.new cnxn=%p hdbc=%d\n", cnxn, cnxn->hdbc); // // Gather connection-level information we'll need later. // Object info(GetConnectionInfo(pConnectString, cnxn)); if (!info.IsValid()) { Py_DECREF(cnxn); return 0; } CnxnInfo* p = (CnxnInfo*)info.Get(); cnxn->odbc_major = p->odbc_major; cnxn->odbc_minor = p->odbc_minor; cnxn->supports_describeparam = p->supports_describeparam; cnxn->datetime_precision = p->datetime_precision; cnxn->varchar_maxlength = p->varchar_maxlength; cnxn->wvarchar_maxlength = p->wvarchar_maxlength; cnxn->binary_maxlength = p->binary_maxlength; cnxn->need_long_data_len = p->need_long_data_len; return reinterpret_cast(cnxn); } static void _clear_conv(Connection* cnxn) { if (cnxn->conv_count != 0) { pyodbc_free(cnxn->conv_types); cnxn->conv_types = 0; for (int i = 0; i < cnxn->conv_count; i++) Py_XDECREF(cnxn->conv_funcs[i]); pyodbc_free(cnxn->conv_funcs); cnxn->conv_funcs = 0; cnxn->conv_count = 0; } } static char conv_clear_doc[] = "clear_output_converters() --> None\n\n" "Remove all output converter functions."; static PyObject* Connection_conv_clear(PyObject* self, PyObject* args) { UNUSED(args); Connection* cnxn = (Connection*)self; _clear_conv(cnxn); Py_RETURN_NONE; } static int Connection_clear(PyObject* self) { // Internal method for closing the connection. 
(Not called close so it isn't confused with the external close // method.) Connection* cnxn = (Connection*)self; if (cnxn->hdbc != SQL_NULL_HANDLE) { // REVIEW: Release threads? (But make sure you zero out hdbc *first*! TRACE("cnxn.clear cnxn=%p hdbc=%d\n", cnxn, cnxn->hdbc); Py_BEGIN_ALLOW_THREADS if (cnxn->nAutoCommit == SQL_AUTOCOMMIT_OFF) SQLEndTran(SQL_HANDLE_DBC, cnxn->hdbc, SQL_ROLLBACK); SQLDisconnect(cnxn->hdbc); SQLFreeHandle(SQL_HANDLE_DBC, cnxn->hdbc); Py_END_ALLOW_THREADS cnxn->hdbc = SQL_NULL_HANDLE; } Py_XDECREF(cnxn->searchescape); cnxn->searchescape = 0; _clear_conv(cnxn); return 0; } static void Connection_dealloc(PyObject* self) { Connection_clear(self); PyObject_Del(self); } static char close_doc[] = "Close the connection now (rather than whenever __del__ is called).\n" "\n" "The connection will be unusable from this point forward and a ProgrammingError\n" "will be raised if any operation is attempted with the connection. The same\n" "applies to all cursor objects trying to use the connection.\n" "\n" "Note that closing a connection without committing the changes first will cause\n" "an implicit rollback to be performed."; static PyObject* Connection_close(PyObject* self, PyObject* args) { UNUSED(args); Connection* cnxn = Connection_Validate(self); if (!cnxn) return 0; Connection_clear(self); Py_RETURN_NONE; } static PyObject* Connection_cursor(PyObject* self, PyObject* args) { UNUSED(args); Connection* cnxn = Connection_Validate(self); if (!cnxn) return 0; return (PyObject*)Cursor_New(cnxn); } static PyObject* Connection_execute(PyObject* self, PyObject* args) { PyObject* result = 0; Cursor* cursor; Connection* cnxn = Connection_Validate(self); if (!cnxn) return 0; cursor = Cursor_New(cnxn); if (!cursor) return 0; result = Cursor_execute((PyObject*)cursor, args); Py_DECREF((PyObject*)cursor); return result; } enum { GI_YESNO, GI_STRING, GI_UINTEGER, GI_USMALLINT, }; struct GetInfoType { SQLUSMALLINT infotype; int datatype; // GI_XXX }; static 
const GetInfoType aInfoTypes[] = { { SQL_ACCESSIBLE_PROCEDURES, GI_YESNO }, { SQL_ACCESSIBLE_TABLES, GI_YESNO }, { SQL_ACTIVE_ENVIRONMENTS, GI_USMALLINT }, { SQL_AGGREGATE_FUNCTIONS, GI_UINTEGER }, { SQL_ALTER_DOMAIN, GI_UINTEGER }, { SQL_ALTER_TABLE, GI_UINTEGER }, { SQL_ASYNC_MODE, GI_UINTEGER }, { SQL_BATCH_ROW_COUNT, GI_UINTEGER }, { SQL_BATCH_SUPPORT, GI_UINTEGER }, { SQL_BOOKMARK_PERSISTENCE, GI_UINTEGER }, { SQL_CATALOG_LOCATION, GI_USMALLINT }, { SQL_CATALOG_NAME, GI_YESNO }, { SQL_CATALOG_NAME_SEPARATOR, GI_STRING }, { SQL_CATALOG_TERM, GI_STRING }, { SQL_CATALOG_USAGE, GI_UINTEGER }, { SQL_COLLATION_SEQ, GI_STRING }, { SQL_COLUMN_ALIAS, GI_YESNO }, { SQL_CONCAT_NULL_BEHAVIOR, GI_USMALLINT }, { SQL_CONVERT_FUNCTIONS, GI_UINTEGER }, { SQL_CONVERT_VARCHAR, GI_UINTEGER }, { SQL_CORRELATION_NAME, GI_USMALLINT }, { SQL_CREATE_ASSERTION, GI_UINTEGER }, { SQL_CREATE_CHARACTER_SET, GI_UINTEGER }, { SQL_CREATE_COLLATION, GI_UINTEGER }, { SQL_CREATE_DOMAIN, GI_UINTEGER }, { SQL_CREATE_SCHEMA, GI_UINTEGER }, { SQL_CREATE_TABLE, GI_UINTEGER }, { SQL_CREATE_TRANSLATION, GI_UINTEGER }, { SQL_CREATE_VIEW, GI_UINTEGER }, { SQL_CURSOR_COMMIT_BEHAVIOR, GI_USMALLINT }, { SQL_CURSOR_ROLLBACK_BEHAVIOR, GI_USMALLINT }, { SQL_DATABASE_NAME, GI_STRING }, { SQL_DATA_SOURCE_NAME, GI_STRING }, { SQL_DATA_SOURCE_READ_ONLY, GI_YESNO }, { SQL_DATETIME_LITERALS, GI_UINTEGER }, { SQL_DBMS_NAME, GI_STRING }, { SQL_DBMS_VER, GI_STRING }, { SQL_DDL_INDEX, GI_UINTEGER }, { SQL_DEFAULT_TXN_ISOLATION, GI_UINTEGER }, { SQL_DESCRIBE_PARAMETER, GI_YESNO }, { SQL_DM_VER, GI_STRING }, { SQL_DRIVER_NAME, GI_STRING }, { SQL_DRIVER_ODBC_VER, GI_STRING }, { SQL_DRIVER_VER, GI_STRING }, { SQL_DROP_ASSERTION, GI_UINTEGER }, { SQL_DROP_CHARACTER_SET, GI_UINTEGER }, { SQL_DROP_COLLATION, GI_UINTEGER }, { SQL_DROP_DOMAIN, GI_UINTEGER }, { SQL_DROP_SCHEMA, GI_UINTEGER }, { SQL_DROP_TABLE, GI_UINTEGER }, { SQL_DROP_TRANSLATION, GI_UINTEGER }, { SQL_DROP_VIEW, GI_UINTEGER }, { SQL_DYNAMIC_CURSOR_ATTRIBUTES1, 
GI_UINTEGER }, { SQL_DYNAMIC_CURSOR_ATTRIBUTES2, GI_UINTEGER }, { SQL_EXPRESSIONS_IN_ORDERBY, GI_YESNO }, { SQL_FILE_USAGE, GI_USMALLINT }, { SQL_FORWARD_ONLY_CURSOR_ATTRIBUTES1, GI_UINTEGER }, { SQL_FORWARD_ONLY_CURSOR_ATTRIBUTES2, GI_UINTEGER }, { SQL_GETDATA_EXTENSIONS, GI_UINTEGER }, { SQL_GROUP_BY, GI_USMALLINT }, { SQL_IDENTIFIER_CASE, GI_USMALLINT }, { SQL_IDENTIFIER_QUOTE_CHAR, GI_STRING }, { SQL_INDEX_KEYWORDS, GI_UINTEGER }, { SQL_INFO_SCHEMA_VIEWS, GI_UINTEGER }, { SQL_INSERT_STATEMENT, GI_UINTEGER }, { SQL_INTEGRITY, GI_YESNO }, { SQL_KEYSET_CURSOR_ATTRIBUTES1, GI_UINTEGER }, { SQL_KEYSET_CURSOR_ATTRIBUTES2, GI_UINTEGER }, { SQL_KEYWORDS, GI_STRING }, { SQL_LIKE_ESCAPE_CLAUSE, GI_YESNO }, { SQL_MAX_ASYNC_CONCURRENT_STATEMENTS, GI_UINTEGER }, { SQL_MAX_BINARY_LITERAL_LEN, GI_UINTEGER }, { SQL_MAX_CATALOG_NAME_LEN, GI_USMALLINT }, { SQL_MAX_CHAR_LITERAL_LEN, GI_UINTEGER }, { SQL_MAX_COLUMNS_IN_GROUP_BY, GI_USMALLINT }, { SQL_MAX_COLUMNS_IN_INDEX, GI_USMALLINT }, { SQL_MAX_COLUMNS_IN_ORDER_BY, GI_USMALLINT }, { SQL_MAX_COLUMNS_IN_SELECT, GI_USMALLINT }, { SQL_MAX_COLUMNS_IN_TABLE, GI_USMALLINT }, { SQL_MAX_COLUMN_NAME_LEN, GI_USMALLINT }, { SQL_MAX_CONCURRENT_ACTIVITIES, GI_USMALLINT }, { SQL_MAX_CURSOR_NAME_LEN, GI_USMALLINT }, { SQL_MAX_DRIVER_CONNECTIONS, GI_USMALLINT }, { SQL_MAX_IDENTIFIER_LEN, GI_USMALLINT }, { SQL_MAX_INDEX_SIZE, GI_UINTEGER }, { SQL_MAX_PROCEDURE_NAME_LEN, GI_USMALLINT }, { SQL_MAX_ROW_SIZE, GI_UINTEGER }, { SQL_MAX_ROW_SIZE_INCLUDES_LONG, GI_YESNO }, { SQL_MAX_SCHEMA_NAME_LEN, GI_USMALLINT }, { SQL_MAX_STATEMENT_LEN, GI_UINTEGER }, { SQL_MAX_TABLES_IN_SELECT, GI_USMALLINT }, { SQL_MAX_TABLE_NAME_LEN, GI_USMALLINT }, { SQL_MAX_USER_NAME_LEN, GI_USMALLINT }, { SQL_MULTIPLE_ACTIVE_TXN, GI_YESNO }, { SQL_MULT_RESULT_SETS, GI_YESNO }, { SQL_NEED_LONG_DATA_LEN, GI_YESNO }, { SQL_NON_NULLABLE_COLUMNS, GI_USMALLINT }, { SQL_NULL_COLLATION, GI_USMALLINT }, { SQL_NUMERIC_FUNCTIONS, GI_UINTEGER }, { SQL_ODBC_INTERFACE_CONFORMANCE, 
GI_UINTEGER }, { SQL_ODBC_VER, GI_STRING }, { SQL_OJ_CAPABILITIES, GI_UINTEGER }, { SQL_ORDER_BY_COLUMNS_IN_SELECT, GI_YESNO }, { SQL_PARAM_ARRAY_ROW_COUNTS, GI_UINTEGER }, { SQL_PARAM_ARRAY_SELECTS, GI_UINTEGER }, { SQL_PROCEDURES, GI_YESNO }, { SQL_PROCEDURE_TERM, GI_STRING }, { SQL_QUOTED_IDENTIFIER_CASE, GI_USMALLINT }, { SQL_ROW_UPDATES, GI_YESNO }, { SQL_SCHEMA_TERM, GI_STRING }, { SQL_SCHEMA_USAGE, GI_UINTEGER }, { SQL_SCROLL_OPTIONS, GI_UINTEGER }, { SQL_SEARCH_PATTERN_ESCAPE, GI_STRING }, { SQL_SERVER_NAME, GI_STRING }, { SQL_SPECIAL_CHARACTERS, GI_STRING }, { SQL_SQL92_DATETIME_FUNCTIONS, GI_UINTEGER }, { SQL_SQL92_FOREIGN_KEY_DELETE_RULE, GI_UINTEGER }, { SQL_SQL92_FOREIGN_KEY_UPDATE_RULE, GI_UINTEGER }, { SQL_SQL92_GRANT, GI_UINTEGER }, { SQL_SQL92_NUMERIC_VALUE_FUNCTIONS, GI_UINTEGER }, { SQL_SQL92_PREDICATES, GI_UINTEGER }, { SQL_SQL92_RELATIONAL_JOIN_OPERATORS, GI_UINTEGER }, { SQL_SQL92_REVOKE, GI_UINTEGER }, { SQL_SQL92_ROW_VALUE_CONSTRUCTOR, GI_UINTEGER }, { SQL_SQL92_STRING_FUNCTIONS, GI_UINTEGER }, { SQL_SQL92_VALUE_EXPRESSIONS, GI_UINTEGER }, { SQL_SQL_CONFORMANCE, GI_UINTEGER }, { SQL_STANDARD_CLI_CONFORMANCE, GI_UINTEGER }, { SQL_STATIC_CURSOR_ATTRIBUTES1, GI_UINTEGER }, { SQL_STATIC_CURSOR_ATTRIBUTES2, GI_UINTEGER }, { SQL_STRING_FUNCTIONS, GI_UINTEGER }, { SQL_SUBQUERIES, GI_UINTEGER }, { SQL_SYSTEM_FUNCTIONS, GI_UINTEGER }, { SQL_TABLE_TERM, GI_STRING }, { SQL_TIMEDATE_ADD_INTERVALS, GI_UINTEGER }, { SQL_TIMEDATE_DIFF_INTERVALS, GI_UINTEGER }, { SQL_TIMEDATE_FUNCTIONS, GI_UINTEGER }, { SQL_TXN_CAPABLE, GI_USMALLINT }, { SQL_TXN_ISOLATION_OPTION, GI_UINTEGER }, { SQL_UNION, GI_UINTEGER }, { SQL_USER_NAME, GI_STRING }, { SQL_XOPEN_CLI_YEAR, GI_STRING }, }; static PyObject* Connection_getinfo(PyObject* self, PyObject* args) { Connection* cnxn = Connection_Validate(self); if (!cnxn) return 0; unsigned long infotype; if (!PyArg_ParseTuple(args, "k", &infotype)) return 0; unsigned int i = 0; for (; i < _countof(aInfoTypes); i++) { if 
(aInfoTypes[i].infotype == infotype) break; } if (i == _countof(aInfoTypes)) return RaiseErrorV(0, ProgrammingError, "Invalid getinfo value: %d", infotype); char szBuffer[0x1000]; SQLSMALLINT cch = 0; SQLRETURN ret; Py_BEGIN_ALLOW_THREADS ret = SQLGetInfo(cnxn->hdbc, (SQLUSMALLINT)infotype, szBuffer, sizeof(szBuffer), &cch); Py_END_ALLOW_THREADS if (!SQL_SUCCEEDED(ret)) { RaiseErrorFromHandle("SQLGetInfo", cnxn->hdbc, SQL_NULL_HANDLE); return 0; } PyObject* result = 0; switch (aInfoTypes[i].datatype) { case GI_YESNO: result = (szBuffer[0] == 'Y') ? Py_True : Py_False; Py_INCREF(result); break; case GI_STRING: result = PyString_FromStringAndSize(szBuffer, (Py_ssize_t)cch); break; case GI_UINTEGER: { SQLUINTEGER n = *(SQLUINTEGER*)szBuffer; // Does this work on PPC or do we need a union? #if PY_MAJOR_VERSION >= 3 result = PyLong_FromLong((long)n); #else if (n <= (SQLUINTEGER)PyInt_GetMax()) result = PyInt_FromLong((long)n); else result = PyLong_FromUnsignedLong(n); #endif break; } case GI_USMALLINT: result = PyInt_FromLong(*(SQLUSMALLINT*)szBuffer); break; } return result; } PyObject* Connection_endtrans(Connection* cnxn, SQLSMALLINT type) { // If called from Cursor.commit, it is possible that `cnxn` is deleted by another thread when we release them // below. (The cursor has had its reference incremented by the method it is calling, but nothing has incremented // the connections count. We could, but we really only need the HDBC.) 
HDBC hdbc = cnxn->hdbc; SQLRETURN ret; Py_BEGIN_ALLOW_THREADS ret = SQLEndTran(SQL_HANDLE_DBC, hdbc, type); Py_END_ALLOW_THREADS if (!SQL_SUCCEEDED(ret)) { RaiseErrorFromHandle("SQLEndTran", hdbc, SQL_NULL_HANDLE); return 0; } Py_RETURN_NONE; } static PyObject* Connection_commit(PyObject* self, PyObject* args) { UNUSED(args); Connection* cnxn = Connection_Validate(self); if (!cnxn) return 0; TRACE("commit: cnxn=%p hdbc=%d\n", cnxn, cnxn->hdbc); return Connection_endtrans(cnxn, SQL_COMMIT); } static PyObject* Connection_rollback(PyObject* self, PyObject* args) { UNUSED(args); Connection* cnxn = Connection_Validate(self); if (!cnxn) return 0; TRACE("rollback: cnxn=%p hdbc=%d\n", cnxn, cnxn->hdbc); return Connection_endtrans(cnxn, SQL_ROLLBACK); } static char cursor_doc[] = "Return a new Cursor object using the connection."; static char execute_doc[] = "execute(sql, [params]) --> Cursor\n" "\n" "Create a new Cursor object, call its execute method, and return it. See\n" "Cursor.execute for more details.\n" "\n" "This is a convenience method that is not part of the DB API. Since a new\n" "Cursor is allocated by each call, this should not be used if more than one SQL\n" "statement needs to be executed."; static char commit_doc[] = "Commit any pending transaction to the database."; static char rollback_doc[] = "Causes the the database to roll back to the start of any pending transaction."; static char getinfo_doc[] = "getinfo(type) --> str | int | bool\n" "\n" "Calls SQLGetInfo, passing `type`, and returns the result formatted as a Python object."; PyObject* Connection_getautocommit(PyObject* self, void* closure) { UNUSED(closure); Connection* cnxn = Connection_Validate(self); if (!cnxn) return 0; PyObject* result = (cnxn->nAutoCommit == SQL_AUTOCOMMIT_ON) ? 
Py_True : Py_False; Py_INCREF(result); return result; } static int Connection_setautocommit(PyObject* self, PyObject* value, void* closure) { UNUSED(closure); Connection* cnxn = Connection_Validate(self); if (!cnxn) return -1; if (value == 0) { PyErr_SetString(PyExc_TypeError, "Cannot delete the autocommit attribute."); return -1; } uintptr_t nAutoCommit = PyObject_IsTrue(value) ? SQL_AUTOCOMMIT_ON : SQL_AUTOCOMMIT_OFF; SQLRETURN ret; Py_BEGIN_ALLOW_THREADS ret = SQLSetConnectAttr(cnxn->hdbc, SQL_ATTR_AUTOCOMMIT, (SQLPOINTER)nAutoCommit, SQL_IS_UINTEGER); Py_END_ALLOW_THREADS if (!SQL_SUCCEEDED(ret)) { RaiseErrorFromHandle("SQLSetConnectAttr", cnxn->hdbc, SQL_NULL_HANDLE); return -1; } cnxn->nAutoCommit = nAutoCommit; return 0; } static PyObject* Connection_getsearchescape(PyObject* self, void* closure) { UNUSED(closure); Connection* cnxn = (Connection*)self; if (!cnxn->searchescape) { char sz[8] = { 0 }; SQLSMALLINT cch = 0; SQLRETURN ret; Py_BEGIN_ALLOW_THREADS ret = SQLGetInfo(cnxn->hdbc, SQL_SEARCH_PATTERN_ESCAPE, &sz, _countof(sz), &cch); Py_END_ALLOW_THREADS if (!SQL_SUCCEEDED(ret)) return RaiseErrorFromHandle("SQLGetInfo", cnxn->hdbc, SQL_NULL_HANDLE); cnxn->searchescape = PyString_FromStringAndSize(sz, (Py_ssize_t)cch); } Py_INCREF(cnxn->searchescape); return cnxn->searchescape; } static PyObject* Connection_gettimeout(PyObject* self, void* closure) { UNUSED(closure); Connection* cnxn = Connection_Validate(self); if (!cnxn) return 0; return PyInt_FromLong(cnxn->timeout); } static int Connection_settimeout(PyObject* self, PyObject* value, void* closure) { UNUSED(closure); Connection* cnxn = Connection_Validate(self); if (!cnxn) return -1; if (value == 0) { PyErr_SetString(PyExc_TypeError, "Cannot delete the timeout attribute."); return -1; } intptr_t timeout = PyInt_AsLong(value); if (timeout == -1 && PyErr_Occurred()) return -1; if (timeout < 0) { PyErr_SetString(PyExc_ValueError, "Cannot set a negative timeout."); return -1; } SQLRETURN ret; 
Py_BEGIN_ALLOW_THREADS ret = SQLSetConnectAttr(cnxn->hdbc, SQL_ATTR_CONNECTION_TIMEOUT, (SQLPOINTER)timeout, SQL_IS_UINTEGER); Py_END_ALLOW_THREADS if (!SQL_SUCCEEDED(ret)) { RaiseErrorFromHandle("SQLSetConnectAttr", cnxn->hdbc, SQL_NULL_HANDLE); return -1; } cnxn->timeout = timeout; return 0; } static bool _add_converter(PyObject* self, SQLSMALLINT sqltype, PyObject* func) { Connection* cnxn = (Connection*)self; if (cnxn->conv_count) { // If the sqltype is already registered, replace the old conversion function with the new. for (int i = 0; i < cnxn->conv_count; i++) { if (cnxn->conv_types[i] == sqltype) { Py_XDECREF(cnxn->conv_funcs[i]); cnxn->conv_funcs[i] = func; Py_INCREF(func); return true; } } } int oldcount = cnxn->conv_count; SQLSMALLINT* oldtypes = cnxn->conv_types; PyObject** oldfuncs = cnxn->conv_funcs; int newcount = oldcount + 1; SQLSMALLINT* newtypes = (SQLSMALLINT*)pyodbc_malloc(sizeof(SQLSMALLINT) * newcount); PyObject** newfuncs = (PyObject**)pyodbc_malloc(sizeof(PyObject*) * newcount); if (newtypes == 0 || newfuncs == 0) { if (newtypes) pyodbc_free(newtypes); if (newfuncs) pyodbc_free(newfuncs); PyErr_NoMemory(); return false; } newtypes[0] = sqltype; newfuncs[0] = func; Py_INCREF(func); cnxn->conv_count = newcount; cnxn->conv_types = newtypes; cnxn->conv_funcs = newfuncs; if (oldcount != 0) { // copy old items memcpy(&newtypes[1], oldtypes, sizeof(int) * oldcount); memcpy(&newfuncs[1], oldfuncs, sizeof(PyObject*) * oldcount); pyodbc_free(oldtypes); pyodbc_free(oldfuncs); } return true; } static char conv_add_doc[] = "add_output_converter(sqltype, func) --> None\n" "\n" "Register an output converter function that will be called whenever a value with\n" "the given SQL type is read from the database.\n" "\n" "sqltype\n" " The integer SQL type value to convert, which can be one of the defined\n" " standard constants (e.g. pyodbc.SQL_VARCHAR) or a database-specific value\n" " (e.g. 
-151 for the SQL Server 2008 geometry data type).\n" "\n" "func\n" " The converter function which will be called with a single parameter, the\n" " value, and should return the converted value. If the value is NULL, the\n" " parameter will be None. Otherwise it will be a Python string."; static PyObject* Connection_conv_add(PyObject* self, PyObject* args) { int sqltype; PyObject* func; if (!PyArg_ParseTuple(args, "iO", &sqltype, &func)) return 0; if (!_add_converter(self, (SQLSMALLINT)sqltype, func)) return 0; Py_RETURN_NONE; } static char enter_doc[] = "__enter__() -> self."; static PyObject* Connection_enter(PyObject* self, PyObject* args) { UNUSED(args); Py_INCREF(self); return self; } static char exit_doc[] = "__exit__(*excinfo) -> None. Commits the connection if necessary."; static PyObject* Connection_exit(PyObject* self, PyObject* args) { Connection* cnxn = (Connection*)self; // If an error has occurred, `args` will be a tuple of 3 values. Otherwise it will be a tuple of 3 `None`s. 
I(PyTuple_Check(args)); if (cnxn->nAutoCommit == SQL_AUTOCOMMIT_OFF && PyTuple_GetItem(args, 0) == Py_None) { SQLRETURN ret; Py_BEGIN_ALLOW_THREADS ret = SQLEndTran(SQL_HANDLE_DBC, cnxn->hdbc, SQL_COMMIT); Py_END_ALLOW_THREADS if (!SQL_SUCCEEDED(ret)) return RaiseErrorFromHandle("SQLEndTran(SQL_COMMIT)", cnxn->hdbc, SQL_NULL_HANDLE); } Py_RETURN_NONE; } static struct PyMethodDef Connection_methods[] = { { "cursor", Connection_cursor, METH_NOARGS, cursor_doc }, { "close", Connection_close, METH_NOARGS, close_doc }, { "execute", Connection_execute, METH_VARARGS, execute_doc }, { "commit", Connection_commit, METH_NOARGS, commit_doc }, { "rollback", Connection_rollback, METH_NOARGS, rollback_doc }, { "getinfo", Connection_getinfo, METH_VARARGS, getinfo_doc }, { "add_output_converter", Connection_conv_add, METH_VARARGS, conv_add_doc }, { "clear_output_converters", Connection_conv_clear, METH_NOARGS, conv_clear_doc }, { "__enter__", Connection_enter, METH_NOARGS, enter_doc }, { "__exit__", Connection_exit, METH_VARARGS, exit_doc }, { 0, 0, 0, 0 } }; static PyGetSetDef Connection_getseters[] = { { "searchescape", (getter)Connection_getsearchescape, 0, "The ODBC search pattern escape character, as returned by\n" "SQLGetInfo(SQL_SEARCH_PATTERN_ESCAPE). 
These are driver specific.", 0 }, { "autocommit", Connection_getautocommit, Connection_setautocommit, "Returns True if the connection is in autocommit mode; False otherwise.", 0 }, { "timeout", Connection_gettimeout, Connection_settimeout, "The timeout in seconds, zero means no timeout.", 0 }, { 0 } }; PyTypeObject ConnectionType = { PyVarObject_HEAD_INIT(0, 0) "pyodbc.Connection", // tp_name sizeof(Connection), // tp_basicsize 0, // tp_itemsize Connection_dealloc, // destructor tp_dealloc 0, // tp_print 0, // tp_getattr 0, // tp_setattr 0, // tp_compare 0, // tp_repr 0, // tp_as_number 0, // tp_as_sequence 0, // tp_as_mapping 0, // tp_hash 0, // tp_call 0, // tp_str 0, // tp_getattro 0, // tp_setattro 0, // tp_as_buffer Py_TPFLAGS_DEFAULT, // tp_flags connection_doc, // tp_doc 0, // tp_traverse 0, // tp_clear 0, // tp_richcompare 0, // tp_weaklistoffset 0, // tp_iter 0, // tp_iternext Connection_methods, // tp_methods 0, // tp_members Connection_getseters, // tp_getset 0, // tp_base 0, // tp_dict 0, // tp_descr_get 0, // tp_descr_set 0, // tp_dictoffset 0, // tp_init 0, // tp_alloc 0, // tp_new 0, // tp_free 0, // tp_is_gc 0, // tp_bases 0, // tp_mro 0, // tp_cache 0, // tp_subclasses 0, // tp_weaklist }; pyodbc-3.0.7/src/pyodbcmodule.h0000666000175000017500000000330412031131304015071 0ustar dokodoko /* * Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated * documentation files (the "Software"), to deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to * permit persons to whom the Software is furnished to do so. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE * WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS * OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #ifndef _PYPGMODULE_H #define _PYPGMODULE_H extern PyObject* Error; extern PyObject* Warning; extern PyObject* InterfaceError; extern PyObject* DatabaseError; extern PyObject* InternalError; extern PyObject* OperationalError; extern PyObject* ProgrammingError; extern PyObject* IntegrityError; extern PyObject* DataError; extern PyObject* NotSupportedError; extern PyObject* null_binary; extern PyObject* decimal_type; inline bool PyDecimal_Check(PyObject* p) { return Py_TYPE(p) == (_typeobject*)decimal_type; } extern HENV henv; extern PyTypeObject RowType; extern PyTypeObject CursorType; extern PyTypeObject ConnectionType; // Thd pyodbc module. extern PyObject* pModule; inline bool lowercase() { return PyObject_GetAttrString(pModule, "lowercase") == Py_True; } extern Py_UNICODE chDecimal; #endif // _PYPGMODULE_H pyodbc-3.0.7/src/dbspecific.h0000666000175000017500000000130612031131304014476 0ustar dokodoko #ifndef DBSPECIFIC_H #define DBSPECIFIC_H // Items specific to databases. // // Obviously we'd like to minimize this, but if they are needed this file isolates them. I'd like for there to be a // single build of pyodbc on each platform and not have a bunch of defines for supporting different databases. 
// --------------------------------------------------------------------------------------------------------------------- // SQL Server // SQL Server 2005 xml type #define SQL_SS_XML -152 // SQL Server 2008 time type #define SQL_SS_TIME2 -154 struct SQL_SS_TIME2_STRUCT { SQLUSMALLINT hour; SQLUSMALLINT minute; SQLUSMALLINT second; SQLUINTEGER fraction; }; #endif // DBSPECIFIC_H pyodbc-3.0.7/src/connection.h0000666000175000017500000000642612146217440014567 0ustar dokodoko // Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated // documentation files (the "Software"), to deal in the Software without restriction, including without limitation the // rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to // permit persons to whom the Software is furnished to do so. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE // WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS // OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR // OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #ifndef CONNECTION_H #define CONNECTION_H struct Cursor; extern PyTypeObject ConnectionType; struct Connection { PyObject_HEAD // Set to SQL_NULL_HANDLE when the connection is closed. HDBC hdbc; // Will be SQL_AUTOCOMMIT_ON or SQL_AUTOCOMMIT_OFF. uintptr_t nAutoCommit; // The ODBC version the driver supports, from SQLGetInfo(DRIVER_ODBC_VER). This is set after connecting. char odbc_major; char odbc_minor; // The escape character from SQLGetInfo. This is not initialized until requested, so this may be zero! PyObject* searchescape; // Will be true if SQLDescribeParam is supported. 
If false, we'll have to guess but the user will not be able // to insert NULLs into binary columns. bool supports_describeparam; // The column size of datetime columns, obtained from SQLGetInfo(), used to determine the datetime precision. int datetime_precision; // If true, then the strings in the rows are returned as unicode objects. bool unicode_results; // The connection timeout in seconds. intptr_t timeout; // These are copied from cnxn info for performance and convenience. int varchar_maxlength; int wvarchar_maxlength; int binary_maxlength; bool need_long_data_len; // Output conversions. Maps from SQL type in conv_types to the converter function in conv_funcs. // // If conv_count is zero, conv_types and conv_funcs will also be zero. // // pyodbc uses this manual mapping for speed and portability. The STL collection classes use the new operator and // throw exceptions when out of memory. pyodbc does not use any exceptions. int conv_count; // how many items are in conv_types and conv_funcs. SQLSMALLINT* conv_types; // array of SQL_TYPEs to convert PyObject** conv_funcs; // array of Python functions }; #define Connection_Check(op) PyObject_TypeCheck(op, &ConnectionType) #define Connection_CheckExact(op) (Py_TYPE(op) == &ConnectionType) /* * Used by the module's connect function to create new connection objects. If unable to connect to the database, an * exception is set and zero is returned. */ PyObject* Connection_New(PyObject* pConnectString, bool fAutoCommit, bool fAnsi, bool fUnicodeResults, long timeout, bool fReadOnly); /* * Used by the Cursor to implement commit and rollback. 
*/ PyObject* Connection_endtrans(Connection* cnxn, SQLSMALLINT type); #endif pyodbc-3.0.7/src/params.h0000666000175000017500000000036012031131304013665 0ustar dokodoko #ifndef PARAMS_H #define PARAMS_H bool Params_init(); struct Cursor; bool PrepareAndBind(Cursor* cur, PyObject* pSql, PyObject* params, bool skip_first); void FreeParameterData(Cursor* cur); void FreeParameterInfo(Cursor* cur); #endif pyodbc-3.0.7/src/wrapper.h0000666000175000017500000000272212146217440014103 0ustar dokodoko #ifndef _WRAPPER_H_ #define _WRAPPER_H_ class Object { protected: PyObject* p; // GCC freaks out if these are private, but it doesn't use them (?) // Object(const Object& illegal); // void operator=(const Object& illegal); public: Object(PyObject* _p = 0) { p = _p; } ~Object() { Py_XDECREF(p); } Object& operator=(PyObject* pNew) { Py_XDECREF(p); p = pNew; return *this; } bool IsValid() const { return p != 0; } bool Attach(PyObject* _p) { // Returns true if the new pointer is non-zero. Py_XDECREF(p); p = _p; return (_p != 0); } PyObject* Detach() { PyObject* pT = p; p = 0; return pT; } operator PyObject*() { return p; } PyObject* Get() { return p; } }; class Tuple : public Object { public: Tuple(PyObject* _p = 0) : Object(_p) { } operator PyTupleObject*() { return (PyTupleObject*)p; } PyObject*& operator[](int i) { I(p != 0); return PyTuple_GET_ITEM(p, i); } Py_ssize_t size() { return p ? 
PyTuple_GET_SIZE(p) : 0; } }; #ifdef WINVER struct RegKey { HKEY hkey; RegKey() { hkey = 0; } ~RegKey() { if (hkey != 0) RegCloseKey(hkey); } operator HKEY() { return hkey; } }; #endif #endif // _WRAPPER_H_ pyodbc-3.0.7/src/sqlwchar.cpp0000666000175000017500000001315512031131304014567 0ustar dokodoko #include "pyodbc.h" #include "sqlwchar.h" #include "wrapper.h" Py_ssize_t SQLWCHAR_SIZE = sizeof(SQLWCHAR); #ifdef HAVE_WCHAR_H static int WCHAR_T_SIZE = sizeof(wchar_t); #endif inline Py_UNICODE CalculateMaxSQL() { if (SQLWCHAR_SIZE >= Py_UNICODE_SIZE) return 0; Py_UNICODE m = 0; for (unsigned int i = 0; i < sizeof(SQLWCHAR); i++) { m <<= 8; m |= 0xFF; } return m; } // If SQLWCHAR is larger than Py_UNICODE, this is the largest value that can be held in a Py_UNICODE. Because it is // stored in a Py_UNICODE, it is undefined when sizeof(SQLWCHAR) <= sizeof(Py_UNICODE). static Py_UNICODE MAX_SQLWCHAR = CalculateMaxSQL(); // If SQLWCHAR is larger than Py_UNICODE, this is the largest value that can be held in a Py_UNICODE. Because it is // stored in a Py_UNICODE, it is undefined when sizeof(SQLWCHAR) <= sizeof(Py_UNICODE). static const SQLWCHAR MAX_PY_UNICODE = (SQLWCHAR)PyUnicode_GetMax(); static bool sqlwchar_copy(SQLWCHAR* pdest, const Py_UNICODE* psrc, Py_ssize_t len) { // Copies a Python Unicode string to a SQLWCHAR buffer. Note that this does copy the NULL terminator, but `len` // should not include it. That is, it copies (len + 1) characters. if (Py_UNICODE_SIZE == SQLWCHAR_SIZE) { memcpy(pdest, psrc, sizeof(SQLWCHAR) * (len + 1)); } else { if (SQLWCHAR_SIZE < Py_UNICODE_SIZE) { for (int i = 0; i < len; i++) { if ((Py_ssize_t)psrc[i] > MAX_SQLWCHAR) { PyErr_Format(PyExc_ValueError, "Cannot convert from Unicode %zd to SQLWCHAR. 
Value is too large.", (Py_ssize_t)psrc[i]); return false; } } } for (int i = 0; i <= len; i++) // ('<=' to include the NULL) pdest[i] = (SQLWCHAR)psrc[i]; } return true; } SQLWChar::SQLWChar(PyObject* o) { // Converts from a Python Unicode string. pch = 0; len = 0; owns_memory = false; Convert(o); } void SQLWChar::Free() { if (pch && owns_memory) pyodbc_free(pch); pch = 0; len = 0; owns_memory = false; } bool SQLWChar::Convert(PyObject* o) { Free(); if (!PyUnicode_Check(o)) { PyErr_SetString(PyExc_TypeError, "Unicode required"); return false; } Py_UNICODE* pU = (Py_UNICODE*)PyUnicode_AS_UNICODE(o); Py_ssize_t lenT = PyUnicode_GET_SIZE(o); if (SQLWCHAR_SIZE == Py_UNICODE_SIZE) { // The ideal case - SQLWCHAR and Py_UNICODE are the same, so we point into the Unicode object. pch = (SQLWCHAR*)pU; len = lenT; owns_memory = false; return true; } else { SQLWCHAR* pchT = (SQLWCHAR*)pyodbc_malloc(sizeof(SQLWCHAR) * (lenT + 1)); if (pchT == 0) { PyErr_NoMemory(); return false; } if (!sqlwchar_copy(pchT, pU, lenT)) { pyodbc_free(pchT); return false; } pch = pchT; len = lenT; owns_memory = true; return true; } } PyObject* PyUnicode_FromSQLWCHAR(const SQLWCHAR* sz, Py_ssize_t cch) { // Create a Python Unicode object from a zero-terminated SQLWCHAR. if (SQLWCHAR_SIZE == Py_UNICODE_SIZE) { // The ODBC Unicode and Python Unicode types are the same size. Cast the ODBC type to the Python type and use // a fast function. return PyUnicode_FromUnicode((const Py_UNICODE*)sz, cch); } #ifdef HAVE_WCHAR_H if (WCHAR_T_SIZE == SQLWCHAR_SIZE) { // The ODBC Unicode is the same as wchar_t. Python provides a function for that. return PyUnicode_FromWideChar((const wchar_t*)sz, cch); } #endif // There is no conversion, so we will copy it ourselves with a simple cast. if (Py_UNICODE_SIZE < SQLWCHAR_SIZE) { // We are casting from a larger size to a smaller one, so we'll make sure they all fit. 
for (Py_ssize_t i = 0; i < cch; i++) { if (((Py_ssize_t)sz[i]) > MAX_PY_UNICODE) { PyErr_Format(PyExc_ValueError, "Cannot convert from SQLWCHAR %zd to Unicode. Value is too large.", (Py_ssize_t)sz[i]); return 0; } } } Object result(PyUnicode_FromUnicode(0, cch)); if (!result) return 0; Py_UNICODE* pch = PyUnicode_AS_UNICODE(result.Get()); for (Py_ssize_t i = 0; i < cch; i++) pch[i] = (Py_UNICODE)sz[i]; return result.Detach(); } void SQLWChar::dump() { printf("sqlwchar=%ld pch=%p len=%ld owns=%d\n", sizeof(SQLWCHAR), pch, len, (int)owns_memory); if (pch && len) { Py_ssize_t i = 0; while (i < len) { Py_ssize_t stop = min(i + 10, len); for (Py_ssize_t x = i; x < stop; x++) { for (int byteindex = (int)sizeof(SQLWCHAR)-1; byteindex >= 0; byteindex--) { int byte = (pch[x] >> (byteindex * 8)) & 0xFF; printf("%02x", byte); } printf(" "); } for (Py_ssize_t x = i; x < stop; x++) printf("%c", (char)pch[x]); printf("\n"); i += 10; } printf("\n\n"); } } SQLWCHAR* SQLWCHAR_FromUnicode(const Py_UNICODE* pch, Py_ssize_t len) { SQLWCHAR* p = (SQLWCHAR*)pyodbc_malloc(sizeof(SQLWCHAR) * (len+1)); if (p != 0) { if (!sqlwchar_copy(p, pch, len)) { pyodbc_free(p); p = 0; } } return p; } pyodbc-3.0.7/src/params.cpp0000666000175000017500000006543512146217440014253 0ustar dokodoko #include "pyodbc.h" #include "pyodbcmodule.h" #include "params.h" #include "cursor.h" #include "connection.h" #include "buffer.h" #include "wrapper.h" #include "errors.h" #include "dbspecific.h" #include "sqlwchar.h" #include inline Connection* GetConnection(Cursor* cursor) { return (Connection*)cursor->cnxn; } static bool GetParamType(Cursor* cur, Py_ssize_t iParam, SQLSMALLINT& type); static void FreeInfos(ParamInfo* a, Py_ssize_t count) { for (Py_ssize_t i = 0; i < count; i++) { if (a[i].allocated) pyodbc_free(a[i].ParameterValuePtr); Py_XDECREF(a[i].pParam); } pyodbc_free(a); } #define _MAKESTR(n) case n: return #n static const char* SqlTypeName(SQLSMALLINT n) { switch (n) { _MAKESTR(SQL_UNKNOWN_TYPE); 
_MAKESTR(SQL_CHAR); _MAKESTR(SQL_VARCHAR); _MAKESTR(SQL_LONGVARCHAR); _MAKESTR(SQL_NUMERIC); _MAKESTR(SQL_DECIMAL); _MAKESTR(SQL_INTEGER); _MAKESTR(SQL_SMALLINT); _MAKESTR(SQL_FLOAT); _MAKESTR(SQL_REAL); _MAKESTR(SQL_DOUBLE); _MAKESTR(SQL_DATETIME); _MAKESTR(SQL_WCHAR); _MAKESTR(SQL_WVARCHAR); _MAKESTR(SQL_WLONGVARCHAR); _MAKESTR(SQL_TYPE_DATE); _MAKESTR(SQL_TYPE_TIME); _MAKESTR(SQL_TYPE_TIMESTAMP); _MAKESTR(SQL_SS_TIME2); _MAKESTR(SQL_SS_XML); _MAKESTR(SQL_BINARY); _MAKESTR(SQL_VARBINARY); _MAKESTR(SQL_LONGVARBINARY); } return "unknown"; } static const char* CTypeName(SQLSMALLINT n) { switch (n) { _MAKESTR(SQL_C_CHAR); _MAKESTR(SQL_C_WCHAR); _MAKESTR(SQL_C_LONG); _MAKESTR(SQL_C_SHORT); _MAKESTR(SQL_C_FLOAT); _MAKESTR(SQL_C_DOUBLE); _MAKESTR(SQL_C_NUMERIC); _MAKESTR(SQL_C_DEFAULT); _MAKESTR(SQL_C_DATE); _MAKESTR(SQL_C_TIME); _MAKESTR(SQL_C_TIMESTAMP); _MAKESTR(SQL_C_TYPE_DATE); _MAKESTR(SQL_C_TYPE_TIME); _MAKESTR(SQL_C_TYPE_TIMESTAMP); _MAKESTR(SQL_C_INTERVAL_YEAR); _MAKESTR(SQL_C_INTERVAL_MONTH); _MAKESTR(SQL_C_INTERVAL_DAY); _MAKESTR(SQL_C_INTERVAL_HOUR); _MAKESTR(SQL_C_INTERVAL_MINUTE); _MAKESTR(SQL_C_INTERVAL_SECOND); _MAKESTR(SQL_C_INTERVAL_YEAR_TO_MONTH); _MAKESTR(SQL_C_INTERVAL_DAY_TO_HOUR); _MAKESTR(SQL_C_INTERVAL_DAY_TO_MINUTE); _MAKESTR(SQL_C_INTERVAL_DAY_TO_SECOND); _MAKESTR(SQL_C_INTERVAL_HOUR_TO_MINUTE); _MAKESTR(SQL_C_INTERVAL_HOUR_TO_SECOND); _MAKESTR(SQL_C_INTERVAL_MINUTE_TO_SECOND); _MAKESTR(SQL_C_BINARY); _MAKESTR(SQL_C_BIT); _MAKESTR(SQL_C_SBIGINT); _MAKESTR(SQL_C_UBIGINT); _MAKESTR(SQL_C_TINYINT); _MAKESTR(SQL_C_SLONG); _MAKESTR(SQL_C_SSHORT); _MAKESTR(SQL_C_STINYINT); _MAKESTR(SQL_C_ULONG); _MAKESTR(SQL_C_USHORT); _MAKESTR(SQL_C_UTINYINT); _MAKESTR(SQL_C_GUID); } return "unknown"; } static bool GetNullInfo(Cursor* cur, Py_ssize_t index, ParamInfo& info) { if (!GetParamType(cur, index, info.ParameterType)) return false; info.ValueType = SQL_C_DEFAULT; info.ColumnSize = 1; info.StrLen_or_Ind = SQL_NULL_DATA; return true; } static bool 
GetNullBinaryInfo(Cursor* cur, Py_ssize_t index, ParamInfo& info) { info.ValueType = SQL_C_BINARY; info.ParameterType = SQL_BINARY; info.ColumnSize = 1; info.ParameterValuePtr = 0; info.StrLen_or_Ind = SQL_NULL_DATA; return true; } static bool GetBytesInfo(Cursor* cur, Py_ssize_t index, PyObject* param, ParamInfo& info) { // In Python 2, a bytes object (ANSI string) is passed as varchar. In Python 3, it is passed as binary. Py_ssize_t len = PyBytes_GET_SIZE(param); #if PY_MAJOR_VERSION >= 3 info.ValueType = SQL_C_BINARY; info.ColumnSize = (SQLUINTEGER)max(len, 1); if (len <= cur->cnxn->binary_maxlength) { info.ParameterType = SQL_VARBINARY; info.StrLen_or_Ind = len; info.ParameterValuePtr = PyBytes_AS_STRING(param); } else { // Too long to pass all at once, so we'll provide the data at execute. info.ParameterType = SQL_LONGVARBINARY; info.StrLen_or_Ind = cur->cnxn->need_long_data_len ? SQL_LEN_DATA_AT_EXEC((SQLLEN)len) : SQL_DATA_AT_EXEC; info.ParameterValuePtr = param; } #else info.ValueType = SQL_C_CHAR; info.ColumnSize = (SQLUINTEGER)max(len, 1); if (len <= cur->cnxn->varchar_maxlength) { info.ParameterType = SQL_VARCHAR; info.StrLen_or_Ind = len; info.ParameterValuePtr = PyBytes_AS_STRING(param); } else { // Too long to pass all at once, so we'll provide the data at execute. info.ParameterType = SQL_LONGVARCHAR; info.StrLen_or_Ind = cur->cnxn->need_long_data_len ? SQL_LEN_DATA_AT_EXEC((SQLLEN)len) : SQL_DATA_AT_EXEC; info.ParameterValuePtr = param; } #endif return true; } static bool GetUnicodeInfo(Cursor* cur, Py_ssize_t index, PyObject* param, ParamInfo& info) { Py_UNICODE* pch = PyUnicode_AsUnicode(param); Py_ssize_t len = PyUnicode_GET_SIZE(param); info.ValueType = SQL_C_WCHAR; info.ColumnSize = (SQLUINTEGER)max(len, 1); if (len <= cur->cnxn->wvarchar_maxlength) { if (SQLWCHAR_SIZE == Py_UNICODE_SIZE) { info.ParameterValuePtr = pch; } else { // SQLWCHAR and Py_UNICODE are not the same size, so we need to allocate and copy a buffer. 
if (len > 0) { info.ParameterValuePtr = SQLWCHAR_FromUnicode(pch, len); if (info.ParameterValuePtr == 0) return false; info.allocated = true; } else { info.ParameterValuePtr = pch; } } info.ParameterType = SQL_WVARCHAR; info.StrLen_or_Ind = (SQLINTEGER)(len * sizeof(SQLWCHAR)); } else { // Too long to pass all at once, so we'll provide the data at execute. info.ParameterType = SQL_WLONGVARCHAR; info.StrLen_or_Ind = cur->cnxn->need_long_data_len ? SQL_LEN_DATA_AT_EXEC((SQLLEN)len * sizeof(SQLWCHAR)) : SQL_DATA_AT_EXEC; info.ParameterValuePtr = param; } return true; } static bool GetBooleanInfo(Cursor* cur, Py_ssize_t index, PyObject* param, ParamInfo& info) { info.ValueType = SQL_C_BIT; info.ParameterType = SQL_BIT; info.StrLen_or_Ind = 1; info.Data.ch = (unsigned char)(param == Py_True ? 1 : 0); info.ParameterValuePtr = &info.Data.ch; return true; } static bool GetDateTimeInfo(Cursor* cur, Py_ssize_t index, PyObject* param, ParamInfo& info) { info.Data.timestamp.year = (SQLSMALLINT) PyDateTime_GET_YEAR(param); info.Data.timestamp.month = (SQLUSMALLINT)PyDateTime_GET_MONTH(param); info.Data.timestamp.day = (SQLUSMALLINT)PyDateTime_GET_DAY(param); info.Data.timestamp.hour = (SQLUSMALLINT)PyDateTime_DATE_GET_HOUR(param); info.Data.timestamp.minute = (SQLUSMALLINT)PyDateTime_DATE_GET_MINUTE(param); info.Data.timestamp.second = (SQLUSMALLINT)PyDateTime_DATE_GET_SECOND(param); // SQL Server chokes if the fraction has more data than the database supports. We expect other databases to be the // same, so we reduce the value to what the database supports. http://support.microsoft.com/kb/263872 int precision = ((Connection*)cur->cnxn)->datetime_precision - 20; // (20 includes a separating period) if (precision <= 0) { info.Data.timestamp.fraction = 0; } else { info.Data.timestamp.fraction = (SQLUINTEGER)(PyDateTime_DATE_GET_MICROSECOND(param) * 1000); // 1000 == micro -> nano // (How many leading digits do we want to keep? 
With SQL Server 2005, this should be 3: 123000000) int keep = (int)pow(10.0, 9-min(9, precision)); info.Data.timestamp.fraction = info.Data.timestamp.fraction / keep * keep; info.DecimalDigits = (SQLSMALLINT)precision; } info.ValueType = SQL_C_TIMESTAMP; info.ParameterType = SQL_TIMESTAMP; info.ColumnSize = (SQLUINTEGER)((Connection*)cur->cnxn)->datetime_precision; info.StrLen_or_Ind = sizeof(TIMESTAMP_STRUCT); info.ParameterValuePtr = &info.Data.timestamp; return true; } static bool GetDateInfo(Cursor* cur, Py_ssize_t index, PyObject* param, ParamInfo& info) { info.Data.date.year = (SQLSMALLINT) PyDateTime_GET_YEAR(param); info.Data.date.month = (SQLUSMALLINT)PyDateTime_GET_MONTH(param); info.Data.date.day = (SQLUSMALLINT)PyDateTime_GET_DAY(param); info.ValueType = SQL_C_TYPE_DATE; info.ParameterType = SQL_TYPE_DATE; info.ColumnSize = 10; info.ParameterValuePtr = &info.Data.date; info.StrLen_or_Ind = sizeof(DATE_STRUCT); return true; } static bool GetTimeInfo(Cursor* cur, Py_ssize_t index, PyObject* param, ParamInfo& info) { info.Data.time.hour = (SQLUSMALLINT)PyDateTime_TIME_GET_HOUR(param); info.Data.time.minute = (SQLUSMALLINT)PyDateTime_TIME_GET_MINUTE(param); info.Data.time.second = (SQLUSMALLINT)PyDateTime_TIME_GET_SECOND(param); info.ValueType = SQL_C_TYPE_TIME; info.ParameterType = SQL_TYPE_TIME; info.ColumnSize = 8; info.ParameterValuePtr = &info.Data.time; info.StrLen_or_Ind = sizeof(TIME_STRUCT); return true; } #if PY_MAJOR_VERSION < 3 static bool GetIntInfo(Cursor* cur, Py_ssize_t index, PyObject* param, ParamInfo& info) { info.Data.l = PyInt_AsLong(param); #if LONG_BIT == 64 info.ValueType = SQL_C_SBIGINT; info.ParameterType = SQL_BIGINT; #elif LONG_BIT == 32 info.ValueType = SQL_C_LONG; info.ParameterType = SQL_INTEGER; #else #error Unexpected LONG_BIT value #endif info.ParameterValuePtr = &info.Data.l; return true; } #endif static bool GetLongInfo(Cursor* cur, Py_ssize_t index, PyObject* param, ParamInfo& info) { // TODO: Overflow? 
info.Data.i64 = (INT64)PyLong_AsLongLong(param); info.ValueType = SQL_C_SBIGINT; info.ParameterType = SQL_BIGINT; info.ParameterValuePtr = &info.Data.i64; return true; } static bool GetFloatInfo(Cursor* cur, Py_ssize_t index, PyObject* param, ParamInfo& info) { // TODO: Overflow? info.Data.dbl = PyFloat_AsDouble(param); info.ValueType = SQL_C_DOUBLE; info.ParameterType = SQL_DOUBLE; info.ParameterValuePtr = &info.Data.dbl; info.ColumnSize = 15; return true; } static char* CreateDecimalString(long sign, PyObject* digits, long exp) { long count = (long)PyTuple_GET_SIZE(digits); char* pch; long len; if (exp >= 0) { // (1 2 3) exp = 2 --> '12300' len = sign + count + exp + 1; // 1: NULL pch = (char*)pyodbc_malloc((size_t)len); if (pch) { char* p = pch; if (sign) *p++ = '-'; for (long i = 0; i < count; i++) *p++ = (char)('0' + PyInt_AS_LONG(PyTuple_GET_ITEM(digits, i))); for (long i = 0; i < exp; i++) *p++ = '0'; *p = 0; } } else if (-exp < count) { // (1 2 3) exp = -2 --> 1.23 : prec = 3, scale = 2 len = sign + count + 2; // 2: decimal + NULL pch = (char*)pyodbc_malloc((size_t)len); if (pch) { char* p = pch; if (sign) *p++ = '-'; int i = 0; for (; i < (count + exp); i++) *p++ = (char)('0' + PyInt_AS_LONG(PyTuple_GET_ITEM(digits, i))); *p++ = '.'; for (; i < count; i++) *p++ = (char)('0' + PyInt_AS_LONG(PyTuple_GET_ITEM(digits, i))); *p++ = 0; } } else { // (1 2 3) exp = -5 --> 0.00123 : prec = 5, scale = 5 len = sign + -exp + 3; // 3: leading zero + decimal + NULL pch = (char*)pyodbc_malloc((size_t)len); if (pch) { char* p = pch; if (sign) *p++ = '-'; *p++ = '0'; *p++ = '.'; for (int i = 0; i < -(exp + count); i++) *p++ = '0'; for (int i = 0; i < count; i++) *p++ = (char)('0' + PyInt_AS_LONG(PyTuple_GET_ITEM(digits, i))); *p++ = 0; } } I(pch == 0 || (int)(strlen(pch) + 1) == len); return pch; } static bool GetDecimalInfo(Cursor* cur, Py_ssize_t index, PyObject* param, ParamInfo& info) { // The NUMERIC structure never works right with SQL Server and probably a lot of 
other drivers. We'll bind as a // string. Unfortunately, the Decimal class doesn't seem to have a way to force it to return a string without // exponents, so we'll have to build it ourselves. Object t = PyObject_CallMethod(param, "as_tuple", 0); if (!t) return false; long sign = PyInt_AsLong(PyTuple_GET_ITEM(t.Get(), 0)); PyObject* digits = PyTuple_GET_ITEM(t.Get(), 1); long exp = PyInt_AsLong(PyTuple_GET_ITEM(t.Get(), 2)); Py_ssize_t count = PyTuple_GET_SIZE(digits); info.ValueType = SQL_C_CHAR; info.ParameterType = SQL_NUMERIC; if (exp >= 0) { // (1 2 3) exp = 2 --> '12300' info.ColumnSize = (SQLUINTEGER)count + exp; info.DecimalDigits = 0; } else if (-exp <= count) { // (1 2 3) exp = -2 --> 1.23 : prec = 3, scale = 2 info.ColumnSize = (SQLUINTEGER)count; info.DecimalDigits = (SQLSMALLINT)-exp; } else { // (1 2 3) exp = -5 --> 0.00123 : prec = 5, scale = 5 info.ColumnSize = (SQLUINTEGER)(count + (-exp)); info.DecimalDigits = (SQLSMALLINT)info.ColumnSize; } I(info.ColumnSize >= (SQLULEN)info.DecimalDigits); info.ParameterValuePtr = CreateDecimalString(sign, digits, exp); if (!info.ParameterValuePtr) { PyErr_NoMemory(); return false; } info.allocated = true; info.StrLen_or_Ind = (SQLINTEGER)strlen((char*)info.ParameterValuePtr); return true; } #if PY_MAJOR_VERSION < 3 static bool GetBufferInfo(Cursor* cur, Py_ssize_t index, PyObject* param, ParamInfo& info) { info.ValueType = SQL_C_BINARY; const char* pb; Py_ssize_t cb = PyBuffer_GetMemory(param, &pb); if (cb != -1 && cb <= cur->cnxn->binary_maxlength) { // There is one segment, so we can bind directly into the buffer object. info.ParameterType = SQL_VARBINARY; info.ParameterValuePtr = (SQLPOINTER)pb; info.BufferLength = cb; info.ColumnSize = (SQLUINTEGER)max(cb, 1); info.StrLen_or_Ind = cb; } else { // There are multiple segments, so we'll provide the data at execution time. Pass the PyObject pointer as // the parameter value which will be pased back to us when the data is needed. 
(If we release threads, we // need to up the refcount!) info.ParameterType = SQL_LONGVARBINARY; info.ParameterValuePtr = param; info.ColumnSize = (SQLUINTEGER)PyBuffer_Size(param); info.BufferLength = sizeof(PyObject*); // How big is ParameterValuePtr; ODBC copies it and gives it back in SQLParamData info.StrLen_or_Ind = cur->cnxn->need_long_data_len ? SQL_LEN_DATA_AT_EXEC((SQLLEN)PyBuffer_Size(param)) : SQL_DATA_AT_EXEC; } return true; } #endif #if PY_VERSION_HEX >= 0x02060000 static bool GetByteArrayInfo(Cursor* cur, Py_ssize_t index, PyObject* param, ParamInfo& info) { info.ValueType = SQL_C_BINARY; Py_ssize_t cb = PyByteArray_Size(param); if (cb <= cur->cnxn->binary_maxlength) { info.ParameterType = SQL_VARBINARY; info.ParameterValuePtr = (SQLPOINTER)PyByteArray_AsString(param); info.BufferLength = cb; info.ColumnSize = (SQLUINTEGER)max(cb, 1); info.StrLen_or_Ind = cb; } else { info.ParameterType = SQL_LONGVARBINARY; info.ParameterValuePtr = param; info.ColumnSize = (SQLUINTEGER)cb; info.BufferLength = sizeof(PyObject*); // How big is ParameterValuePtr; ODBC copies it and gives it back in SQLParamData info.StrLen_or_Ind = cur->cnxn->need_long_data_len ? SQL_LEN_DATA_AT_EXEC((SQLLEN)cb) : SQL_DATA_AT_EXEC; } return true; } #endif static bool GetParameterInfo(Cursor* cur, Py_ssize_t index, PyObject* param, ParamInfo& info) { // Determines the type of SQL parameter that will be used for this parameter based on the Python data type. // // Populates `info`. // Hold a reference to param until info is freed, because info will often be holding data borrowed from param. 
info.pParam = param; if (param == Py_None) return GetNullInfo(cur, index, info); if (param == null_binary) return GetNullBinaryInfo(cur, index, info); if (PyBytes_Check(param)) return GetBytesInfo(cur, index, param, info); if (PyUnicode_Check(param)) return GetUnicodeInfo(cur, index, param, info); if (PyBool_Check(param)) return GetBooleanInfo(cur, index, param, info); if (PyDateTime_Check(param)) return GetDateTimeInfo(cur, index, param, info); if (PyDate_Check(param)) return GetDateInfo(cur, index, param, info); if (PyTime_Check(param)) return GetTimeInfo(cur, index, param, info); if (PyLong_Check(param)) return GetLongInfo(cur, index, param, info); if (PyFloat_Check(param)) return GetFloatInfo(cur, index, param, info); if (PyDecimal_Check(param)) return GetDecimalInfo(cur, index, param, info); #if PY_VERSION_HEX >= 0x02060000 if (PyByteArray_Check(param)) return GetByteArrayInfo(cur, index, param, info); #endif #if PY_MAJOR_VERSION < 3 if (PyInt_Check(param)) return GetIntInfo(cur, index, param, info); if (PyBuffer_Check(param)) return GetBufferInfo(cur, index, param, info); #endif RaiseErrorV("HY105", ProgrammingError, "Invalid parameter type. 
param-index=%zd param-type=%s", index, Py_TYPE(param)->tp_name); return false; } bool BindParameter(Cursor* cur, Py_ssize_t index, ParamInfo& info) { TRACE("BIND: param=%d ValueType=%d (%s) ParameterType=%d (%s) ColumnSize=%d DecimalDigits=%d BufferLength=%d *pcb=%d\n", (index+1), info.ValueType, CTypeName(info.ValueType), info.ParameterType, SqlTypeName(info.ParameterType), info.ColumnSize, info.DecimalDigits, info.BufferLength, info.StrLen_or_Ind); SQLRETURN ret = -1; Py_BEGIN_ALLOW_THREADS ret = SQLBindParameter(cur->hstmt, (SQLUSMALLINT)(index + 1), SQL_PARAM_INPUT, info.ValueType, info.ParameterType, info.ColumnSize, info.DecimalDigits, info.ParameterValuePtr, info.BufferLength, &info.StrLen_or_Ind); Py_END_ALLOW_THREADS; if (GetConnection(cur)->hdbc == SQL_NULL_HANDLE) { // The connection was closed by another thread in the ALLOW_THREADS block above. RaiseErrorV(0, ProgrammingError, "The cursor's connection was closed."); return false; } if (!SQL_SUCCEEDED(ret)) { RaiseErrorFromHandle("SQLBindParameter", GetConnection(cur)->hdbc, cur->hstmt); return false; } return true; } void FreeParameterData(Cursor* cur) { // Unbinds the parameters and frees the parameter buffer. if (cur->paramInfos) { // MS ODBC will crash if we use an HSTMT after the HDBC has been freed. if (cur->cnxn->hdbc != SQL_NULL_HANDLE) { Py_BEGIN_ALLOW_THREADS SQLFreeStmt(cur->hstmt, SQL_RESET_PARAMS); Py_END_ALLOW_THREADS } FreeInfos(cur->paramInfos, cur->paramcount); cur->paramInfos = 0; } } void FreeParameterInfo(Cursor* cur) { // Internal function to free just the cached parameter information. This is not used by the general cursor code // since this information is also freed in the less granular free_results function that clears everything. 
Py_XDECREF(cur->pPreparedSQL); pyodbc_free(cur->paramtypes); cur->pPreparedSQL = 0; cur->paramtypes = 0; cur->paramcount = 0; } bool PrepareAndBind(Cursor* cur, PyObject* pSql, PyObject* original_params, bool skip_first) { #if PY_MAJOR_VERSION >= 3 if (!PyUnicode_Check(pSql)) { PyErr_SetString(PyExc_TypeError, "SQL must be a Unicode string"); return false; } #endif // // Normalize the parameter variables. // // Since we may replace parameters (we replace objects with Py_True/Py_False when writing to a bit/bool column), // allocate an array and use it instead of the original sequence int params_offset = skip_first ? 1 : 0; Py_ssize_t cParams = original_params == 0 ? 0 : PySequence_Length(original_params) - params_offset; // // Prepare the SQL if necessary. // if (pSql != cur->pPreparedSQL) { FreeParameterInfo(cur); SQLRETURN ret = 0; SQLSMALLINT cParamsT = 0; const char* szErrorFunc = "SQLPrepare"; if (PyUnicode_Check(pSql)) { SQLWChar sql(pSql); Py_BEGIN_ALLOW_THREADS ret = SQLPrepareW(cur->hstmt, sql, SQL_NTS); if (SQL_SUCCEEDED(ret)) { szErrorFunc = "SQLNumParams"; ret = SQLNumParams(cur->hstmt, &cParamsT); } Py_END_ALLOW_THREADS } #if PY_MAJOR_VERSION < 3 else { TRACE("SQLPrepare(%s)\n", PyString_AS_STRING(pSql)); Py_BEGIN_ALLOW_THREADS ret = SQLPrepare(cur->hstmt, (SQLCHAR*)PyString_AS_STRING(pSql), SQL_NTS); if (SQL_SUCCEEDED(ret)) { szErrorFunc = "SQLNumParams"; ret = SQLNumParams(cur->hstmt, &cParamsT); } Py_END_ALLOW_THREADS } #endif if (cur->cnxn->hdbc == SQL_NULL_HANDLE) { // The connection was closed by another thread in the ALLOW_THREADS block above. 
RaiseErrorV(0, ProgrammingError, "The cursor's connection was closed."); return false; } if (!SQL_SUCCEEDED(ret)) { RaiseErrorFromHandle(szErrorFunc, GetConnection(cur)->hdbc, cur->hstmt); return false; } cur->paramcount = (int)cParamsT; cur->pPreparedSQL = pSql; Py_INCREF(cur->pPreparedSQL); } if (cParams != cur->paramcount) { RaiseErrorV(0, ProgrammingError, "The SQL contains %d parameter markers, but %d parameters were supplied", cur->paramcount, cParams); return false; } cur->paramInfos = (ParamInfo*)pyodbc_malloc(sizeof(ParamInfo) * cParams); if (cur->paramInfos == 0) { PyErr_NoMemory(); return 0; } memset(cur->paramInfos, 0, sizeof(ParamInfo) * cParams); // Since you can't call SQLDesribeParam *after* calling SQLBindParameter, we'll loop through all of the // GetParameterInfos first, then bind. for (Py_ssize_t i = 0; i < cParams; i++) { // PySequence_GetItem returns a *new* reference, which GetParameterInfo will take ownership of. It is stored // in paramInfos and will be released in FreeInfos (which is always eventually called). PyObject* param = PySequence_GetItem(original_params, i + params_offset); if (!GetParameterInfo(cur, i, param, cur->paramInfos[i])) { FreeInfos(cur->paramInfos, cParams); cur->paramInfos = 0; return false; } } for (Py_ssize_t i = 0; i < cParams; i++) { if (!BindParameter(cur, i, cur->paramInfos[i])) { FreeInfos(cur->paramInfos, cParams); cur->paramInfos = 0; return false; } } return true; } static bool GetParamType(Cursor* cur, Py_ssize_t index, SQLSMALLINT& type) { // Returns the ODBC type of the of given parameter. // // Normally we set the parameter type based on the parameter's Python object type (e.g. str --> SQL_CHAR), so this // is only called when the parameter is None. In that case, we can't guess the type and have to use // SQLDescribeParam. // // If the database doesn't support SQLDescribeParam, we return SQL_VARCHAR since it converts to most other types. 
// However, it will not usually work if the target column is a binary column. if (!GetConnection(cur)->supports_describeparam || cur->paramcount == 0) { type = SQL_VARCHAR; return true; } if (cur->paramtypes == 0) { cur->paramtypes = reinterpret_cast(pyodbc_malloc(sizeof(SQLSMALLINT) * cur->paramcount)); if (cur->paramtypes == 0) { PyErr_NoMemory(); return false; } // SQL_UNKNOWN_TYPE is zero, so zero out all columns since we haven't looked any up yet. memset(cur->paramtypes, 0, sizeof(SQLSMALLINT) * cur->paramcount); } if (cur->paramtypes[index] == SQL_UNKNOWN_TYPE) { SQLULEN ParameterSizePtr; SQLSMALLINT DecimalDigitsPtr; SQLSMALLINT NullablePtr; SQLRETURN ret; Py_BEGIN_ALLOW_THREADS ret = SQLDescribeParam(cur->hstmt, (SQLUSMALLINT)(index + 1), &cur->paramtypes[index], &ParameterSizePtr, &DecimalDigitsPtr, &NullablePtr); Py_END_ALLOW_THREADS if (!SQL_SUCCEEDED(ret)) { // This can happen with ("select ?", None). We'll default to VARCHAR which works with most types. cur->paramtypes[index] = SQL_VARCHAR; } } type = cur->paramtypes[index]; return true; } struct NullParam { PyObject_HEAD }; PyTypeObject NullParamType = { PyVarObject_HEAD_INIT(NULL, 0) "pyodbc.NullParam", // tp_name sizeof(NullParam), // tp_basicsize 0, // tp_itemsize 0, // destructor tp_dealloc 0, // tp_print 0, // tp_getattr 0, // tp_setattr 0, // tp_compare 0, // tp_repr 0, // tp_as_number 0, // tp_as_sequence 0, // tp_as_mapping 0, // tp_hash 0, // tp_call 0, // tp_str 0, // tp_getattro 0, // tp_setattro 0, // tp_as_buffer Py_TPFLAGS_DEFAULT, // tp_flags }; PyObject* null_binary; bool Params_init() { if (PyType_Ready(&NullParamType) < 0) return false; null_binary = (PyObject*)PyObject_New(NullParam, &NullParamType); if (null_binary == 0) return false; PyDateTime_IMPORT; return true; } pyodbc-3.0.7/src/errors.h0000666000175000017500000000473112031131304013724 0ustar dokodoko #ifndef _ERRORS_H_ #define _ERRORS_H_ // Sets an exception based on the ODBC SQLSTATE and error message and returns zero. 
If either handle is not available, // pass SQL_NULL_HANDLE. // // szFunction // The name of the function that failed. Python generates a useful stack trace, but we often don't know where in the // C++ code we failed. // PyObject* RaiseErrorFromHandle(const char* szFunction, HDBC hdbc, HSTMT hstmt); // Sets an exception using a printf-like error message. // // szSqlState // The optional SQLSTATE reported by ODBC. If not provided (sqlstate is NULL or sqlstate[0] is NULL), "HY000" // (General Error) is used. Note that HY000 causes Error to be used if exc_class is not provided. // // exc_class // The optional exception class (DatabaseError, etc.) to construct. If NULL, the appropriate class will be // determined from the SQLSTATE. // PyObject* RaiseErrorV(const char* sqlstate, PyObject* exc_class, const char* format, ...); // Constructs an exception and returns it. // // This function is like RaiseErrorFromHandle, but gives you the ability to examine the error first (in particular, // used to examine the SQLSTATE using HasSqlState). If you want to use the error, call PyErr_SetObject(ex->ob_type, // ex). Otherwise, dispose of the error using Py_DECREF(ex). // // szFunction // The name of the function that failed. Python generates a useful stack trace, but we often don't know where in the // C++ code we failed. // PyObject* GetErrorFromHandle(const char* szFunction, HDBC hdbc, HSTMT hstmt); // Returns true if `ex` is a database exception with SQLSTATE `szSqlState`. Returns false otherwise. // // It is safe to call with ex set to zero. The SQLSTATE comparison is case-insensitive. // bool HasSqlState(PyObject* ex, const char* szSqlState); // Returns true if the HSTMT has a diagnostic record with the given SQLSTATE. This is used after SQLGetData call that // returned SQL_SUCCESS_WITH_INFO to see if it also has SQLSTATE 01004, indicating there is more data. 
// bool HasSqlState(HSTMT hstmt, const char* szSqlState); inline PyObject* RaiseErrorFromException(PyObject* pError) { // PyExceptionInstance_Class doesn't exist in 2.4 #if PY_MAJOR_VERSION >= 3 PyErr_SetObject((PyObject*)Py_TYPE(pError), pError); #else PyObject* cls = (PyObject*)((PyInstance_Check(pError) ? (PyObject*)((PyInstanceObject*)pError)->in_class : (PyObject*)(Py_TYPE(pError)))); PyErr_SetObject(cls, pError); #endif return 0; } #endif // _ERRORS_H_ pyodbc-3.0.7/src/cnxninfo.cpp0000666000175000017500000001601112146217440014574 0ustar dokodoko // There is a bunch of information we want from connections which requires calls to SQLGetInfo when we first connect. // However, this isn't something we really want to do for every connection, so we cache it by the hash of the // connection string. When we create a new connection, we copy the values into the connection structure. // // We hash the connection string since it may contain sensitive information we wouldn't want exposed in a core dump. #include "pyodbc.h" #include "cnxninfo.h" #include "connection.h" #include "wrapper.h" // Maps from a Python string of the SHA1 hash to a CnxnInfo object. // static PyObject* map_hash_to_info; static PyObject* hashlib; // The hashlib module if Python 2.5+ static PyObject* sha; // The sha module if Python 2.4 static PyObject* update; // The string 'update', used in GetHash. void CnxnInfo_init() { // Called during startup to give us a chance to import the hash code. If we can't find it, we'll print a warning // to the console and not cache anything. // First try hashlib which was added in 2.5. 2.6 complains using warnings which we don't want affecting the // caller. 
    // Build the hash -> CnxnInfo cache and the helper objects used for hashing.
    // NOTE(review): the results of PyDict_New / PyString_FromString are not checked
    // here; a failure leaves these pointers null -- presumably acceptable at startup,
    // since GetHash / GetConnectionInfo tolerate null module pointers.
    map_hash_to_info = PyDict_New();

    update = PyString_FromString("update");

    hashlib = PyImport_ImportModule("hashlib");
    if (!hashlib)
    {
        // Fall back to the legacy `sha` module (Python 2.4) when hashlib is unavailable.
        sha = PyImport_ImportModule("sha");
    }
}


// Returns a new Python string holding the hex SHA-1 digest of `p`, or 0 on failure
// (including the case where neither hashing module could be imported).
static PyObject* GetHash(PyObject* p)
{
#if PY_MAJOR_VERSION >= 3
    // Hash the UTF-8 encoding of the Unicode connection string.
    Object bytes(PyUnicode_EncodeUTF8(PyUnicode_AS_UNICODE(p), PyUnicode_GET_SIZE(p), 0));
    if (!bytes)
        return 0;
    p = bytes.Get();
#endif

    if (hashlib)
    {
        Object hash(PyObject_CallMethod(hashlib, "new", "s", "sha1"));
        if (!hash.IsValid())
            return 0;

        // `update` is the interned method-name string created in CnxnInfo_init.
        PyObject_CallMethodObjArgs(hash, update, p, 0);
        return PyObject_CallMethod(hash, "hexdigest", 0);
    }

    if (sha)
    {
        Object hash(PyObject_CallMethod(sha, "new", 0));
        if (!hash.IsValid())
            return 0;

        PyObject_CallMethodObjArgs(hash, update, p, 0);
        return PyObject_CallMethod(hash, "hexdigest", 0);
    }

    return 0;
}


// Creates a CnxnInfo by querying the driver (SQLGetInfo / SQLGetTypeInfo) for the
// capabilities we cache per connection string.  Returns a new reference or 0.
static PyObject* CnxnInfo_New(Connection* cnxn)
{
#ifdef _MSC_VER
#pragma warning(disable : 4365)
#endif
    CnxnInfo* p = PyObject_NEW(CnxnInfo, &CnxnInfoType);
    if (!p)
        return 0;
    Object info((PyObject*)p);

    // set defaults
    p->odbc_major             = 3;
    p->odbc_minor             = 50;
    p->supports_describeparam = false;
    p->datetime_precision     = 19; // default: "yyyy-mm-dd hh:mm:ss"
    p->need_long_data_len     = false;

    // WARNING: The GIL lock is released for the *entire* function here.  Do not touch any objects, call Python APIs,
    // etc.  We are simply making ODBC calls and setting atomic values (ints & chars).  Also, make sure the lock gets
    // released -- do not add an early exit.
SQLRETURN ret; Py_BEGIN_ALLOW_THREADS char szVer[20]; SQLSMALLINT cch = 0; ret = SQLGetInfo(cnxn->hdbc, SQL_DRIVER_ODBC_VER, szVer, _countof(szVer), &cch); if (SQL_SUCCEEDED(ret)) { char* dot = strchr(szVer, '.'); if (dot) { *dot = '\0'; p->odbc_major=(char)atoi(szVer); p->odbc_minor=(char)atoi(dot + 1); } } char szYN[2]; if (SQL_SUCCEEDED(SQLGetInfo(cnxn->hdbc, SQL_DESCRIBE_PARAMETER, szYN, _countof(szYN), &cch))) p->supports_describeparam = szYN[0] == 'Y'; if (SQL_SUCCEEDED(SQLGetInfo(cnxn->hdbc, SQL_NEED_LONG_DATA_LEN, szYN, _countof(szYN), &cch))) p->need_long_data_len = (szYN[0] == 'Y'); // These defaults are tiny, but are necessary for Access. p->varchar_maxlength = 255; p->wvarchar_maxlength = 255; p->binary_maxlength = 510; HSTMT hstmt = 0; if (SQL_SUCCEEDED(SQLAllocHandle(SQL_HANDLE_STMT, cnxn->hdbc, &hstmt))) { SQLINTEGER columnsize; if (SQL_SUCCEEDED(SQLGetTypeInfo(hstmt, SQL_TYPE_TIMESTAMP)) && SQL_SUCCEEDED(SQLFetch(hstmt))) if (SQL_SUCCEEDED(SQLGetData(hstmt, 3, SQL_INTEGER, &columnsize, sizeof(columnsize), 0))) p->datetime_precision = (int)columnsize; if (SQL_SUCCEEDED(SQLGetTypeInfo(hstmt, SQL_VARCHAR)) && SQL_SUCCEEDED(SQLFetch(hstmt))) if (SQL_SUCCEEDED(SQLGetData(hstmt, 3, SQL_INTEGER, &columnsize, sizeof(columnsize), 0))) p->varchar_maxlength = (int)columnsize; if (SQL_SUCCEEDED(SQLGetTypeInfo(hstmt, SQL_WVARCHAR)) && SQL_SUCCEEDED(SQLFetch(hstmt))) if (SQL_SUCCEEDED(SQLGetData(hstmt, 3, SQL_INTEGER, &columnsize, sizeof(columnsize), 0))) p->wvarchar_maxlength = (int)columnsize; if (SQL_SUCCEEDED(SQLGetTypeInfo(hstmt, SQL_BINARY)) && SQL_SUCCEEDED(SQLFetch(hstmt))) if (SQL_SUCCEEDED(SQLGetData(hstmt, 3, SQL_INTEGER, &columnsize, sizeof(columnsize), 0))) p->binary_maxlength = (int)columnsize; SQLFreeStmt(hstmt, SQL_CLOSE); } Py_END_ALLOW_THREADS // WARNING: Released the lock now. 
return info.Detach(); } PyObject* GetConnectionInfo(PyObject* pConnectionString, Connection* cnxn) { // Looks-up or creates a CnxnInfo object for the given connection string. The connection string can be a Unicode // or String object. Object hash(GetHash(pConnectionString)); if (hash.IsValid()) { PyObject* info = PyDict_GetItem(map_hash_to_info, hash); if (info) { Py_INCREF(info); return info; } } PyObject* info = CnxnInfo_New(cnxn); if (info != 0 && hash.IsValid()) PyDict_SetItem(map_hash_to_info, hash, info); return info; } PyTypeObject CnxnInfoType = { PyVarObject_HEAD_INIT(0, 0) "pyodbc.CnxnInfo", // tp_name sizeof(CnxnInfo), // tp_basicsize 0, // tp_itemsize 0, // destructor tp_dealloc 0, // tp_print 0, // tp_getattr 0, // tp_setattr 0, // tp_compare 0, // tp_repr 0, // tp_as_number 0, // tp_as_sequence 0, // tp_as_mapping 0, // tp_hash 0, // tp_call 0, // tp_str 0, // tp_getattro 0, // tp_setattro 0, // tp_as_buffer Py_TPFLAGS_DEFAULT, // tp_flags }; pyodbc-3.0.7/src/cursor.cpp0000666000175000017500000022126112146217440014274 0ustar dokodoko // Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated // documentation files (the "Software"), to deal in the Software without restriction, including without limitation the // rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to // permit persons to whom the Software is furnished to do so. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE // WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS // OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR // OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
// Note: This project has gone from C++ (when it was ported from pypgdb) to C, back to C++ (where it will stay). If // you are making modifications, feel free to move variable declarations from the top of functions to where they are // actually used. #include "pyodbc.h" #include "cursor.h" #include "pyodbcmodule.h" #include "connection.h" #include "row.h" #include "buffer.h" #include "params.h" #include "errors.h" #include "getdata.h" #include "dbspecific.h" #include "sqlwchar.h" #include #include "wrapper.h" enum { CURSOR_REQUIRE_CNXN = 0x00000001, CURSOR_REQUIRE_OPEN = 0x00000003, // includes _CNXN CURSOR_REQUIRE_RESULTS = 0x00000007, // includes _OPEN CURSOR_RAISE_ERROR = 0x00000010, }; inline bool StatementIsValid(Cursor* cursor) { return cursor->cnxn != 0 && ((Connection*)cursor->cnxn)->hdbc != SQL_NULL_HANDLE && cursor->hstmt != SQL_NULL_HANDLE; } extern PyTypeObject CursorType; inline bool Cursor_Check(PyObject* o) { return o != 0 && Py_TYPE(o) == &CursorType; } static Cursor* Cursor_Validate(PyObject* obj, DWORD flags) { // Validates that a PyObject is a Cursor (like Cursor_Check) and optionally some other requirements controlled by // `flags`. If valid and all requirements (from the flags) are met, the cursor is returned, cast to Cursor*. // Otherwise zero is returned. // // Designed to be used at the top of methods to convert the PyObject pointer and perform necessary checks. // // Valid flags are from the CURSOR_ enum above. Note that unless CURSOR_RAISE_ERROR is supplied, an exception // will not be set. (When deallocating, we really don't want an exception.) 
Connection* cnxn = 0; Cursor* cursor = 0; if (!Cursor_Check(obj)) { if (flags & CURSOR_RAISE_ERROR) PyErr_SetString(ProgrammingError, "Invalid cursor object."); return 0; } cursor = (Cursor*)obj; cnxn = (Connection*)cursor->cnxn; if (cnxn == 0) { if (flags & CURSOR_RAISE_ERROR) PyErr_SetString(ProgrammingError, "Attempt to use a closed cursor."); return 0; } if (IsSet(flags, CURSOR_REQUIRE_OPEN)) { if (cursor->hstmt == SQL_NULL_HANDLE) { if (flags & CURSOR_RAISE_ERROR) PyErr_SetString(ProgrammingError, "Attempt to use a closed cursor."); return 0; } if (cnxn->hdbc == SQL_NULL_HANDLE) { if (flags & CURSOR_RAISE_ERROR) PyErr_SetString(ProgrammingError, "The cursor's connection has been closed."); return 0; } } if (IsSet(flags, CURSOR_REQUIRE_RESULTS) && cursor->colinfos == 0) { if (flags & CURSOR_RAISE_ERROR) PyErr_SetString(ProgrammingError, "No results. Previous SQL was not a query."); return 0; } return cursor; } inline bool IsNumericType(SQLSMALLINT sqltype) { switch (sqltype) { case SQL_DECIMAL: case SQL_NUMERIC: case SQL_REAL: case SQL_FLOAT: case SQL_DOUBLE: case SQL_SMALLINT: case SQL_INTEGER: case SQL_TINYINT: case SQL_BIGINT: return true; } return false; } static PyObject* PythonTypeFromSqlType(Cursor* cur, const SQLCHAR* name, SQLSMALLINT type, bool unicode_results) { // Returns a type object ('int', 'str', etc.) for the given ODBC C type. This is used to populate // Cursor.description with the type of Python object that will be returned for each column. // // name // The name of the column, only used to create error messages. // // type // The ODBC C type (SQL_C_CHAR, etc.) of the column. // // The returned object does not have its reference count incremented! 
int conv_index = GetUserConvIndex(cur, type); if (conv_index != -1) return (PyObject*)&PyString_Type; PyObject* pytype = 0; switch (type) { case SQL_CHAR: case SQL_VARCHAR: case SQL_LONGVARCHAR: case SQL_GUID: case SQL_SS_XML: if (unicode_results) pytype = (PyObject*)&PyUnicode_Type; else pytype = (PyObject*)&PyString_Type; break; case SQL_DECIMAL: case SQL_NUMERIC: pytype = (PyObject*)decimal_type; break; case SQL_REAL: case SQL_FLOAT: case SQL_DOUBLE: pytype = (PyObject*)&PyFloat_Type; break; case SQL_SMALLINT: case SQL_INTEGER: case SQL_TINYINT: pytype = (PyObject*)&PyInt_Type; break; case SQL_TYPE_DATE: pytype = (PyObject*)PyDateTimeAPI->DateType; break; case SQL_TYPE_TIME: case SQL_SS_TIME2: // SQL Server 2008+ pytype = (PyObject*)PyDateTimeAPI->TimeType; break; case SQL_TYPE_TIMESTAMP: pytype = (PyObject*)PyDateTimeAPI->DateTimeType; break; case SQL_BIGINT: pytype = (PyObject*)&PyLong_Type; break; case SQL_BIT: pytype = (PyObject*)&PyBool_Type; break; case SQL_BINARY: case SQL_VARBINARY: case SQL_LONGVARBINARY: #if PY_MAJOR_VERSION >= 3 pytype = (PyObject*)&PyBytes_Type; #else pytype = (PyObject*)&PyBuffer_Type; #endif break; case SQL_WCHAR: case SQL_WVARCHAR: case SQL_WLONGVARCHAR: pytype = (PyObject*)&PyUnicode_Type; break; default: return RaiseErrorV(0, 0, "ODBC data type %d is not supported. Cannot read column %s.", type, (const char*)name); } Py_INCREF(pytype); return pytype; } static bool create_name_map(Cursor* cur, SQLSMALLINT field_count, bool lower) { // Called after an execute to construct the map shared by rows. bool success = false; PyObject *desc = 0, *colmap = 0, *colinfo = 0, *type = 0, *index = 0, *nullable_obj=0; SQLRETURN ret; I(cur->hstmt != SQL_NULL_HANDLE && cur->colinfos != 0); // These are the values we expect after free_results. If this function fails, we do not modify any members, so // they should be set to something Cursor_close can deal with. 
I(cur->description == Py_None); I(cur->map_name_to_index == 0); if (cur->cnxn->hdbc == SQL_NULL_HANDLE) { RaiseErrorV(0, ProgrammingError, "The cursor's connection was closed."); return false; } desc = PyTuple_New((Py_ssize_t)field_count); colmap = PyDict_New(); if (!desc || !colmap) goto done; for (int i = 0; i < field_count; i++) { SQLCHAR name[300]; SQLSMALLINT nDataType; SQLULEN nColSize; // precision SQLSMALLINT cDecimalDigits; // scale SQLSMALLINT nullable; Py_BEGIN_ALLOW_THREADS ret = SQLDescribeCol(cur->hstmt, (SQLUSMALLINT)(i + 1), name, _countof(name), 0, &nDataType, &nColSize, &cDecimalDigits, &nullable); Py_END_ALLOW_THREADS if (cur->cnxn->hdbc == SQL_NULL_HANDLE) { // The connection was closed by another thread in the ALLOW_THREADS block above. RaiseErrorV(0, ProgrammingError, "The cursor's connection was closed."); goto done; } if (!SQL_SUCCEEDED(ret)) { RaiseErrorFromHandle("SQLDescribeCol", cur->cnxn->hdbc, cur->hstmt); goto done; } TRACE("Col %d: type=%d colsize=%d\n", (i+1), (int)nDataType, (int)nColSize); if (lower) _strlwr((char*)name); type = PythonTypeFromSqlType(cur, name, nDataType, cur->cnxn->unicode_results); if (!type) goto done; switch (nullable) { case SQL_NO_NULLS: nullable_obj = Py_False; break; case SQL_NULLABLE: nullable_obj = Py_True; break; case SQL_NULLABLE_UNKNOWN: default: nullable_obj = Py_None; break; } // The Oracle ODBC driver has a bug (I call it) that it returns a data size of 0 when a numeric value is // retrieved from a UNION: http://support.microsoft.com/?scid=kb%3Ben-us%3B236786&x=13&y=6 // // Unfortunately, I don't have a test system for this yet, so I'm *trying* something. (Not a good sign.) If // the size is zero and it appears to be a numeric type, we'll try to come up with our own length using any // other data we can get. 
if (nColSize == 0 && IsNumericType(nDataType)) { // I'm not sure how if (cDecimalDigits != 0) { nColSize = (SQLUINTEGER)(cDecimalDigits + 3); } else { // I'm not sure if this is a good idea, but ... nColSize = 42; } } colinfo = Py_BuildValue("(sOOiiiO)", (char*)name, type, // type_code Py_None, // display size (int)nColSize, // internal_size (int)nColSize, // precision (int)cDecimalDigits, // scale nullable_obj); // null_ok if (!colinfo) goto done; nullable_obj = 0; index = PyInt_FromLong(i); if (!index) goto done; PyDict_SetItemString(colmap, (const char*)name, index); Py_DECREF(index); // SetItemString increments index = 0; PyTuple_SET_ITEM(desc, i, colinfo); colinfo = 0; // reference stolen by SET_ITEM } Py_XDECREF(cur->description); cur->description = desc; desc = 0; cur->map_name_to_index = colmap; colmap = 0; success = true; done: Py_XDECREF(nullable_obj); Py_XDECREF(desc); Py_XDECREF(colmap); Py_XDECREF(index); Py_XDECREF(colinfo); return success; } enum free_results_flags { FREE_STATEMENT = 0x01, KEEP_STATEMENT = 0x02, FREE_PREPARED = 0x04, KEEP_PREPARED = 0x08, STATEMENT_MASK = 0x03, PREPARED_MASK = 0x0C }; static bool free_results(Cursor* self, int flags) { // Internal function called any time we need to free the memory associated with query results. It is safe to call // this even when a query has not been executed. // If we ran out of memory, it is possible that we have a cursor but colinfos is zero. However, we should be // deleting this object, so the cursor will be freed when the HSTMT is destroyed. 
*/ I((flags & STATEMENT_MASK) != 0); I((flags & PREPARED_MASK) != 0); if ((flags & PREPARED_MASK) == FREE_PREPARED) { Py_XDECREF(self->pPreparedSQL); self->pPreparedSQL = 0; } if (self->colinfos) { pyodbc_free(self->colinfos); self->colinfos = 0; } if (StatementIsValid(self)) { if ((flags & STATEMENT_MASK) == FREE_STATEMENT) { Py_BEGIN_ALLOW_THREADS SQLFreeStmt(self->hstmt, SQL_CLOSE); Py_END_ALLOW_THREADS; } else { Py_BEGIN_ALLOW_THREADS SQLFreeStmt(self->hstmt, SQL_UNBIND); SQLFreeStmt(self->hstmt, SQL_RESET_PARAMS); Py_END_ALLOW_THREADS; } if (self->cnxn->hdbc == SQL_NULL_HANDLE) { // The connection was closed by another thread in the ALLOW_THREADS block above. RaiseErrorV(0, ProgrammingError, "The cursor's connection was closed."); return false; } } if (self->description != Py_None) { Py_DECREF(self->description); self->description = Py_None; Py_INCREF(Py_None); } if (self->map_name_to_index) { Py_DECREF(self->map_name_to_index); self->map_name_to_index = 0; } self->rowcount = -1; return true; } static void closeimpl(Cursor* cur) { // An internal function for the shared 'closing' code used by Cursor_close and Cursor_dealloc. // // This method releases the GIL lock while closing, so verify the HDBC still exists if you use it. free_results(cur, FREE_STATEMENT | FREE_PREPARED); FreeParameterInfo(cur); FreeParameterData(cur); if (StatementIsValid(cur)) { HSTMT hstmt = cur->hstmt; cur->hstmt = SQL_NULL_HANDLE; Py_BEGIN_ALLOW_THREADS SQLFreeHandle(SQL_HANDLE_STMT, hstmt); Py_END_ALLOW_THREADS } Py_XDECREF(cur->pPreparedSQL); Py_XDECREF(cur->description); Py_XDECREF(cur->map_name_to_index); Py_XDECREF(cur->cnxn); cur->pPreparedSQL = 0; cur->description = 0; cur->map_name_to_index = 0; cur->cnxn = 0; } static char close_doc[] = "Close the cursor now (rather than whenever __del__ is called). 
The cursor will\n" "be unusable from this point forward; a ProgrammingError exception will be\n" "raised if any operation is attempted with the cursor."; static PyObject* Cursor_close(PyObject* self, PyObject* args) { UNUSED(args); Cursor* cursor = Cursor_Validate(self, CURSOR_REQUIRE_OPEN | CURSOR_RAISE_ERROR); if (!cursor) return 0; closeimpl(cursor); Py_INCREF(Py_None); return Py_None; } static void Cursor_dealloc(Cursor* cursor) { if (Cursor_Validate((PyObject*)cursor, CURSOR_REQUIRE_CNXN)) { closeimpl(cursor); } PyObject_Del(cursor); } bool InitColumnInfo(Cursor* cursor, SQLUSMALLINT iCol, ColumnInfo* pinfo) { // Initializes ColumnInfo from result set metadata. SQLRETURN ret; // REVIEW: This line fails on OS/X with the FileMaker driver : http://www.filemaker.com/support/updaters/xdbc_odbc_mac.html // // I suspect the problem is that it doesn't allow NULLs in some of the parameters, so I'm going to supply them all // to see what happens. SQLCHAR ColumnName[200]; SQLSMALLINT BufferLength = _countof(ColumnName); SQLSMALLINT NameLength = 0; SQLSMALLINT DataType = 0; SQLULEN ColumnSize = 0; SQLSMALLINT DecimalDigits = 0; SQLSMALLINT Nullable = 0; Py_BEGIN_ALLOW_THREADS ret = SQLDescribeCol(cursor->hstmt, iCol, ColumnName, BufferLength, &NameLength, &DataType, &ColumnSize, &DecimalDigits, &Nullable); Py_END_ALLOW_THREADS pinfo->sql_type = DataType; pinfo->column_size = ColumnSize; if (cursor->cnxn->hdbc == SQL_NULL_HANDLE) { // The connection was closed by another thread in the ALLOW_THREADS block above. RaiseErrorV(0, ProgrammingError, "The cursor's connection was closed."); return false; } if (!SQL_SUCCEEDED(ret)) { RaiseErrorFromHandle("SQLDescribeCol", cursor->cnxn->hdbc, cursor->hstmt); return false; } // If it is an integer type, determine if it is signed or unsigned. The buffer size is the same but we'll need to // know when we convert to a Python integer. 
switch (pinfo->sql_type) { case SQL_TINYINT: case SQL_SMALLINT: case SQL_INTEGER: case SQL_BIGINT: { SQLLEN f; Py_BEGIN_ALLOW_THREADS ret = SQLColAttribute(cursor->hstmt, iCol, SQL_DESC_UNSIGNED, 0, 0, 0, &f); Py_END_ALLOW_THREADS if (cursor->cnxn->hdbc == SQL_NULL_HANDLE) { // The connection was closed by another thread in the ALLOW_THREADS block above. RaiseErrorV(0, ProgrammingError, "The cursor's connection was closed."); return false; } if (!SQL_SUCCEEDED(ret)) { RaiseErrorFromHandle("SQLColAttribute", cursor->cnxn->hdbc, cursor->hstmt); return false; } pinfo->is_unsigned = (f == SQL_TRUE); break; } default: pinfo->is_unsigned = false; } return true; } static bool PrepareResults(Cursor* cur, int cCols) { // Called after a SELECT has been executed to perform pre-fetch work. // // Allocates the ColumnInfo structures describing the returned data. int i; I(cur->colinfos == 0); cur->colinfos = (ColumnInfo*)pyodbc_malloc(sizeof(ColumnInfo) * cCols); if (cur->colinfos == 0) { PyErr_NoMemory(); return false; } for (i = 0; i < cCols; i++) { if (!InitColumnInfo(cur, (SQLUSMALLINT)(i + 1), &cur->colinfos[i])) { pyodbc_free(cur->colinfos); cur->colinfos = 0; return false; } } return true; } static PyObject* execute(Cursor* cur, PyObject* pSql, PyObject* params, bool skip_first) { // Internal function to execute SQL, called by .execute and .executemany. // // pSql // A PyString, PyUnicode, or derived object containing the SQL. // // params // Pointer to an optional sequence of parameters, and possibly the SQL statement (see skip_first): // (SQL, param1, param2) or (param1, param2). // // skip_first // If true, the first element in `params` is ignored. (It will be the SQL statement and `params` will be the // entire tuple passed to Cursor.execute.) Otherwise all of the params are used. (This case occurs when called // from Cursor.executemany, in which case the sequences do not contain the SQL statement.) Ignored if params is // zero. 
if (params) { if (!PyTuple_Check(params) && !PyList_Check(params) && !Row_Check(params)) return RaiseErrorV(0, PyExc_TypeError, "Params must be in a list, tuple, or Row"); } // Normalize the parameter variables. int params_offset = skip_first ? 1 : 0; Py_ssize_t cParams = params == 0 ? 0 : PySequence_Length(params) - params_offset; SQLRETURN ret = 0; free_results(cur, FREE_STATEMENT | KEEP_PREPARED); const char* szLastFunction = ""; if (cParams > 0) { // There are parameters, so we'll need to prepare the SQL statement and bind the parameters. (We need to // prepare the statement because we can't bind a NULL (None) object without knowing the target datatype. There // is no one data type that always maps to the others (no, not even varchar)). if (!PrepareAndBind(cur, pSql, params, skip_first)) return 0; szLastFunction = "SQLExecute"; Py_BEGIN_ALLOW_THREADS ret = SQLExecute(cur->hstmt); Py_END_ALLOW_THREADS } else { // REVIEW: Why don't we always prepare? It is highly unlikely that a user would need to execute the same SQL // repeatedly if it did not have parameters, so we are not losing performance, but it would simplify the code. Py_XDECREF(cur->pPreparedSQL); cur->pPreparedSQL = 0; szLastFunction = "SQLExecDirect"; #if PY_MAJOR_VERSION < 3 if (PyString_Check(pSql)) { Py_BEGIN_ALLOW_THREADS ret = SQLExecDirect(cur->hstmt, (SQLCHAR*)PyString_AS_STRING(pSql), SQL_NTS); Py_END_ALLOW_THREADS } else #endif { SQLWChar query(pSql); if (!query) return 0; Py_BEGIN_ALLOW_THREADS ret = SQLExecDirectW(cur->hstmt, query, SQL_NTS); Py_END_ALLOW_THREADS } } if (cur->cnxn->hdbc == SQL_NULL_HANDLE) { // The connection was closed by another thread in the ALLOW_THREADS block above. 
FreeParameterData(cur); return RaiseErrorV(0, ProgrammingError, "The cursor's connection was closed."); } if (!SQL_SUCCEEDED(ret) && ret != SQL_NEED_DATA && ret != SQL_NO_DATA) { // We could try dropping through the while and if below, but if there is an error, we need to raise it before // FreeParameterData calls more ODBC functions. RaiseErrorFromHandle("SQLExecDirectW", cur->cnxn->hdbc, cur->hstmt); FreeParameterData(cur); return 0; } while (ret == SQL_NEED_DATA) { // We have bound a PyObject* using SQL_LEN_DATA_AT_EXEC, so ODBC is asking us for the data now. We gave the // PyObject pointer to ODBC in SQLBindParameter -- SQLParamData below gives the pointer back to us. // // Note that we did not increment the pointer reference for this since we are still in the same C function call // that performed the bind. szLastFunction = "SQLParamData"; PyObject* pParam; Py_BEGIN_ALLOW_THREADS ret = SQLParamData(cur->hstmt, (SQLPOINTER*)&pParam); Py_END_ALLOW_THREADS if (ret != SQL_NEED_DATA && ret != SQL_NO_DATA && !SQL_SUCCEEDED(ret)) return RaiseErrorFromHandle("SQLParamData", cur->cnxn->hdbc, cur->hstmt); TRACE("SQLParamData() --> %d\n", ret); if (ret == SQL_NEED_DATA) { szLastFunction = "SQLPutData"; if (PyUnicode_Check(pParam)) { SQLWChar wchar(pParam); // Will convert to SQLWCHAR if necessary. 
Py_ssize_t offset = 0; // in characters Py_ssize_t length = wchar.size(); // in characters while (offset < length) { SQLLEN remaining = min(cur->cnxn->varchar_maxlength, length - offset); Py_BEGIN_ALLOW_THREADS ret = SQLPutData(cur->hstmt, (SQLPOINTER)wchar[offset], (SQLLEN)(remaining * sizeof(SQLWCHAR))); Py_END_ALLOW_THREADS if (!SQL_SUCCEEDED(ret)) return RaiseErrorFromHandle("SQLPutData", cur->cnxn->hdbc, cur->hstmt); offset += remaining; } } else if (PyBytes_Check(pParam)) { const char* p = PyBytes_AS_STRING(pParam); SQLLEN offset = 0; SQLLEN cb = (SQLLEN)PyBytes_GET_SIZE(pParam); while (offset < cb) { SQLLEN remaining = min(cur->cnxn->varchar_maxlength, cb - offset); TRACE("SQLPutData [%d] (%d) %s\n", offset, remaining, &p[offset]); Py_BEGIN_ALLOW_THREADS ret = SQLPutData(cur->hstmt, (SQLPOINTER)&p[offset], remaining); Py_END_ALLOW_THREADS if (!SQL_SUCCEEDED(ret)) return RaiseErrorFromHandle("SQLPutData", cur->cnxn->hdbc, cur->hstmt); offset += remaining; } } #if PY_VERSION_HEX >= 0x02060000 else if (PyByteArray_Check(pParam)) { const char* p = PyByteArray_AS_STRING(pParam); SQLLEN offset = 0; SQLLEN cb = (SQLLEN)PyByteArray_GET_SIZE(pParam); while (offset < cb) { SQLLEN remaining = min(cur->cnxn->varchar_maxlength, cb - offset); TRACE("SQLPutData [%d] (%d) %s\n", offset, remaining, &p[offset]); Py_BEGIN_ALLOW_THREADS ret = SQLPutData(cur->hstmt, (SQLPOINTER)&p[offset], remaining); Py_END_ALLOW_THREADS if (!SQL_SUCCEEDED(ret)) return RaiseErrorFromHandle("SQLPutData", cur->cnxn->hdbc, cur->hstmt); offset += remaining; } } #endif #if PY_MAJOR_VERSION < 3 else if (PyBuffer_Check(pParam)) { // Buffers can have multiple segments, so we might need multiple writes. Looping through buffers isn't // difficult, but we've wrapped it up in an iterator object to keep this loop simple. 
BufferSegmentIterator it(pParam); byte* pb; SQLLEN cb; while (it.Next(pb, cb)) { Py_BEGIN_ALLOW_THREADS ret = SQLPutData(cur->hstmt, pb, cb); Py_END_ALLOW_THREADS if (!SQL_SUCCEEDED(ret)) return RaiseErrorFromHandle("SQLPutData", cur->cnxn->hdbc, cur->hstmt); } } #endif ret = SQL_NEED_DATA; } } FreeParameterData(cur); if (ret == SQL_NO_DATA) { // Example: A delete statement that did not delete anything. cur->rowcount = 0; Py_INCREF(cur); return (PyObject*)cur; } if (!SQL_SUCCEEDED(ret)) return RaiseErrorFromHandle(szLastFunction, cur->cnxn->hdbc, cur->hstmt); SQLLEN cRows = -1; Py_BEGIN_ALLOW_THREADS ret = SQLRowCount(cur->hstmt, &cRows); Py_END_ALLOW_THREADS if (!SQL_SUCCEEDED(ret)) return RaiseErrorFromHandle("SQLRowCount", cur->cnxn->hdbc, cur->hstmt); cur->rowcount = (int)cRows; TRACE("SQLRowCount: %d\n", cRows); SQLSMALLINT cCols = 0; Py_BEGIN_ALLOW_THREADS ret = SQLNumResultCols(cur->hstmt, &cCols); Py_END_ALLOW_THREADS if (!SQL_SUCCEEDED(ret)) { // Note: The SQL Server driver sometimes returns HY007 here if multiple statements (separated by ;) were // submitted. This is not documented, but I've seen it with multiple successful inserts. return RaiseErrorFromHandle("SQLNumResultCols", cur->cnxn->hdbc, cur->hstmt); } TRACE("SQLNumResultCols: %d\n", cCols); if (cur->cnxn->hdbc == SQL_NULL_HANDLE) { // The connection was closed by another thread in the ALLOW_THREADS block above. return RaiseErrorV(0, ProgrammingError, "The cursor's connection was closed."); } if (!SQL_SUCCEEDED(ret)) return RaiseErrorFromHandle("SQLRowCount", cur->cnxn->hdbc, cur->hstmt); if (cCols != 0) { // A result set was created. if (!PrepareResults(cur, cCols)) return 0; if (!create_name_map(cur, cCols, lowercase())) return 0; } Py_INCREF(cur); return (PyObject*)cur; } inline bool IsSequence(PyObject* p) { // Used to determine if the first parameter of execute is a collection of SQL parameters or is a SQL parameter // itself. 
If the first parameter is a list, tuple, or Row object, then we consider it a collection. Anything // else, including other sequences (e.g. bytearray), are considered SQL parameters. return PyList_Check(p) || PyTuple_Check(p) || Row_Check(p); } static char execute_doc[] = "C.execute(sql, [params]) --> Cursor\n" "\n" "Prepare and execute a database query or command.\n" "\n" "Parameters may be provided as a sequence (as specified by the DB API) or\n" "simply passed in one after another (non-standard):\n" "\n" " cursor.execute(sql, (param1, param2))\n" "\n" " or\n" "\n" " cursor.execute(sql, param1, param2)\n"; PyObject* Cursor_execute(PyObject* self, PyObject* args) { Py_ssize_t cParams = PyTuple_Size(args) - 1; Cursor* cursor = Cursor_Validate(self, CURSOR_REQUIRE_OPEN | CURSOR_RAISE_ERROR); if (!cursor) return 0; if (cParams < 0) { PyErr_SetString(PyExc_TypeError, "execute() takes at least 1 argument (0 given)"); return 0; } PyObject* pSql = PyTuple_GET_ITEM(args, 0); if (!PyString_Check(pSql) && !PyUnicode_Check(pSql)) { PyErr_SetString(PyExc_TypeError, "The first argument to execute must be a string or unicode query."); return 0; } // Figure out if there were parameters and how they were passed. Our optional parameter passing complicates this slightly. bool skip_first = false; PyObject *params = 0; if (cParams == 1 && IsSequence(PyTuple_GET_ITEM(args, 1))) { // There is a single argument and it is a sequence, so we must treat it as a sequence of parameters. (This is // the normal Cursor.execute behavior.) params = PyTuple_GET_ITEM(args, 1); skip_first = false; } else if (cParams > 0) { params = args; skip_first = true; } // Execute. 
    return execute(cursor, pSql, params, skip_first);
}


// Implements Cursor.executemany(sql, seq_of_params): executes `sql` once per parameter row.
// Accepts a list/tuple/Row of parameter rows, or an iterator/generator yielding them.
// rowcount is left at -1 (per-row counts are not aggregated).
static PyObject* Cursor_executemany(PyObject* self, PyObject* args)
{
    Cursor* cursor = Cursor_Validate(self, CURSOR_REQUIRE_OPEN | CURSOR_RAISE_ERROR);
    if (!cursor)
        return 0;

    cursor->rowcount = -1;

    PyObject *pSql, *param_seq;
    if (!PyArg_ParseTuple(args, "OO", &pSql, &param_seq))
        return 0;

    if (!PyString_Check(pSql) && !PyUnicode_Check(pSql))
    {
        PyErr_SetString(PyExc_TypeError, "The first argument to execute must be a string or unicode query.");
        return 0;
    }

    if (IsSequence(param_seq))
    {
        // A concrete sequence of parameter rows.
        Py_ssize_t c = PySequence_Size(param_seq);

        if (c == 0)
        {
            PyErr_SetString(ProgrammingError, "The second parameter to executemany must not be empty.");
            return 0;
        }

        for (Py_ssize_t i = 0; i < c; i++)
        {
            // PySequence_GetItem returns a new reference; released below regardless of outcome.
            PyObject* params = PySequence_GetItem(param_seq, i);
            PyObject* result = execute(cursor, pSql, params, false);
            bool success = result != 0;
            Py_XDECREF(result);
            Py_DECREF(params);

            if (!success)
            {
                cursor->rowcount = -1;
                return 0;
            }
        }
    }
    else if (PyGen_Check(param_seq) || PyIter_Check(param_seq))
    {
        // Lazily iterate: generators are converted to iterators, plain iterators get an extra
        // reference so the Object wrapper can release it.
        Object iter;

        if (PyGen_Check(param_seq))
        {
            iter = PyObject_GetIter(param_seq);
        }
        else
        {
            iter = param_seq;
            Py_INCREF(param_seq);
        }

        Object params;

        while (params.Attach(PyIter_Next(iter)))
        {
            PyObject* result = execute(cursor, pSql, params, false);
            bool success = result != 0;
            Py_XDECREF(result);

            if (!success)
            {
                cursor->rowcount = -1;
                return 0;
            }
        }

        // PyIter_Next returns 0 both at exhaustion and on error; distinguish them here.
        if (PyErr_Occurred())
            return 0;
    }
    else
    {
        PyErr_SetString(ProgrammingError, "The second parameter to executemany must be a sequence, iterator, or generator.");
        return 0;
    }

    cursor->rowcount = -1;
    Py_RETURN_NONE;
}


static PyObject* Cursor_fetch(Cursor* cur)
{
    // Internal function to fetch a single row and construct a Row object from it.  Used by all of the fetching
    // functions.
    //
    // Returns a Row object if successful.  If there are no more rows, zero is returned.  If an error occurs, an
    // exception is set and zero is returned.  (To differentiate between the last two, use PyErr_Occurred.)
    SQLRETURN ret = 0;
    Py_ssize_t field_count, i;
    PyObject** apValues;

    Py_BEGIN_ALLOW_THREADS
    ret = SQLFetch(cur->hstmt);
    Py_END_ALLOW_THREADS

    if (cur->cnxn->hdbc == SQL_NULL_HANDLE)
    {
        // The connection was closed by another thread in the ALLOW_THREADS block above.
        return RaiseErrorV(0, ProgrammingError, "The cursor's connection was closed.");
    }

    if (ret == SQL_NO_DATA)
        return 0;

    if (!SQL_SUCCEEDED(ret))
        return RaiseErrorFromHandle("SQLFetch", cur->cnxn->hdbc, cur->hstmt);

    // Convert each column into a Python object.  On a conversion failure, release the
    // values converted so far before propagating the error.
    field_count = PyTuple_GET_SIZE(cur->description);
    apValues = (PyObject**)pyodbc_malloc(sizeof(PyObject*) * field_count);

    if (apValues == 0)
        return PyErr_NoMemory();

    for (i = 0; i < field_count; i++)
    {
        PyObject* value = GetData(cur, i);

        if (!value)
        {
            FreeRowValues(i, apValues);
            return 0;
        }

        apValues[i] = value;
    }

    // Row_InternalNew takes ownership of apValues.
    return (PyObject*)Row_InternalNew(cur->description, cur->map_name_to_index, field_count, apValues);
}


static PyObject* Cursor_fetchlist(Cursor* cur, Py_ssize_t max)
{
    // max
    //   The maximum number of rows to fetch.  If -1, fetch all rows.
    //
    // Returns a list of Rows.  If there are no rows, an empty list is returned.

    PyObject* results;
    PyObject* row;

    results = PyList_New(0);
    if (!results)
        return 0;

    while (max == -1 || max > 0)
    {
        row = Cursor_fetch(cur);

        if (!row)
        {
            // Zero from Cursor_fetch means either "no more rows" or an error; an error has
            // an exception set.
            if (PyErr_Occurred())
            {
                Py_DECREF(results);
                return 0;
            }
            break;
        }

        PyList_Append(results, row);
        Py_DECREF(row);

        if (max != -1)
            max--;
    }

    return results;
}


static PyObject* Cursor_iter(PyObject* self)
{
    // Cursors act as their own iterators.
    Py_INCREF(self);
    return self;
}


static PyObject* Cursor_iternext(PyObject* self)
{
    // Implements the iterator protocol for cursors.  Fetches the next row.  Returns zero without setting an exception
    // when there are no rows.
PyObject* result; Cursor* cursor = Cursor_Validate(self, CURSOR_REQUIRE_RESULTS | CURSOR_RAISE_ERROR); if (!cursor) return 0; result = Cursor_fetch(cursor); return result; } static PyObject* Cursor_fetchone(PyObject* self, PyObject* args) { UNUSED(args); PyObject* row; Cursor* cursor = Cursor_Validate(self, CURSOR_REQUIRE_RESULTS | CURSOR_RAISE_ERROR); if (!cursor) return 0; row = Cursor_fetch(cursor); if (!row) { if (PyErr_Occurred()) return 0; Py_RETURN_NONE; } return row; } static PyObject* Cursor_fetchall(PyObject* self, PyObject* args) { UNUSED(args); PyObject* result; Cursor* cursor = Cursor_Validate(self, CURSOR_REQUIRE_RESULTS | CURSOR_RAISE_ERROR); if (!cursor) return 0; result = Cursor_fetchlist(cursor, -1); return result; } static PyObject* Cursor_fetchmany(PyObject* self, PyObject* args) { long rows; PyObject* result; Cursor* cursor = Cursor_Validate(self, CURSOR_REQUIRE_RESULTS | CURSOR_RAISE_ERROR); if (!cursor) return 0; rows = cursor->arraysize; if (!PyArg_ParseTuple(args, "|l", &rows)) return 0; result = Cursor_fetchlist(cursor, rows); return result; } static char tables_doc[] = "C.tables(table=None, catalog=None, schema=None, tableType=None) --> self\n" "\n" "Executes SQLTables and creates a results set of tables defined in the data\n" "source.\n" "\n" "The table, catalog, and schema interpret the '_' and '%' characters as\n" "wildcards. 
The escape character is driver specific, so use\n" "`Connection.searchescape`.\n" "\n" "Each row fetched has the following columns:\n" " 0) table_cat: The catalog name.\n" " 1) table_schem: The schema name.\n" " 2) table_name: The table name.\n" " 3) table_type: One of 'TABLE', 'VIEW', SYSTEM TABLE', 'GLOBAL TEMPORARY'\n" " 'LOCAL TEMPORARY', 'ALIAS', 'SYNONYM', or a data source-specific type name."; char* Cursor_tables_kwnames[] = { "table", "catalog", "schema", "tableType", 0 }; static PyObject* Cursor_tables(PyObject* self, PyObject* args, PyObject* kwargs) { const char* szCatalog = 0; const char* szSchema = 0; const char* szTableName = 0; const char* szTableType = 0; if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|ssss", Cursor_tables_kwnames, &szTableName, &szCatalog, &szSchema, &szTableType)) return 0; Cursor* cur = Cursor_Validate(self, CURSOR_REQUIRE_OPEN); if (!free_results(cur, FREE_STATEMENT | FREE_PREPARED)) return 0; SQLRETURN ret = 0; Py_BEGIN_ALLOW_THREADS ret = SQLTables(cur->hstmt, (SQLCHAR*)szCatalog, SQL_NTS, (SQLCHAR*)szSchema, SQL_NTS, (SQLCHAR*)szTableName, SQL_NTS, (SQLCHAR*)szTableType, SQL_NTS); Py_END_ALLOW_THREADS if (!SQL_SUCCEEDED(ret)) return RaiseErrorFromHandle("SQLTables", cur->cnxn->hdbc, cur->hstmt); SQLSMALLINT cCols; Py_BEGIN_ALLOW_THREADS ret = SQLNumResultCols(cur->hstmt, &cCols); Py_END_ALLOW_THREADS if (!SQL_SUCCEEDED(ret)) return RaiseErrorFromHandle("SQLNumResultCols", cur->cnxn->hdbc, cur->hstmt); if (!PrepareResults(cur, cCols)) return 0; if (!create_name_map(cur, cCols, true)) return 0; // Return the cursor so the results can be iterated over directly. 
Py_INCREF(cur); return (PyObject*)cur; } static char columns_doc[] = "C.columns(table=None, catalog=None, schema=None, column=None)\n\n" "Creates a results set of column names in specified tables by executing the ODBC SQLColumns function.\n" "Each row fetched has the following columns:\n" " 0) table_cat\n" " 1) table_schem\n" " 2) table_name\n" " 3) column_name\n" " 4) data_type\n" " 5) type_name\n" " 6) column_size\n" " 7) buffer_length\n" " 8) decimal_digits\n" " 9) num_prec_radix\n" " 10) nullable\n" " 11) remarks\n" " 12) column_def\n" " 13) sql_data_type\n" " 14) sql_datetime_sub\n" " 15) char_octet_length\n" " 16) ordinal_position\n" " 17) is_nullable"; char* Cursor_column_kwnames[] = { "table", "catalog", "schema", "column", 0 }; static PyObject* Cursor_columns(PyObject* self, PyObject* args, PyObject* kwargs) { const char* szCatalog = 0; const char* szSchema = 0; const char* szTable = 0; const char* szColumn = 0; if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|ssss", Cursor_column_kwnames, &szTable, &szCatalog, &szSchema, &szColumn)) return 0; Cursor* cur = Cursor_Validate(self, CURSOR_REQUIRE_OPEN); if (!free_results(cur, FREE_STATEMENT | FREE_PREPARED)) return 0; SQLRETURN ret = 0; Py_BEGIN_ALLOW_THREADS ret = SQLColumns(cur->hstmt, (SQLCHAR*)szCatalog, SQL_NTS, (SQLCHAR*)szSchema, SQL_NTS, (SQLCHAR*)szTable, SQL_NTS, (SQLCHAR*)szColumn, SQL_NTS); Py_END_ALLOW_THREADS if (!SQL_SUCCEEDED(ret)) return RaiseErrorFromHandle("SQLColumns", cur->cnxn->hdbc, cur->hstmt); SQLSMALLINT cCols; Py_BEGIN_ALLOW_THREADS ret = SQLNumResultCols(cur->hstmt, &cCols); Py_END_ALLOW_THREADS if (!SQL_SUCCEEDED(ret)) return RaiseErrorFromHandle("SQLNumResultCols", cur->cnxn->hdbc, cur->hstmt); if (!PrepareResults(cur, cCols)) return 0; if (!create_name_map(cur, cCols, true)) return 0; // Return the cursor so the results can be iterated over directly. 
Py_INCREF(cur); return (PyObject*)cur; } static char statistics_doc[] = "C.statistics(catalog=None, schema=None, unique=False, quick=True) --> self\n\n" "Creates a results set of statistics about a single table and the indexes associated with \n" "the table by executing SQLStatistics.\n" "unique\n" " If True, only unique indexes are retured. Otherwise all indexes are returned.\n" "quick\n" " If True, CARDINALITY and PAGES are returned only if they are readily available\n" " from the server\n" "\n" "Each row fetched has the following columns:\n\n" " 0) table_cat\n" " 1) table_schem\n" " 2) table_name\n" " 3) non_unique\n" " 4) index_qualifier\n" " 5) index_name\n" " 6) type\n" " 7) ordinal_position\n" " 8) column_name\n" " 9) asc_or_desc\n" " 10) cardinality\n" " 11) pages\n" " 12) filter_condition"; char* Cursor_statistics_kwnames[] = { "table", "catalog", "schema", "unique", "quick", 0 }; static PyObject* Cursor_statistics(PyObject* self, PyObject* args, PyObject* kwargs) { const char* szCatalog = 0; const char* szSchema = 0; const char* szTable = 0; PyObject* pUnique = Py_False; PyObject* pQuick = Py_True; if (!PyArg_ParseTupleAndKeywords(args, kwargs, "s|ssOO", Cursor_statistics_kwnames, &szTable, &szCatalog, &szSchema, &pUnique, &pQuick)) return 0; Cursor* cur = Cursor_Validate(self, CURSOR_REQUIRE_OPEN); if (!free_results(cur, FREE_STATEMENT | FREE_PREPARED)) return 0; SQLUSMALLINT nUnique = (SQLUSMALLINT)(PyObject_IsTrue(pUnique) ? SQL_INDEX_UNIQUE : SQL_INDEX_ALL); SQLUSMALLINT nReserved = (SQLUSMALLINT)(PyObject_IsTrue(pQuick) ? 
SQL_QUICK : SQL_ENSURE); SQLRETURN ret = 0; Py_BEGIN_ALLOW_THREADS ret = SQLStatistics(cur->hstmt, (SQLCHAR*)szCatalog, SQL_NTS, (SQLCHAR*)szSchema, SQL_NTS, (SQLCHAR*)szTable, SQL_NTS, nUnique, nReserved); Py_END_ALLOW_THREADS if (!SQL_SUCCEEDED(ret)) return RaiseErrorFromHandle("SQLStatistics", cur->cnxn->hdbc, cur->hstmt); SQLSMALLINT cCols; Py_BEGIN_ALLOW_THREADS ret = SQLNumResultCols(cur->hstmt, &cCols); Py_END_ALLOW_THREADS if (!SQL_SUCCEEDED(ret)) return RaiseErrorFromHandle("SQLNumResultCols", cur->cnxn->hdbc, cur->hstmt); if (!PrepareResults(cur, cCols)) return 0; if (!create_name_map(cur, cCols, true)) return 0; // Return the cursor so the results can be iterated over directly. Py_INCREF(cur); return (PyObject*)cur; } static char rowIdColumns_doc[] = "C.rowIdColumns(table, catalog=None, schema=None, nullable=True) -->\n\n" "Executes SQLSpecialColumns with SQL_BEST_ROWID which creates a result set of columns that\n" "uniquely identify a row\n\n" "Each row fetched has the following columns:\n" " 0) scope\n" " 1) column_name\n" " 2) data_type\n" " 3) type_name\n" " 4) column_size\n" " 5) buffer_length\n" " 6) decimal_digits\n" " 7) pseudo_column"; static char rowVerColumns_doc[] = "C.rowIdColumns(table, catalog=None, schema=None, nullable=True) --> self\n\n" "Executes SQLSpecialColumns with SQL_ROWVER which creates a result set of columns that\n" "are automatically updated when any value in the row is updated.\n\n" "Each row fetched has the following columns:\n" " 0) scope\n" " 1) column_name\n" " 2) data_type\n" " 3) type_name\n" " 4) column_size\n" " 5) buffer_length\n" " 6) decimal_digits\n" " 7) pseudo_column"; char* Cursor_specialColumn_kwnames[] = { "table", "catalog", "schema", "nullable", 0 }; static PyObject* _specialColumns(PyObject* self, PyObject* args, PyObject* kwargs, SQLUSMALLINT nIdType) { const char* szTable; const char* szCatalog = 0; const char* szSchema = 0; PyObject* pNullable = Py_True; if (!PyArg_ParseTupleAndKeywords(args, kwargs, 
"s|ssO", Cursor_specialColumn_kwnames, &szTable, &szCatalog, &szSchema, &pNullable)) return 0; Cursor* cur = Cursor_Validate(self, CURSOR_REQUIRE_OPEN); if (!free_results(cur, FREE_STATEMENT | FREE_PREPARED)) return 0; SQLRETURN ret = 0; SQLUSMALLINT nNullable = (SQLUSMALLINT)(PyObject_IsTrue(pNullable) ? SQL_NULLABLE : SQL_NO_NULLS); Py_BEGIN_ALLOW_THREADS ret = SQLSpecialColumns(cur->hstmt, nIdType, (SQLCHAR*)szCatalog, SQL_NTS, (SQLCHAR*)szSchema, SQL_NTS, (SQLCHAR*)szTable, SQL_NTS, SQL_SCOPE_TRANSACTION, nNullable); Py_END_ALLOW_THREADS if (!SQL_SUCCEEDED(ret)) return RaiseErrorFromHandle("SQLSpecialColumns", cur->cnxn->hdbc, cur->hstmt); SQLSMALLINT cCols; Py_BEGIN_ALLOW_THREADS ret = SQLNumResultCols(cur->hstmt, &cCols); Py_END_ALLOW_THREADS if (!SQL_SUCCEEDED(ret)) return RaiseErrorFromHandle("SQLNumResultCols", cur->cnxn->hdbc, cur->hstmt); if (!PrepareResults(cur, cCols)) return 0; if (!create_name_map(cur, cCols, true)) return 0; // Return the cursor so the results can be iterated over directly. 
Py_INCREF(cur); return (PyObject*)cur; } static PyObject* Cursor_rowIdColumns(PyObject* self, PyObject* args, PyObject* kwargs) { return _specialColumns(self, args, kwargs, SQL_BEST_ROWID); } static PyObject* Cursor_rowVerColumns(PyObject* self, PyObject* args, PyObject* kwargs) { return _specialColumns(self, args, kwargs, SQL_ROWVER); } static char primaryKeys_doc[] = "C.primaryKeys(table, catalog=None, schema=None) --> self\n\n" "Creates a results set of column names that make up the primary key for a table\n" "by executing the SQLPrimaryKeys function.\n" "Each row fetched has the following columns:\n" " 0) table_cat\n" " 1) table_schem\n" " 2) table_name\n" " 3) column_name\n" " 4) key_seq\n" " 5) pk_name"; char* Cursor_primaryKeys_kwnames[] = { "table", "catalog", "schema", 0 }; static PyObject* Cursor_primaryKeys(PyObject* self, PyObject* args, PyObject* kwargs) { const char* szTable; const char* szCatalog = 0; const char* szSchema = 0; if (!PyArg_ParseTupleAndKeywords(args, kwargs, "s|ss", Cursor_primaryKeys_kwnames, &szTable, &szCatalog, &szSchema)) return 0; Cursor* cur = Cursor_Validate(self, CURSOR_REQUIRE_OPEN); if (!free_results(cur, FREE_STATEMENT | FREE_PREPARED)) return 0; SQLRETURN ret = 0; Py_BEGIN_ALLOW_THREADS ret = SQLPrimaryKeys(cur->hstmt, (SQLCHAR*)szCatalog, SQL_NTS, (SQLCHAR*)szSchema, SQL_NTS, (SQLCHAR*)szTable, SQL_NTS); Py_END_ALLOW_THREADS if (!SQL_SUCCEEDED(ret)) return RaiseErrorFromHandle("SQLPrimaryKeys", cur->cnxn->hdbc, cur->hstmt); SQLSMALLINT cCols; Py_BEGIN_ALLOW_THREADS ret = SQLNumResultCols(cur->hstmt, &cCols); Py_END_ALLOW_THREADS if (!SQL_SUCCEEDED(ret)) return RaiseErrorFromHandle("SQLNumResultCols", cur->cnxn->hdbc, cur->hstmt); if (!PrepareResults(cur, cCols)) return 0; if (!create_name_map(cur, cCols, true)) return 0; // Return the cursor so the results can be iterated over directly. 
Py_INCREF(cur); return (PyObject*)cur; } static char foreignKeys_doc[] = "C.foreignKeys(table=None, catalog=None, schema=None,\n" " foreignTable=None, foreignCatalog=None, foreignSchema=None) --> self\n\n" "Executes the SQLForeignKeys function and creates a results set of column names\n" "that are foreign keys in the specified table (columns in the specified table\n" "that refer to primary keys in other tables) or foreign keys in other tables\n" "that refer to the primary key in the specified table.\n\n" "Each row fetched has the following columns:\n" " 0) pktable_cat\n" " 1) pktable_schem\n" " 2) pktable_name\n" " 3) pkcolumn_name\n" " 4) fktable_cat\n" " 5) fktable_schem\n" " 6) fktable_name\n" " 7) fkcolumn_name\n" " 8) key_seq\n" " 9) update_rule\n" " 10) delete_rule\n" " 11) fk_name\n" " 12) pk_name\n" " 13) deferrability"; char* Cursor_foreignKeys_kwnames[] = { "table", "catalog", "schema", "foreignTable", "foreignCatalog", "foreignSchema", 0 }; static PyObject* Cursor_foreignKeys(PyObject* self, PyObject* args, PyObject* kwargs) { const char* szTable = 0; const char* szCatalog = 0; const char* szSchema = 0; const char* szForeignTable = 0; const char* szForeignCatalog = 0; const char* szForeignSchema = 0; if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|ssssss", Cursor_foreignKeys_kwnames, &szTable, &szCatalog, &szSchema, &szForeignTable, &szForeignCatalog, &szForeignSchema)) return 0; Cursor* cur = Cursor_Validate(self, CURSOR_REQUIRE_OPEN); if (!free_results(cur, FREE_STATEMENT | FREE_PREPARED)) return 0; SQLRETURN ret = 0; Py_BEGIN_ALLOW_THREADS ret = SQLForeignKeys(cur->hstmt, (SQLCHAR*)szCatalog, SQL_NTS, (SQLCHAR*)szSchema, SQL_NTS, (SQLCHAR*)szTable, SQL_NTS, (SQLCHAR*)szForeignCatalog, SQL_NTS, (SQLCHAR*)szForeignSchema, SQL_NTS, (SQLCHAR*)szForeignTable, SQL_NTS); Py_END_ALLOW_THREADS if (!SQL_SUCCEEDED(ret)) return RaiseErrorFromHandle("SQLForeignKeys", cur->cnxn->hdbc, cur->hstmt); SQLSMALLINT cCols; Py_BEGIN_ALLOW_THREADS ret = 
SQLNumResultCols(cur->hstmt, &cCols); Py_END_ALLOW_THREADS if (!SQL_SUCCEEDED(ret)) return RaiseErrorFromHandle("SQLNumResultCols", cur->cnxn->hdbc, cur->hstmt); if (!PrepareResults(cur, cCols)) return 0; if (!create_name_map(cur, cCols, true)) return 0; // Return the cursor so the results can be iterated over directly. Py_INCREF(cur); return (PyObject*)cur; } static char getTypeInfo_doc[] = "C.getTypeInfo(sqlType=None) --> self\n\n" "Executes SQLGetTypeInfo a creates a result set with information about the\n" "specified data type or all data types supported by the ODBC driver if not\n" "specified.\n\n" "Each row fetched has the following columns:\n" " 0) type_name\n" " 1) data_type\n" " 2) column_size\n" " 3) literal_prefix\n" " 4) literal_suffix\n" " 5) create_params\n" " 6) nullable\n" " 7) case_sensitive\n" " 8) searchable\n" " 9) unsigned_attribute\n" "10) fixed_prec_scale\n" "11) auto_unique_value\n" "12) local_type_name\n" "13) minimum_scale\n" "14) maximum_scale\n" "15) sql_data_type\n" "16) sql_datetime_sub\n" "17) num_prec_radix\n" "18) interval_precision"; static PyObject* Cursor_getTypeInfo(PyObject* self, PyObject* args, PyObject* kwargs) { UNUSED(kwargs); SQLSMALLINT nDataType = SQL_ALL_TYPES; if (!PyArg_ParseTuple(args, "|i", &nDataType)) return 0; Cursor* cur = Cursor_Validate(self, CURSOR_REQUIRE_OPEN); if (!free_results(cur, FREE_STATEMENT | FREE_PREPARED)) return 0; SQLRETURN ret = 0; Py_BEGIN_ALLOW_THREADS ret = SQLGetTypeInfo(cur->hstmt, nDataType); Py_END_ALLOW_THREADS if (!SQL_SUCCEEDED(ret)) return RaiseErrorFromHandle("SQLGetTypeInfo", cur->cnxn->hdbc, cur->hstmt); SQLSMALLINT cCols; Py_BEGIN_ALLOW_THREADS ret = SQLNumResultCols(cur->hstmt, &cCols); Py_END_ALLOW_THREADS if (!SQL_SUCCEEDED(ret)) return RaiseErrorFromHandle("SQLNumResultCols", cur->cnxn->hdbc, cur->hstmt); if (!PrepareResults(cur, cCols)) return 0; if (!create_name_map(cur, cCols, true)) return 0; // Return the cursor so the results can be iterated over directly. 
Py_INCREF(cur); return (PyObject*)cur; } static PyObject* Cursor_nextset(PyObject* self, PyObject* args) { UNUSED(args); Cursor* cur = Cursor_Validate(self, 0); if (!cur) return 0; SQLRETURN ret = 0; Py_BEGIN_ALLOW_THREADS ret = SQLMoreResults(cur->hstmt); Py_END_ALLOW_THREADS if (ret == SQL_NO_DATA) { free_results(cur, FREE_STATEMENT | KEEP_PREPARED); Py_RETURN_FALSE; } SQLSMALLINT cCols; Py_BEGIN_ALLOW_THREADS ret = SQLNumResultCols(cur->hstmt, &cCols); Py_END_ALLOW_THREADS if (!SQL_SUCCEEDED(ret)) { // Note: The SQL Server driver sometimes returns HY007 here if multiple statements (separated by ;) were // submitted. This is not documented, but I've seen it with multiple successful inserts. free_results(cur, FREE_STATEMENT | KEEP_PREPARED); return RaiseErrorFromHandle("SQLNumResultCols", cur->cnxn->hdbc, cur->hstmt); } free_results(cur, KEEP_STATEMENT | KEEP_PREPARED); if (cCols != 0) { // A result set was created. if (!PrepareResults(cur, cCols)) return 0; if (!create_name_map(cur, cCols, lowercase())) return 0; } SQLLEN cRows; Py_BEGIN_ALLOW_THREADS ret = SQLRowCount(cur->hstmt, &cRows); Py_END_ALLOW_THREADS cur->rowcount = (int)cRows; if (!SQL_SUCCEEDED(ret)) return RaiseErrorFromHandle("SQLRowCount", cur->cnxn->hdbc, cur->hstmt); Py_RETURN_TRUE; } static char procedureColumns_doc[] = "C.procedureColumns(procedure=None, catalog=None, schema=None) --> self\n\n" "Executes SQLProcedureColumns and creates a result set of information\n" "about stored procedure columns and results.\n" " 0) procedure_cat\n" " 1) procedure_schem\n" " 2) procedure_name\n" " 3) column_name\n" " 4) column_type\n" " 5) data_type\n" " 6) type_name\n" " 7) column_size\n" " 8) buffer_length\n" " 9) decimal_digits\n" " 10) num_prec_radix\n" " 11) nullable\n" " 12) remarks\n" " 13) column_def\n" " 14) sql_data_type\n" " 15) sql_datetime_sub\n" " 16) char_octet_length\n" " 17) ordinal_position\n" " 18) is_nullable"; char* Cursor_procedureColumns_kwnames[] = { "procedure", "catalog", "schema", 0 
}; static PyObject* Cursor_procedureColumns(PyObject* self, PyObject* args, PyObject* kwargs) { const char* szProcedure = 0; const char* szCatalog = 0; const char* szSchema = 0; if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|sss", Cursor_procedureColumns_kwnames, &szProcedure, &szCatalog, &szSchema)) return 0; Cursor* cur = Cursor_Validate(self, CURSOR_REQUIRE_OPEN); if (!free_results(cur, FREE_STATEMENT | FREE_PREPARED)) return 0; SQLRETURN ret = 0; Py_BEGIN_ALLOW_THREADS ret = SQLProcedureColumns(cur->hstmt, (SQLCHAR*)szCatalog, SQL_NTS, (SQLCHAR*)szSchema, SQL_NTS, (SQLCHAR*)szProcedure, SQL_NTS, 0, 0); Py_END_ALLOW_THREADS if (!SQL_SUCCEEDED(ret)) return RaiseErrorFromHandle("SQLProcedureColumns", cur->cnxn->hdbc, cur->hstmt); SQLSMALLINT cCols; Py_BEGIN_ALLOW_THREADS ret = SQLNumResultCols(cur->hstmt, &cCols); Py_END_ALLOW_THREADS if (!SQL_SUCCEEDED(ret)) return RaiseErrorFromHandle("SQLNumResultCols", cur->cnxn->hdbc, cur->hstmt); if (!PrepareResults(cur, cCols)) return 0; if (!create_name_map(cur, cCols, true)) return 0; // Return the cursor so the results can be iterated over directly. 
Py_INCREF(cur); return (PyObject*)cur; } static char procedures_doc[] = "C.procedures(procedure=None, catalog=None, schema=None) --> self\n\n" "Executes SQLProcedures and creates a result set of information about the\n" "procedures in the data source.\n" "Each row fetched has the following columns:\n" " 0) procedure_cat\n" " 1) procedure_schem\n" " 2) procedure_name\n" " 3) num_input_params\n" " 4) num_output_params\n" " 5) num_result_sets\n" " 6) remarks\n" " 7) procedure_type"; char* Cursor_procedures_kwnames[] = { "procedure", "catalog", "schema", 0 }; static PyObject* Cursor_procedures(PyObject* self, PyObject* args, PyObject* kwargs) { const char* szProcedure = 0; const char* szCatalog = 0; const char* szSchema = 0; if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|sss", Cursor_procedures_kwnames, &szProcedure, &szCatalog, &szSchema)) return 0; Cursor* cur = Cursor_Validate(self, CURSOR_REQUIRE_OPEN); if (!free_results(cur, FREE_STATEMENT | FREE_PREPARED)) return 0; SQLRETURN ret = 0; Py_BEGIN_ALLOW_THREADS ret = SQLProcedures(cur->hstmt, (SQLCHAR*)szCatalog, SQL_NTS, (SQLCHAR*)szSchema, SQL_NTS, (SQLCHAR*)szProcedure, SQL_NTS); Py_END_ALLOW_THREADS if (!SQL_SUCCEEDED(ret)) return RaiseErrorFromHandle("SQLProcedures", cur->cnxn->hdbc, cur->hstmt); SQLSMALLINT cCols; Py_BEGIN_ALLOW_THREADS ret = SQLNumResultCols(cur->hstmt, &cCols); Py_END_ALLOW_THREADS if (!SQL_SUCCEEDED(ret)) return RaiseErrorFromHandle("SQLNumResultCols", cur->cnxn->hdbc, cur->hstmt); if (!PrepareResults(cur, cCols)) return 0; if (!create_name_map(cur, cCols, true)) return 0; // Return the cursor so the results can be iterated over directly. 
Py_INCREF(cur); return (PyObject*)cur; } static char skip_doc[] = "skip(count) --> None\n" \ "\n" \ "Skips the next `count` records by calling SQLFetchScroll with SQL_FETCH_NEXT.\n" "For convenience, skip(0) is accepted and will do nothing."; static PyObject* Cursor_skip(PyObject* self, PyObject* args) { Cursor* cursor = Cursor_Validate(self, CURSOR_REQUIRE_RESULTS | CURSOR_RAISE_ERROR); if (!cursor) return 0; int count; if (!PyArg_ParseTuple(args, "i", &count)) return 0; if (count == 0) Py_RETURN_NONE; // Note: I'm not sure about the performance implications of looping here -- I certainly would rather use // SQLFetchScroll(SQL_FETCH_RELATIVE, count), but it requires scrollable cursors which are often slower. I would // not expect skip to be used in performance intensive code since different SQL would probably be the "right" // answer instead of skip anyway. SQLRETURN ret = SQL_SUCCESS; Py_BEGIN_ALLOW_THREADS for (int i = 0; i < count && SQL_SUCCEEDED(ret); i++) ret = SQLFetchScroll(cursor->hstmt, SQL_FETCH_NEXT, 0); Py_END_ALLOW_THREADS if (!SQL_SUCCEEDED(ret) && ret != SQL_NO_DATA) return RaiseErrorFromHandle("SQLFetchScroll", cursor->cnxn->hdbc, cursor->hstmt); Py_RETURN_NONE; } static const char* commit_doc = "Commits any pending transaction to the database on the current connection,\n" "including those from other cursors.\n"; static PyObject* Cursor_commit(PyObject* self, PyObject* args) { Cursor* cur = Cursor_Validate(self, CURSOR_REQUIRE_OPEN | CURSOR_RAISE_ERROR); if (!cur) return 0; return Connection_endtrans(cur->cnxn, SQL_COMMIT); } static char rollback_doc[] = "Rolls back any pending transaction to the database on the current connection,\n" "including those from other cursors.\n"; static PyObject* Cursor_rollback(PyObject* self, PyObject* args) { Cursor* cur = Cursor_Validate(self, CURSOR_REQUIRE_OPEN | CURSOR_RAISE_ERROR); if (!cur) return 0; return Connection_endtrans(cur->cnxn, SQL_ROLLBACK); } static PyObject* Cursor_ignored(PyObject* self, 
PyObject* args) { UNUSED(self, args); Py_RETURN_NONE; } static char rowcount_doc[] = "This read-only attribute specifies the number of rows the last DML statement\n" " (INSERT, UPDATE, DELETE) affected. This is set to -1 for SELECT statements."; static char description_doc[] = "This read-only attribute is a sequence of 7-item sequences. Each of these\n" \ "sequences contains information describing one result column: (name, type_code,\n" \ "display_size, internal_size, precision, scale, null_ok). All values except\n" \ "name, type_code, and internal_size are None. The type_code entry will be the\n" \ "type object used to create values for that column (e.g. `str` or\n" \ "`datetime.datetime`).\n" \ "\n" \ "This attribute will be None for operations that do not return rows or if the\n" \ "cursor has not had an operation invoked via the execute() method yet.\n" \ "\n" \ "The type_code can be interpreted by comparing it to the Type Objects defined in\n" \ "the DB API and defined the pyodbc module: Date, Time, Timestamp, Binary,\n" \ "STRING, BINARY, NUMBER, and DATETIME."; static char arraysize_doc[] = "This read/write attribute specifies the number of rows to fetch at a time with\n" \ "fetchmany(). 
It defaults to 1 meaning to fetch a single row at a time."; static char connection_doc[] = "This read-only attribute return a reference to the Connection object on which\n" \ "the cursor was created.\n" \ "\n" \ "The attribute simplifies writing polymorph code in multi-connection\n" \ "environments."; static PyMemberDef Cursor_members[] = { {"rowcount", T_INT, offsetof(Cursor, rowcount), READONLY, rowcount_doc }, {"description", T_OBJECT_EX, offsetof(Cursor, description), READONLY, description_doc }, {"arraysize", T_INT, offsetof(Cursor, arraysize), 0, arraysize_doc }, {"connection", T_OBJECT_EX, offsetof(Cursor, cnxn), READONLY, connection_doc }, { 0 } }; static PyObject* Cursor_getnoscan(PyObject* self, void *closure) { UNUSED(closure); Cursor* cursor = Cursor_Validate(self, CURSOR_REQUIRE_OPEN | CURSOR_RAISE_ERROR); if (!cursor) return 0; SQLUINTEGER noscan = SQL_NOSCAN_OFF; SQLRETURN ret; Py_BEGIN_ALLOW_THREADS ret = SQLGetStmtAttr(cursor->hstmt, SQL_ATTR_NOSCAN, (SQLPOINTER)&noscan, sizeof(SQLUINTEGER), 0); Py_END_ALLOW_THREADS if (!SQL_SUCCEEDED(ret)) { // Not supported? We're going to assume 'no'. Py_RETURN_FALSE; } if (noscan == SQL_NOSCAN_OFF) Py_RETURN_FALSE; Py_RETURN_TRUE; } static int Cursor_setnoscan(PyObject* self, PyObject* value, void *closure) { UNUSED(closure); Cursor* cursor = Cursor_Validate(self, CURSOR_REQUIRE_OPEN | CURSOR_RAISE_ERROR); if (!cursor) return -1; if (value == 0) { PyErr_SetString(PyExc_TypeError, "Cannot delete the noscan attribute"); return -1; } uintptr_t noscan = PyObject_IsTrue(value) ? 
SQL_NOSCAN_ON : SQL_NOSCAN_OFF; SQLRETURN ret; Py_BEGIN_ALLOW_THREADS ret = SQLSetStmtAttr(cursor->hstmt, SQL_ATTR_NOSCAN, (SQLPOINTER)noscan, 0); Py_END_ALLOW_THREADS if (!SQL_SUCCEEDED(ret)) { RaiseErrorFromHandle("SQLSetStmtAttr(SQL_ATTR_NOSCAN)", cursor->cnxn->hdbc, cursor->hstmt); return -1; } return 0; } static PyGetSetDef Cursor_getsetters[] = { {"noscan", Cursor_getnoscan, Cursor_setnoscan, "NOSCAN statement attr", 0}, { 0 } }; static char executemany_doc[] = "executemany(sql, seq_of_params) --> Cursor | count | None\n" \ "\n" \ "Prepare a database query or command and then execute it against all parameter\n" \ "sequences found in the sequence seq_of_params.\n" \ "\n" \ "Only the result of the final execution is returned. See `execute` for a\n" \ "description of parameter passing the return value."; static char nextset_doc[] = "nextset() --> True | None\n" \ "\n" \ "Jumps to the next resultset if the last sql has multiple resultset." \ "Returns True if there is a next resultset otherwise None."; static char ignored_doc[] = "Ignored."; static char fetchone_doc[] = "fetchone() --> Row | None\n" \ "\n" \ "Fetch the next row of a query result set, returning a single Row instance, or\n" \ "None when no more data is available.\n" \ "\n" \ "A ProgrammingError exception is raised if the previous call to execute() did\n" \ "not produce any result set or no call was issued yet."; static char fetchall_doc[] = "fetchmany(size=cursor.arraysize) --> list of Rows\n" \ "\n" \ "Fetch the next set of rows of a query result, returning a list of Row\n" \ "instances. An empty list is returned when no more rows are available.\n" \ "\n" \ "The number of rows to fetch per call is specified by the parameter. If it is\n" \ "not given, the cursor's arraysize determines the number of rows to be\n" \ "fetched. The method should try to fetch as many rows as indicated by the size\n" \ "parameter. 
If this is not possible due to the specified number of rows not\n" \ "being available, fewer rows may be returned.\n" \ "\n" \ "A ProgrammingError exception is raised if the previous call to execute() did\n" \ "not produce any result set or no call was issued yet."; static char fetchmany_doc[] = "fetchmany() --> list of Rows\n" \ "\n" \ "Fetch all remaining rows of a query result, returning them as a list of Rows.\n" \ "An empty list is returned if there are no more rows.\n" \ "\n" \ "A ProgrammingError exception is raised if the previous call to execute() did\n" \ "not produce any result set or no call was issued yet."; static char enter_doc[] = "__enter__() -> self."; static PyObject* Cursor_enter(PyObject* self, PyObject* args) { UNUSED(args); Py_INCREF(self); return self; } static char exit_doc[] = "__exit__(*excinfo) -> None. Commits the connection if necessary.."; static PyObject* Cursor_exit(PyObject* self, PyObject* args) { Cursor* cursor = Cursor_Validate(self, CURSOR_REQUIRE_OPEN | CURSOR_RAISE_ERROR); if (!cursor) return 0; // If an error has occurred, `args` will be a tuple of 3 values. Otherwise it will be a tuple of 3 `None`s. 
I(PyTuple_Check(args)); if (cursor->cnxn->nAutoCommit == SQL_AUTOCOMMIT_OFF && PyTuple_GetItem(args, 0) == Py_None) { SQLRETURN ret; Py_BEGIN_ALLOW_THREADS ret = SQLEndTran(SQL_HANDLE_DBC, cursor->cnxn->hdbc, SQL_COMMIT); Py_END_ALLOW_THREADS if (!SQL_SUCCEEDED(ret)) return RaiseErrorFromHandle("SQLEndTran(SQL_COMMIT)", cursor->cnxn->hdbc, cursor->hstmt); } Py_RETURN_NONE; } static PyMethodDef Cursor_methods[] = { { "close", (PyCFunction)Cursor_close, METH_NOARGS, close_doc }, { "execute", (PyCFunction)Cursor_execute, METH_VARARGS, execute_doc }, { "executemany", (PyCFunction)Cursor_executemany, METH_VARARGS, executemany_doc }, { "setinputsizes", (PyCFunction)Cursor_ignored, METH_VARARGS, ignored_doc }, { "setoutputsize", (PyCFunction)Cursor_ignored, METH_VARARGS, ignored_doc }, { "fetchone", (PyCFunction)Cursor_fetchone, METH_NOARGS, fetchone_doc }, { "fetchall", (PyCFunction)Cursor_fetchall, METH_NOARGS, fetchall_doc }, { "fetchmany", (PyCFunction)Cursor_fetchmany, METH_VARARGS, fetchmany_doc }, { "nextset", (PyCFunction)Cursor_nextset, METH_NOARGS, nextset_doc }, { "tables", (PyCFunction)Cursor_tables, METH_VARARGS|METH_KEYWORDS, tables_doc }, { "columns", (PyCFunction)Cursor_columns, METH_VARARGS|METH_KEYWORDS, columns_doc }, { "statistics", (PyCFunction)Cursor_statistics, METH_VARARGS|METH_KEYWORDS, statistics_doc }, { "rowIdColumns", (PyCFunction)Cursor_rowIdColumns, METH_VARARGS|METH_KEYWORDS, rowIdColumns_doc }, { "rowVerColumns", (PyCFunction)Cursor_rowVerColumns, METH_VARARGS|METH_KEYWORDS, rowVerColumns_doc }, { "primaryKeys", (PyCFunction)Cursor_primaryKeys, METH_VARARGS|METH_KEYWORDS, primaryKeys_doc }, { "foreignKeys", (PyCFunction)Cursor_foreignKeys, METH_VARARGS|METH_KEYWORDS, foreignKeys_doc }, { "getTypeInfo", (PyCFunction)Cursor_getTypeInfo, METH_VARARGS|METH_KEYWORDS, getTypeInfo_doc }, { "procedures", (PyCFunction)Cursor_procedures, METH_VARARGS|METH_KEYWORDS, procedures_doc }, { "procedureColumns", (PyCFunction)Cursor_procedureColumns, 
METH_VARARGS|METH_KEYWORDS, procedureColumns_doc }, { "skip", (PyCFunction)Cursor_skip, METH_VARARGS, skip_doc }, { "commit", (PyCFunction)Cursor_commit, METH_NOARGS, commit_doc }, { "rollback", (PyCFunction)Cursor_rollback, METH_NOARGS, rollback_doc }, { "__enter__", Cursor_enter, METH_NOARGS, enter_doc }, { "__exit__", Cursor_exit, METH_VARARGS, exit_doc }, { 0, 0, 0, 0 } }; static char cursor_doc[] = "Cursor objects represent a database cursor, which is used to manage the context\n" \ "of a fetch operation. Cursors created from the same connection are not\n" \ "isolated, i.e., any changes done to the database by a cursor are immediately\n" \ "visible by the other cursors. Cursors created from different connections are\n" \ "isolated.\n" \ "\n" \ "Cursors implement the iterator protocol, so results can be iterated:\n" \ "\n" \ " cursor.execute(sql)\n" \ " for row in cursor:\n" \ " print row[0]"; PyTypeObject CursorType = { PyVarObject_HEAD_INIT(0, 0) "pyodbc.Cursor", // tp_name sizeof(Cursor), // tp_basicsize 0, // tp_itemsize (destructor)Cursor_dealloc, // destructor tp_dealloc 0, // tp_print 0, // tp_getattr 0, // tp_setattr 0, // tp_compare 0, // tp_repr 0, // tp_as_number 0, // tp_as_sequence 0, // tp_as_mapping 0, // tp_hash 0, // tp_call 0, // tp_str 0, // tp_getattro 0, // tp_setattro 0, // tp_as_buffer #if defined(Py_TPFLAGS_HAVE_ITER) Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_ITER, #else Py_TPFLAGS_DEFAULT, #endif cursor_doc, // tp_doc 0, // tp_traverse 0, // tp_clear 0, // tp_richcompare 0, // tp_weaklistoffset Cursor_iter, // tp_iter Cursor_iternext, // tp_iternext Cursor_methods, // tp_methods Cursor_members, // tp_members Cursor_getsetters, // tp_getset 0, // tp_base 0, // tp_dict 0, // tp_descr_get 0, // tp_descr_set 0, // tp_dictoffset 0, // tp_init 0, // tp_alloc 0, // tp_new 0, // tp_free 0, // tp_is_gc 0, // tp_bases 0, // tp_mro 0, // tp_cache 0, // tp_subclasses 0, // tp_weaklist }; Cursor* Cursor_New(Connection* cnxn) { // Exported to allow the 
connection class to create cursors. #ifdef _MSC_VER #pragma warning(disable : 4365) #endif Cursor* cur = PyObject_NEW(Cursor, &CursorType); #ifdef _MSC_VER #pragma warning(default : 4365) #endif if (cur) { cur->cnxn = cnxn; cur->hstmt = SQL_NULL_HANDLE; cur->description = Py_None; cur->pPreparedSQL = 0; cur->paramcount = 0; cur->paramtypes = 0; cur->paramInfos = 0; cur->colinfos = 0; cur->arraysize = 1; cur->rowcount = -1; cur->map_name_to_index = 0; Py_INCREF(cnxn); Py_INCREF(cur->description); SQLRETURN ret; Py_BEGIN_ALLOW_THREADS ret = SQLAllocHandle(SQL_HANDLE_STMT, cnxn->hdbc, &cur->hstmt); Py_END_ALLOW_THREADS if (!SQL_SUCCEEDED(ret)) { RaiseErrorFromHandle("SQLAllocHandle", cnxn->hdbc, SQL_NULL_HANDLE); Py_DECREF(cur); return 0; } if (cnxn->timeout) { Py_BEGIN_ALLOW_THREADS ret = SQLSetStmtAttr(cur->hstmt, SQL_ATTR_QUERY_TIMEOUT, (SQLPOINTER)cnxn->timeout, 0); Py_END_ALLOW_THREADS if (!SQL_SUCCEEDED(ret)) { RaiseErrorFromHandle("SQLSetStmtAttr(SQL_ATTR_QUERY_TIMEOUT)", cnxn->hdbc, cur->hstmt); Py_DECREF(cur); return 0; } } TRACE("cursor.new cnxn=%p hdbc=%d cursor=%p hstmt=%d\n", (Connection*)cur->cnxn, ((Connection*)cur->cnxn)->hdbc, cur, cur->hstmt); } return cur; } void Cursor_init() { PyDateTime_IMPORT; } pyodbc-3.0.7/src/errors.cpp0000666000175000017500000002136612031131304014262 0ustar dokodoko #include "pyodbc.h" #include "errors.h" #include "pyodbcmodule.h" // Exceptions struct SqlStateMapping { char* prefix; size_t prefix_len; PyObject** pexc_class; // Note: Double indirection (pexc_class) necessary because the pointer values are not // initialized during startup }; static const struct SqlStateMapping sql_state_mapping[] = { { "0A000", 5, &NotSupportedError }, { "40002", 5, &IntegrityError }, { "22", 2, &DataError }, { "23", 2, &IntegrityError }, { "24", 2, &ProgrammingError }, { "25", 2, &ProgrammingError }, { "42", 2, &ProgrammingError }, { "HYT00", 5, &OperationalError }, { "HYT01", 5, &OperationalError }, }; static PyObject* 
ExceptionFromSqlState(const char* sqlstate) { // Returns the appropriate Python exception class given a SQLSTATE value. if (sqlstate && *sqlstate) { for (size_t i = 0; i < _countof(sql_state_mapping); i++) if (memcmp(sqlstate, sql_state_mapping[i].prefix, sql_state_mapping[i].prefix_len) == 0) return *sql_state_mapping[i].pexc_class; } return Error; } PyObject* RaiseErrorV(const char* sqlstate, PyObject* exc_class, const char* format, ...) { PyObject *pAttrs = 0, *pError = 0; if (!sqlstate || !*sqlstate) sqlstate = "HY000"; if (!exc_class) exc_class = ExceptionFromSqlState(sqlstate); // Note: Don't use any native strprintf routines. With Py_ssize_t, we need "%zd", but VC .NET doesn't support it. // PyString_FromFormatV already takes this into account. va_list marker; va_start(marker, format); PyObject* pMsg = PyString_FromFormatV(format, marker); va_end(marker); if (!pMsg) { PyErr_NoMemory(); return 0; } // Create an exception with a 'sqlstate' attribute (set to None if we don't have one) whose 'args' attribute is a // tuple containing the message and sqlstate value. The 'sqlstate' attribute ensures it is easy to access in // Python (and more understandable to the reader than ex.args[1]), but putting it in the args ensures it shows up // in logs because of the default repr/str. pAttrs = Py_BuildValue("(Os)", pMsg, sqlstate); if (pAttrs) { pError = PyEval_CallObject(exc_class, pAttrs); if (pError) RaiseErrorFromException(pError); } Py_DECREF(pMsg); Py_XDECREF(pAttrs); Py_XDECREF(pError); return 0; } #if PY_MAJOR_VERSION < 3 #define PyString_CompareWithASCIIString(lhs, rhs) _strcmpi(PyString_AS_STRING(lhs), rhs) #else #define PyString_CompareWithASCIIString PyUnicode_CompareWithASCIIString #endif bool HasSqlState(PyObject* ex, const char* szSqlState) { // Returns true if `ex` is an exception and has the given SQLSTATE. It is safe to pass 0 for ex. 
bool has = false; if (ex) { PyObject* args = PyObject_GetAttrString(ex, "args"); if (args != 0) { PyObject* s = PySequence_GetItem(args, 1); if (s != 0 && PyString_Check(s)) { // const char* sz = PyString_AsString(s); // if (sz && _strcmpi(sz, szSqlState) == 0) // has = true; has = (PyString_CompareWithASCIIString(s, szSqlState) == 0); } Py_XDECREF(s); Py_DECREF(args); } } return has; } static PyObject* GetError(const char* sqlstate, PyObject* exc_class, PyObject* pMsg) { // pMsg // The error message. This function takes ownership of this object, so we'll free it if we fail to create an // error. PyObject *pSqlState=0, *pAttrs=0, *pError=0; if (!sqlstate || !*sqlstate) sqlstate = "HY000"; if (!exc_class) exc_class = ExceptionFromSqlState(sqlstate); pAttrs = PyTuple_New(2); if (!pAttrs) { Py_DECREF(pMsg); return 0; } PyTuple_SetItem(pAttrs, 1, pMsg); // pAttrs now owns the pMsg reference; steals a reference, does not increment pSqlState = PyString_FromString(sqlstate); if (!pSqlState) { Py_DECREF(pAttrs); return 0; } PyTuple_SetItem(pAttrs, 0, pSqlState); // pAttrs now owns the pSqlState reference pError = PyEval_CallObject(exc_class, pAttrs); // pError will incref pAttrs Py_XDECREF(pAttrs); return pError; } static const char* DEFAULT_ERROR = "The driver did not supply an error!"; PyObject* RaiseErrorFromHandle(const char* szFunction, HDBC hdbc, HSTMT hstmt) { // The exception is "set" in the interpreter. This function returns 0 so this can be used in a return statement. PyObject* pError = GetErrorFromHandle(szFunction, hdbc, hstmt); if (pError) { RaiseErrorFromException(pError); Py_DECREF(pError); } return 0; } PyObject* GetErrorFromHandle(const char* szFunction, HDBC hdbc, HSTMT hstmt) { TRACE("In RaiseError(%s)!\n", szFunction); // Creates and returns an exception from ODBC error information. // // ODBC can generate a chain of errors which we concatenate into one error message. 
We use the SQLSTATE from the // first message, which seems to be the most detailed, to determine the class of exception. // // If the function fails, for example, if it runs out of memory, zero is returned. // // szFunction // The name of the function that failed. Python generates a useful stack trace, but we often don't know where in // the C++ code we failed. SQLSMALLINT nHandleType; SQLHANDLE h; char sqlstate[6] = ""; SQLINTEGER nNativeError; SQLSMALLINT cchMsg; char sqlstateT[6]; char szMsg[1024]; PyObject* pMsg = 0; PyObject* pMsgPart = 0; if (hstmt != SQL_NULL_HANDLE) { nHandleType = SQL_HANDLE_STMT; h = hstmt; } else if (hdbc != SQL_NULL_HANDLE) { nHandleType = SQL_HANDLE_DBC; h = hdbc; } else { nHandleType = SQL_HANDLE_ENV; h = henv; } // unixODBC + PostgreSQL driver 07.01.0003 (Fedora 8 binaries from RPMs) crash if you call SQLGetDiagRec more // than once. I hate to do this, but I'm going to only call it once for non-Windows platforms for now... SQLSMALLINT iRecord = 1; for (;;) { szMsg[0] = 0; sqlstateT[0] = 0; nNativeError = 0; cchMsg = 0; SQLRETURN ret; Py_BEGIN_ALLOW_THREADS ret = SQLGetDiagRec(nHandleType, h, iRecord, (SQLCHAR*)sqlstateT, &nNativeError, (SQLCHAR*)szMsg, (short)(_countof(szMsg)-1), &cchMsg); Py_END_ALLOW_THREADS if (!SQL_SUCCEEDED(ret)) break; // Not always NULL terminated (MS Access) sqlstateT[5] = 0; if (cchMsg != 0) { if (iRecord == 1) { // This is the first error message, so save the SQLSTATE for determining the exception class and append // the calling function name. memcpy(sqlstate, sqlstateT, sizeof(sqlstate[0]) * _countof(sqlstate)); pMsg = PyString_FromFormat("[%s] %s (%ld) (%s)", sqlstateT, szMsg, (long)nNativeError, szFunction); if (pMsg == 0) return 0; } else { // This is not the first error message, so append to the existing one. 
pMsgPart = PyString_FromFormat("; [%s] %s (%ld)", sqlstateT, szMsg, (long)nNativeError); if (pMsgPart == 0) { Py_XDECREF(pMsg); return 0; } PyString_ConcatAndDel(&pMsg, pMsgPart); } } iRecord++; #ifndef _MSC_VER // See non-Windows comment above break; #endif } if (pMsg == 0) { // This only happens using unixODBC. (Haven't tried iODBC yet.) Either the driver or the driver manager is // buggy and has signaled a fault without recording error information. sqlstate[0] = '\0'; pMsg = PyString_FromString(DEFAULT_ERROR); if (pMsg == 0) { PyErr_NoMemory(); return 0; } } return GetError(sqlstate, 0, pMsg); } static bool GetSqlState(HSTMT hstmt, char* szSqlState) { SQLCHAR szMsg[300]; SQLSMALLINT cbMsg = (SQLSMALLINT)(_countof(szMsg) - 1); SQLINTEGER nNative; SQLSMALLINT cchMsg; SQLRETURN ret; Py_BEGIN_ALLOW_THREADS ret = SQLGetDiagRec(SQL_HANDLE_STMT, hstmt, 1, (SQLCHAR*)szSqlState, &nNative, szMsg, cbMsg, &cchMsg); Py_END_ALLOW_THREADS return SQL_SUCCEEDED(ret); } bool HasSqlState(HSTMT hstmt, const char* szSqlState) { char szActual[6]; if (!GetSqlState(hstmt, szActual)) return false; return memcmp(szActual, szSqlState, 5) == 0; } pyodbc-3.0.7/src/sqlwchar.h0000666000175000017500000000244112031131304014230 0ustar dokodoko #ifndef _PYODBCSQLWCHAR_H #define _PYODBCSQLWCHAR_H class SQLWChar { // An object designed to convert strings and Unicode objects to SQLWCHAR, hold the temporary buffer, and delete it // in the destructor. private: SQLWCHAR* pch; Py_ssize_t len; bool owns_memory; public: SQLWChar() { pch = 0; len = 0; owns_memory = false; } SQLWChar(PyObject* o); bool Convert(PyObject* o); void Free(); ~SQLWChar() { Free(); } void dump(); operator SQLWCHAR*() { return pch; } operator const SQLWCHAR*() const { return pch; } operator bool() const { return pch != 0; } Py_ssize_t size() const { return len; } SQLWCHAR* operator[] (Py_ssize_t i) { I(i <= len); // we'll allow access to the NULL? 
return &pch[i]; } const SQLWCHAR* operator[] (Py_ssize_t i) const { I(i <= len); // we'll allow access to the NULL? return &pch[i]; } }; // The size of a SQLWCHAR. extern Py_ssize_t SQLWCHAR_SIZE; // Allocate a new Unicode object, initialized from the given SQLWCHAR string. PyObject* PyUnicode_FromSQLWCHAR(const SQLWCHAR* sz, Py_ssize_t cch); SQLWCHAR* SQLWCHAR_FromUnicode(const Py_UNICODE* pch, Py_ssize_t len); #endif // _PYODBCSQLWCHAR_H pyodbc-3.0.7/src/pyodbcdbg.cpp0000666000175000017500000000502112031131304014671 0ustar dokodoko #include "pyodbc.h" void PrintBytes(void* p, size_t len) { unsigned char* pch = (unsigned char*)p; for (size_t i = 0; i < len; i++) printf("%02x ", (int)pch[i]); printf("\n"); } #ifdef PYODBC_TRACE void DebugTrace(const char* szFmt, ...) { va_list marker; va_start(marker, szFmt); vprintf(szFmt, marker); va_end(marker); } #endif #ifdef PYODBC_LEAK_CHECK // THIS IS NOT THREAD SAFE: This is only designed for the single-threaded unit tests! struct Allocation { const char* filename; int lineno; size_t len; void* pointer; int counter; }; static Allocation* allocs = 0; static int bufsize = 0; static int count = 0; static int allocCounter = 0; void* _pyodbc_malloc(const char* filename, int lineno, size_t len) { void* p = malloc(len); if (p == 0) return 0; if (count == bufsize) { allocs = (Allocation*)realloc(allocs, (bufsize + 20) * sizeof(Allocation)); if (allocs == 0) { // Yes we just lost the original pointer, but we don't care since everything is about to fail. This is a // debug leak check, not a production malloc that needs to be robust in low memory. 
bufsize = 0; count = 0; return 0; } bufsize += 20; } allocs[count].filename = filename; allocs[count].lineno = lineno; allocs[count].len = len; allocs[count].pointer = p; allocs[count].counter = allocCounter++; printf("malloc(%d): %s(%d) %d %p\n", allocs[count].counter, filename, lineno, (int)len, p); count += 1; return p; } void pyodbc_free(void* p) { if (p == 0) return; for (int i = 0; i < count; i++) { if (allocs[i].pointer == p) { printf("free(%d): %s(%d) %d %p i=%d\n", allocs[i].counter, allocs[i].filename, allocs[i].lineno, (int)allocs[i].len, allocs[i].pointer, i); memmove(&allocs[i], &allocs[i + 1], sizeof(Allocation) * (count - i - 1)); count -= 1; free(p); return; } } printf("FREE FAILED: %p\n", p); free(p); } void pyodbc_leak_check() { if (count == 0) { printf("NO LEAKS\n"); } else { printf("********************************************************************************\n"); printf("%d leaks\n", count); for (int i = 0; i < count; i++) printf("LEAK: %d %s(%d) len=%d\n", allocs[i].counter, allocs[i].filename, allocs[i].lineno, allocs[i].len); } } #endif pyodbc-3.0.7/src/row.cpp0000666000175000017500000004134512146217440013571 0ustar dokodoko // Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated // documentation files (the "Software"), to deal in the Software without restriction, including without limitation the // rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to // permit persons to whom the Software is furnished to do so. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE // WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS // OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR // OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #include "pyodbc.h" #include "pyodbcmodule.h" #include "row.h" #include "wrapper.h" struct Row { // A Row must act like a sequence (a tuple of results) to meet the DB API specification, but we also allow values // to be accessed via lowercased column names. We also supply a `columns` attribute which returns the list of // column names. PyObject_HEAD // cursor.description, accessed as _description PyObject* description; // A Python dictionary mapping from column name to a PyInteger, used to access columns by name. PyObject* map_name_to_index; // The number of values in apValues. Py_ssize_t cValues; // The column values, stored as an array. PyObject** apValues; }; #define Row_Check(op) PyObject_TypeCheck(op, &RowType) #define Row_CheckExact(op) (Py_TYPE(op) == &RowType) void FreeRowValues(Py_ssize_t cValues, PyObject** apValues) { if (apValues) { for (Py_ssize_t i = 0; i < cValues; i++) Py_XDECREF(apValues[i]); pyodbc_free(apValues); } } static void Row_dealloc(PyObject* o) { // Note: Now that __newobj__ is available, our variables could be zero... Row* self = (Row*)o; Py_XDECREF(self->description); Py_XDECREF(self->map_name_to_index); FreeRowValues(self->cValues, self->apValues); PyObject_Del(self); } static PyObject* Row_getstate(PyObject* self) { // Returns a tuple containing the saved state. We don't really support empty rows, but unfortunately they can be // created now by the new constructor which was necessary for implementing pickling. In that case (everything is // zero), an empty tuple is returned. // Not exposed. 
Row* row = (Row*)self; if (row->description == 0) return PyTuple_New(0); Tuple state(PyTuple_New(2 + row->cValues)); if (!state.IsValid()) return 0; state[0] = row->description; state[1] = row->map_name_to_index; for (int i = 0; i < row->cValues; i++) state[i+2] = row->apValues[i]; for (int i = 0; i < 2 + row->cValues; i++) Py_XINCREF(state[i]); return state.Detach(); } static PyObject* new_check(PyObject* args) { // We don't support a normal constructor, so only allow this for unpickling. There should be a single arg that was // returned by Row_reduce. Make sure the sizes match. The desc and map should have one entry per column, which // should equal the number of remaining items. if (PyTuple_GET_SIZE(args) < 3) return 0; PyObject* desc = PyTuple_GET_ITEM(args, 0); PyObject* map = PyTuple_GET_ITEM(args, 1); if (!PyTuple_CheckExact(desc) || !PyDict_CheckExact(map)) return 0; Py_ssize_t cols = PyTuple_GET_SIZE(desc); if (PyDict_Size(map) != cols || PyTuple_GET_SIZE(args) - 2 != cols) return 0; PyObject** apValues = (PyObject**)pyodbc_malloc(sizeof(PyObject*) * cols); if (!apValues) return 0; for (int i = 0; i < cols; i++) { apValues[i] = PyTuple_GET_ITEM(args, i+2); Py_INCREF(apValues[i]); } // Row_Internal will incref desc and map. PyObject* self = (PyObject*)Row_InternalNew(desc, map, cols, apValues); if (!self) pyodbc_free(apValues); return self; } static PyObject* Row_new(PyTypeObject* type, PyObject* args, PyObject* kwargs) { UNUSED(kwargs); PyObject* row = new_check(args); if (row == 0) PyErr_SetString(PyExc_TypeError, "cannot create 'pyodbc.Row' instances"); return row; } Row* Row_InternalNew(PyObject* description, PyObject* map_name_to_index, Py_ssize_t cValues, PyObject** apValues) { // Called by other modules to create rows. Takes ownership of apValues. 
#ifdef _MSC_VER #pragma warning(disable : 4365) #endif Row* row = PyObject_NEW(Row, &RowType); #ifdef _MSC_VER #pragma warning(default : 4365) #endif if (row) { Py_INCREF(description); row->description = description; Py_INCREF(map_name_to_index); row->map_name_to_index = map_name_to_index; row->apValues = apValues; row->cValues = cValues; } else { FreeRowValues(cValues, apValues); } return row; } static PyObject* Row_getattro(PyObject* o, PyObject* name) { // Called to handle 'row.colname'. Row* self = (Row*)o; PyObject* index = PyDict_GetItem(self->map_name_to_index, name); if (index) { Py_ssize_t i = PyNumber_AsSsize_t(index, 0); Py_INCREF(self->apValues[i]); return self->apValues[i]; } return PyObject_GenericGetAttr(o, name); } static Py_ssize_t Row_length(PyObject* self) { return ((Row*)self)->cValues; } static int Row_contains(PyObject* o, PyObject* el) { // Implementation of contains. The documentation is not good (non-existent?), so I copied the following from the // PySequence_Contains documentation: Return -1 if error; 1 if ob in seq; 0 if ob not in seq. Row* self = (Row*)o; int cmp = 0; for (Py_ssize_t i = 0, c = self->cValues ; cmp == 0 && i < c; ++i) cmp = PyObject_RichCompareBool(el, self->apValues[i], Py_EQ); return cmp; } static PyObject* Row_item(PyObject* o, Py_ssize_t i) { // Apparently, negative indexes are handled by magic ;) -- they never make it here. Row* self = (Row*)o; if (i < 0 || i >= self->cValues) { PyErr_SetString(PyExc_IndexError, "tuple index out of range"); return NULL; } Py_INCREF(self->apValues[i]); return self->apValues[i]; } static int Row_ass_item(PyObject* o, Py_ssize_t i, PyObject* v) { // Implements row[i] = value. 
Row* self = (Row*)o; if (i < 0 || i >= self->cValues) { PyErr_SetString(PyExc_IndexError, "Row assignment index out of range"); return -1; } Py_XDECREF(self->apValues[i]); Py_INCREF(v); self->apValues[i] = v; return 0; } static int Row_setattro(PyObject* o, PyObject *name, PyObject* v) { Row* self = (Row*)o; PyObject* index = PyDict_GetItem(self->map_name_to_index, name); if (index) return Row_ass_item(o, PyNumber_AsSsize_t(index, 0), v); return PyObject_GenericSetAttr(o, name, v); } static PyObject* Row_repr(PyObject* o) { Row* self = (Row*)o; if (self->cValues == 0) return PyString_FromString("()"); Object pieces(PyTuple_New(self->cValues)); if (!pieces) return 0; Py_ssize_t length = 2 + (2 * (self->cValues-1)); // parens + ', ' separators for (Py_ssize_t i = 0; i < self->cValues; i++) { PyObject* piece = PyObject_Repr(self->apValues[i]); if (!piece) return 0; length += Text_Size(piece); PyTuple_SET_ITEM(pieces.Get(), i, piece); } if (self->cValues == 1) { // Need a trailing comma: (value,) length += 2; } PyObject* result = Text_New(length); if (!result) return 0; TEXT_T* buffer = Text_Buffer(result); Py_ssize_t offset = 0; buffer[offset++] = '('; for (Py_ssize_t i = 0; i < self->cValues; i++) { PyObject* item = PyTuple_GET_ITEM(pieces.Get(), i); memcpy(&buffer[offset], Text_Buffer(item), Text_Size(item) * sizeof(TEXT_T)); offset += Text_Size(item); if (i != self->cValues-1 || self->cValues == 1) { buffer[offset++] = ','; buffer[offset++] = ' '; } } buffer[offset++] = ')'; I(offset == length); return result; } static PyObject* Row_richcompare(PyObject* olhs, PyObject* orhs, int op) { if (!Row_Check(olhs) || !Row_Check(orhs)) { Py_INCREF(Py_NotImplemented); return Py_NotImplemented; } Row* lhs = (Row*)olhs; Row* rhs = (Row*)orhs; if (lhs->cValues != rhs->cValues) { // Different sizes, so use the same rules as the tuple class. 
bool result; switch (op) { case Py_EQ: result = (lhs->cValues == rhs->cValues); break; case Py_GE: result = (lhs->cValues >= rhs->cValues); break; case Py_GT: result = (lhs->cValues > rhs->cValues); break; case Py_LE: result = (lhs->cValues <= rhs->cValues); break; case Py_LT: result = (lhs->cValues < rhs->cValues); break; case Py_NE: result = (lhs->cValues != rhs->cValues); break; default: // Can't get here, but don't have a cross-compiler way to silence this. result = false; } PyObject* p = result ? Py_True : Py_False; Py_INCREF(p); return p; } for (Py_ssize_t i = 0, c = lhs->cValues; i < c; i++) if (!PyObject_RichCompareBool(lhs->apValues[i], rhs->apValues[i], Py_EQ)) return PyObject_RichCompare(lhs->apValues[i], rhs->apValues[i], op); // All items are equal. switch (op) { case Py_EQ: case Py_GE: case Py_LE: Py_RETURN_TRUE; case Py_GT: case Py_LT: case Py_NE: break; } Py_RETURN_FALSE; } static PyObject* Row_subscript(PyObject* o, PyObject* key) { Row* row = (Row*)o; if (PyIndex_Check(key)) { Py_ssize_t i = PyNumber_AsSsize_t(key, PyExc_IndexError); if (i == -1 && PyErr_Occurred()) return 0; if (i < 0) i += row->cValues; if (i < 0 || i >= row->cValues) return PyErr_Format(PyExc_IndexError, "row index out of range index=%d len=%d", (int)i, (int)row->cValues); Py_INCREF(row->apValues[i]); return row->apValues[i]; } if (PySlice_Check(key)) { Py_ssize_t start, stop, step, slicelength; #if PY_VERSION_HEX >= 0x03020000 if (PySlice_GetIndicesEx(key, row->cValues, &start, &stop, &step, &slicelength) < 0) return 0; #else if (PySlice_GetIndicesEx((PySliceObject*)key, row->cValues, &start, &stop, &step, &slicelength) < 0) return 0; #endif if (slicelength <= 0) return PyTuple_New(0); if (start == 0 && step == 1 && slicelength == row->cValues) { Py_INCREF(o); return o; } Object result(PyTuple_New(slicelength)); if (!result) return 0; for (Py_ssize_t i = 0, index = start; i < slicelength; i++, index += step) { PyTuple_SET_ITEM(result.Get(), i, row->apValues[index]); 
Py_INCREF(row->apValues[index]); } return result.Detach(); } return PyErr_Format(PyExc_TypeError, "row indices must be integers, not %.200s", Py_TYPE(key)->tp_name); } static PySequenceMethods row_as_sequence = { Row_length, // sq_length 0, // sq_concat 0, // sq_repeat Row_item, // sq_item 0, // was_sq_slice Row_ass_item, // sq_ass_item 0, // sq_ass_slice Row_contains, // sq_contains }; static PyMappingMethods row_as_mapping = { Row_length, // mp_length Row_subscript, // mp_subscript 0, // mp_ass_subscript }; static char description_doc[] = "The Cursor.description sequence from the Cursor that created this row."; static PyMemberDef Row_members[] = { { "cursor_description", T_OBJECT_EX, offsetof(Row, description), READONLY, description_doc }, { 0 } }; static PyObject* Row_reduce(PyObject* self, PyObject* args) { PyObject* state = Row_getstate(self); if (!state) return 0; return Py_BuildValue("ON", Py_TYPE(self), state); } static PyMethodDef Row_methods[] = { { "__reduce__", (PyCFunction)Row_reduce, METH_NOARGS, 0 }, { 0, 0, 0, 0 } }; static char row_doc[] = "Row objects are sequence objects that hold query results.\n" "\n" "They are similar to tuples in that they cannot be resized and new attributes\n" "cannot be added, but individual elements can be replaced. This allows data to\n" "be \"fixed up\" after being fetched. (For example, datetimes may be replaced by\n" "those with time zones attached.)\n" "\n" " row[0] = row[0].replace(tzinfo=timezone)\n" " print row[0]\n" "\n" "Additionally, individual values can be optionally be accessed or replaced by\n" "name. 
Non-alphanumeric characters are replaced with an underscore.\n" "\n" " cursor.execute(\"select customer_id, [Name With Spaces] from tmp\")\n" " row = cursor.fetchone()\n" " print row.customer_id, row.Name_With_Spaces\n" "\n" "If using this non-standard feature, it is often convenient to specifiy the name\n" "using the SQL 'as' keyword:\n" "\n" " cursor.execute(\"select count(*) as total from tmp\")\n" " row = cursor.fetchone()\n" " print row.total"; PyTypeObject RowType = { PyVarObject_HEAD_INIT(NULL, 0) "pyodbc.Row", // tp_name sizeof(Row), // tp_basicsize 0, // tp_itemsize Row_dealloc, // tp_dealloc 0, // tp_print 0, // tp_getattr 0, // tp_setattr 0, // tp_compare Row_repr, // tp_repr 0, // tp_as_number &row_as_sequence, // tp_as_sequence &row_as_mapping, // tp_as_mapping 0, // tp_hash 0, // tp_call 0, // tp_str Row_getattro, // tp_getattro Row_setattro, // tp_setattro 0, // tp_as_buffer Py_TPFLAGS_DEFAULT, // tp_flags row_doc, // tp_doc 0, // tp_traverse 0, // tp_clear Row_richcompare, // tp_richcompare 0, // tp_weaklistoffset 0, // tp_iter 0, // tp_iternext Row_methods, // tp_methods Row_members, // tp_members 0, // tp_getset 0, // tp_base 0, // tp_dict 0, // tp_descr_get 0, // tp_descr_set 0, // tp_dictoffset 0, // tp_init 0, // tp_alloc Row_new, // tp_new 0, // tp_free 0, // tp_is_gc 0, // tp_bases 0, // tp_mro 0, // tp_cache 0, // tp_subclasses 0, // tp_weaklist }; pyodbc-3.0.7/src/getdata.h0000666000175000017500000000053212031131304014014 0ustar dokodoko #ifndef _GETDATA_H_ #define _GETDATA_H_ void GetData_init(); PyObject* GetData(Cursor* cur, Py_ssize_t iCol); /** * If this sql type has a user-defined conversion, the index into the connection's `conv_funcs` array is returned. * Otherwise -1 is returned. 
*/ int GetUserConvIndex(Cursor* cur, SQLSMALLINT sql_type); #endif // _GETDATA_H_ pyodbc-3.0.7/src/pyodbc.h0000666000175000017500000000736412034534654013717 0ustar dokodoko // Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated // documentation files (the "Software"), to deal in the Software without restriction, including without limitation the // rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to // permit persons to whom the Software is furnished to do so. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE // WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS // OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR // OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #ifndef PYODBC_H #define PYODBC_H #ifdef _MSC_VER #define _CRT_SECURE_NO_WARNINGS #include #include typedef __int64 INT64; typedef unsigned __int64 UINT64; #else typedef unsigned char byte; typedef unsigned int UINT; typedef long long INT64; typedef unsigned long long UINT64; #define _strcmpi strcasecmp #ifdef __MINGW32__ #include #include #else inline int max(int lhs, int rhs) { return (rhs > lhs) ? 
rhs : lhs; } #endif #endif #ifdef __SUN__ #include #endif #define PY_SSIZE_T_CLEAN 1 #include #include #include #include #include #include #ifdef __CYGWIN__ #include #endif #include #include #if PY_VERSION_HEX < 0x02050000 && !defined(PY_SSIZE_T_MIN) typedef int Py_ssize_t; #define PY_SSIZE_T_MAX INT_MAX #define PY_SSIZE_T_MIN INT_MIN #define PyInt_AsSsize_t PyInt_AsLong #define lenfunc inquiry #define ssizeargfunc intargfunc #define ssizeobjargproc intobjargproc #endif #ifndef _countof #define _countof(a) (sizeof(a) / sizeof(a[0])) #endif inline bool IsSet(DWORD grf, DWORD flags) { return (grf & flags) == flags; } #ifdef UNUSED #undef UNUSED #endif inline void UNUSED(...) { } #include #if defined(__SUNPRO_CC) || defined(__SUNPRO_C) || (defined(__GNUC__) && !defined(__MINGW32__)) #ifndef __FreeBSD__ #include #endif #define CDECL cdecl #define min(X,Y) ((X) < (Y) ? (X) : (Y)) #define max(X,Y) ((X) > (Y) ? (X) : (Y)) #define _alloca alloca inline void _strlwr(char* name) { while (*name) { *name = tolower(*name); name++; } } #else #define CDECL #endif #define STRINGIFY(x) #x #define TOSTRING(x) STRINGIFY(x) // Building an actual debug version of Python is so much of a pain that it never happens. I'm providing release-build // versions of assertions. #if defined(PYODBC_ASSERT) && defined(_MSC_VER) #include inline void FailAssert(const char* szFile, size_t line, const char* szExpr) { printf("assertion failed: %s(%d)\n%s\n", szFile, line, szExpr); __debugbreak(); // _CrtDbgBreak(); } #define I(expr) if (!(expr)) FailAssert(__FILE__, __LINE__, #expr); #define N(expr) if (expr) FailAssert(__FILE__, __LINE__, #expr); #else #define I(expr) #define N(expr) #endif #ifdef PYODBC_TRACE void DebugTrace(const char* szFmt, ...); #else inline void DebugTrace(const char* szFmt, ...) 
{ UNUSED(szFmt); } #endif #define TRACE DebugTrace #ifdef PYODBC_LEAK_CHECK #define pyodbc_malloc(len) _pyodbc_malloc(__FILE__, __LINE__, len) void* _pyodbc_malloc(const char* filename, int lineno, size_t len); void pyodbc_free(void* p); void pyodbc_leak_check(); #else #define pyodbc_malloc malloc #define pyodbc_free free #endif void PrintBytes(void* p, size_t len); #include "pyodbccompat.h" #define HERE printf("%s(%d)\n", __FILE__, __LINE__) #endif // pyodbc_h pyodbc-3.0.7/src/pyodbccompat.h0000666000175000017500000000775512031131304015105 0ustar dokodoko #ifndef PYODBCCOMPAT_H #define PYODBCCOMPAT_H // Macros and functions to ease compatibility with Python 2 and Python 3. #if PY_VERSION_HEX >= 0x03000000 && PY_VERSION_HEX < 0x03010000 #error Python 3.0 is not supported. Please use 3.1 and higher. #endif // Macros introduced in 2.6, backported for 2.4 and 2.5. #ifndef PyVarObject_HEAD_INIT #define PyVarObject_HEAD_INIT(type, size) PyObject_HEAD_INIT(type) size, #endif #ifndef Py_TYPE #define Py_TYPE(ob) (((PyObject*)(ob))->ob_type) #endif // Macros were introduced in 2.6 to map "bytes" to "str" in Python 2. Back port to 2.5. #if PY_VERSION_HEX >= 0x02060000 #include #else #define PyBytes_AS_STRING PyString_AS_STRING #define PyBytes_Check PyString_Check #define PyBytes_CheckExact PyString_CheckExact #define PyBytes_FromStringAndSize PyString_FromStringAndSize #define PyBytes_GET_SIZE PyString_GET_SIZE #define PyBytes_Size PyString_Size #define _PyBytes_Resize _PyString_Resize #endif // Used for items that are ANSI in Python 2 and Unicode in Python 3 or in int 2 and long in 3. 
#if PY_MAJOR_VERSION >= 3 #define PyString_FromString PyUnicode_FromString #define PyString_FromStringAndSize PyUnicode_FromStringAndSize #define PyString_Check PyUnicode_Check #define PyString_Type PyUnicode_Type #define PyString_Size PyUnicode_Size #define PyInt_FromLong PyLong_FromLong #define PyInt_AsLong PyLong_AsLong #define PyInt_AS_LONG PyLong_AS_LONG #define PyInt_Type PyLong_Type #define PyString_FromFormatV PyUnicode_FromFormatV #define PyString_FromFormat PyUnicode_FromFormat #define Py_TPFLAGS_HAVE_ITER 0 #define PyString_AsString PyUnicode_AsString #define TEXT_T Py_UNICODE #define PyString_Join PyUnicode_Join inline void PyString_ConcatAndDel(PyObject** lhs, PyObject* rhs) { PyUnicode_Concat(*lhs, rhs); Py_DECREF(rhs); } #else #include #include #include #define TEXT_T char #define PyString_Join _PyString_Join #endif inline PyObject* Text_New(Py_ssize_t length) { // Returns a new, uninitialized String (Python 2) or Unicode object (Python 3) object. #if PY_MAJOR_VERSION < 3 return PyString_FromStringAndSize(0, length); #else return PyUnicode_FromUnicode(0, length); #endif } inline TEXT_T* Text_Buffer(PyObject* o) { #if PY_MAJOR_VERSION < 3 I(PyString_Check(o)); return PyString_AS_STRING(o); #else I(PyUnicode_Check(o)); return PyUnicode_AS_UNICODE(o); #endif } inline bool Text_Check(PyObject* o) { // A compatibility function that determines if the object is a string, based on the version of Python. // For Python 2, an ASCII or Unicode string is allowed. For Python 3, it must be a Unicode object. #if PY_MAJOR_VERSION < 3 if (o && PyString_Check(o)) return true; #endif return o && PyUnicode_Check(o); } bool Text_EqualsI(PyObject* lhs, const char* rhs); // Case-insensitive comparison for a Python string object (Unicode in Python 3, ASCII or Unicode in Python 2) against // an ASCII string. If lhs is 0 or None, false is returned. 
inline Py_ssize_t Text_Size(PyObject* o) { #if PY_MAJOR_VERSION < 3 if (o && PyString_Check(o)) return PyString_GET_SIZE(o); #endif return (o && PyUnicode_Check(o)) ? PyUnicode_GET_SIZE(o) : 0; } inline Py_ssize_t TextCopyToUnicode(Py_UNICODE* buffer, PyObject* o) { // Copies a String or Unicode object to a Unicode buffer and returns the number of characters copied. // No NULL terminator is appended! #if PY_MAJOR_VERSION < 3 if (PyBytes_Check(o)) { const Py_ssize_t cch = PyBytes_GET_SIZE(o); const char * pch = PyBytes_AS_STRING(o); for (Py_ssize_t i = 0; i < cch; i++) *buffer++ = (Py_UNICODE)*pch++; return cch; } else { #endif Py_ssize_t cch = PyUnicode_GET_SIZE(o); memcpy(buffer, PyUnicode_AS_UNICODE(o), cch * sizeof(Py_UNICODE)); return cch; #if PY_MAJOR_VERSION < 3 } #endif } #endif // PYODBCCOMPAT_H pyodbc-3.0.7/src/cursor.h0000666000175000017500000001254012031131304013722 0ustar dokodoko /* * Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated * documentation files (the "Software"), to deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to * permit persons to whom the Software is furnished to do so. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE * WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS * OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #ifndef CURSOR_H #define CURSOR_H struct Connection; struct ColumnInfo { SQLSMALLINT sql_type; // The column size from SQLDescribeCol. 
For character types, this is the maximum length, not including the NULL // terminator. For binary values, this is the maximum length. For numeric and decimal values, it is the defined // number of digits. For example, the precision of a column defined as NUMERIC(10,3) is 10. // // This value can be SQL_NO_TOTAL in which case the driver doesn't know the maximum length, such as for LONGVARCHAR // fields. SQLULEN column_size; // Tells us if an integer type is signed or unsigned. This is determined after a query using SQLColAttribute. All // of the integer types are the same size whether signed and unsigned, so we can allocate memory ahead of time // without knowing this. We use this during the fetch when converting to a Python integer or long. bool is_unsigned; }; struct ParamInfo { // The following correspond to the SQLBindParameter parameters. SQLSMALLINT ValueType; SQLSMALLINT ParameterType; SQLULEN ColumnSize; SQLSMALLINT DecimalDigits; // The value pointer that will be bound. If `alloc` is true, this was allocated with malloc and must be freed. // Otherwise it is zero or points into memory owned by the original Python parameter. SQLPOINTER ParameterValuePtr; SQLLEN BufferLength; SQLLEN StrLen_or_Ind; // If true, the memory in ParameterValuePtr was allocated via malloc and must be freed. bool allocated; // The python object containing the parameter value. A reference to this object should be held until we have // finished using memory owned by it. PyObject* pParam; // Optional data. If used, ParameterValuePtr will point into this. union { unsigned char ch; long l; INT64 i64; double dbl; TIMESTAMP_STRUCT timestamp; DATE_STRUCT date; TIME_STRUCT time; } Data; }; struct Cursor { PyObject_HEAD // The Connection object (which is a PyObject) that created this cursor. Connection* cnxn; // Set to SQL_NULL_HANDLE when the cursor is closed. 
HSTMT hstmt; // // SQL Parameters // // If non-zero, a pointer to the previously prepared SQL string, allowing us to skip the prepare and gathering of // parameter data. PyObject* pPreparedSQL; // The number of parameter markers in pPreparedSQL. This will be zero when pPreparedSQL is zero but is set // immediately after preparing the SQL. int paramcount; // If non-zero, a pointer to an array of SQL type values allocated via malloc. This is zero until we actually ask // for the type of parameter, which is only when a parameter is None (NULL). At that point, the entire array is // allocated (length == paramcount) but all entries are set to SQL_UNKNOWN_TYPE. SQLSMALLINT* paramtypes; // If non-zero, a pointer to a buffer containing the actual parameters bound. If pPreparedSQL is zero, this should // be freed using free and set to zero. // // Even if the same SQL statement is executed twice, the parameter bindings are redone from scratch since we try to // bind into the Python objects directly. ParamInfo* paramInfos; // // Result Information // // An array of ColumnInfos, allocated via malloc. This will be zero when closed or when there are no query // results. ColumnInfo* colinfos; // The description tuple described in the DB API 2.0 specification. Set to None when there are no results. PyObject* description; int arraysize; // The Cursor.rowcount attribute from the DB API specification. int rowcount; // A dictionary that maps from column name (PyString) to index into the result columns (PyInteger). This is // constructued during an execute and shared with each row (reference counted) to implement accessing results by // column name. // // This duplicates some ODBC functionality, but allows us to use Row objects after the statement is closed and // should use less memory than putting each column into the Row's __dict__. // // Since this is shared by Row objects, it cannot be reused. New dictionaries are created for every execute. 
This // will be zero whenever there are no results. PyObject* map_name_to_index; }; void Cursor_init(); Cursor* Cursor_New(Connection* cnxn); PyObject* Cursor_execute(PyObject* self, PyObject* args); #endif pyodbc-3.0.7/src/pyodbcmodule.cpp0000666000175000017500000010564512031131304015437 0ustar dokodoko // Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated // documentation files (the "Software"), to deal in the Software without restriction, including without limitation the // rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to // permit persons to whom the Software is furnished to do so. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE // WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS // OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR // OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #include "pyodbc.h" #include "pyodbcmodule.h" #include "connection.h" #include "cursor.h" #include "row.h" #include "wrapper.h" #include "errors.h" #include "getdata.h" #include "cnxninfo.h" #include "params.h" #include "dbspecific.h" #include #include #include static PyObject* MakeConnectionString(PyObject* existing, PyObject* parts); PyObject* pModule = 0; static char module_doc[] = "A database module for accessing databases via ODBC.\n" "\n" "This module conforms to the DB API 2.0 specification while providing\n" "non-standard convenience features. Only standard Python data types are used\n" "so additional DLLs are not required.\n" "\n" "Static Variables:\n\n" "version\n" " The module version string. Official builds will have a version in the format\n" " `major.minor.revision`, such as 2.1.7. 
Beta versions will have -beta appended,\n" " such as 2.1.8-beta03. (This would be a build before the official 2.1.8 release.)\n" " Some special test builds will have a test name (the git branch name) prepended,\n" " such as fixissue90-2.1.8-beta03.\n" "\n" "apilevel\n" " The string constant '2.0' indicating this module supports DB API level 2.0.\n" "\n" "lowercase\n" " A Boolean that controls whether column names in result rows are lowercased.\n" " This can be changed any time and affects queries executed after the change.\n" " The default is False. This can be useful when database columns have\n" " inconsistent capitalization.\n" "\n" "pooling\n" " A Boolean indicating whether connection pooling is enabled. This is a\n" " global (HENV) setting, so it can only be modified before the first\n" " connection is made. The default is True, which enables ODBC connection\n" " pooling.\n" "\n" "threadsafety\n" " The integer 1, indicating that threads may share the module but not\n" " connections. Note that connections and cursors may be used by different\n" " threads, just not at the same time.\n" "\n" "qmark\n" " The string constant 'qmark' to indicate parameters are identified using\n" " question marks."; PyObject* Error; PyObject* Warning; PyObject* InterfaceError; PyObject* DatabaseError; PyObject* InternalError; PyObject* OperationalError; PyObject* ProgrammingError; PyObject* IntegrityError; PyObject* DataError; PyObject* NotSupportedError; struct ExcInfo { const char* szName; const char* szFullName; PyObject** ppexc; PyObject** ppexcParent; const char* szDoc; }; #define MAKEEXCINFO(name, parent, doc) { #name, "pyodbc." #name, &name, &parent, doc } static ExcInfo aExcInfos[] = { MAKEEXCINFO(Error, PyExc_Exception, "Exception that is the base class of all other error exceptions. 
You can use\n" "this to catch all errors with one single 'except' statement."), MAKEEXCINFO(Warning, PyExc_Exception, "Exception raised for important warnings like data truncations while inserting,\n" " etc."), MAKEEXCINFO(InterfaceError, Error, "Exception raised for errors that are related to the database interface rather\n" "than the database itself."), MAKEEXCINFO(DatabaseError, Error, "Exception raised for errors that are related to the database."), MAKEEXCINFO(DataError, DatabaseError, "Exception raised for errors that are due to problems with the processed data\n" "like division by zero, numeric value out of range, etc."), MAKEEXCINFO(OperationalError, DatabaseError, "Exception raised for errors that are related to the database's operation and\n" "not necessarily under the control of the programmer, e.g. an unexpected\n" "disconnect occurs, the data source name is not found, a transaction could not\n" "be processed, a memory allocation error occurred during processing, etc."), MAKEEXCINFO(IntegrityError, DatabaseError, "Exception raised when the relational integrity of the database is affected,\n" "e.g. a foreign key check fails."), MAKEEXCINFO(InternalError, DatabaseError, "Exception raised when the database encounters an internal error, e.g. the\n" "cursor is not valid anymore, the transaction is out of sync, etc."), MAKEEXCINFO(ProgrammingError, DatabaseError, "Exception raised for programming errors, e.g. table not found or already\n" "exists, syntax error in the SQL statement, wrong number of parameters\n" "specified, etc."), MAKEEXCINFO(NotSupportedError, DatabaseError, "Exception raised in case a method or database API was used which is not\n" "supported by the database, e.g. 
requesting a .rollback() on a connection that\n" "does not support transaction or has transactions turned off.") }; PyObject* decimal_type; HENV henv = SQL_NULL_HANDLE; Py_UNICODE chDecimal = '.'; // Initialize the global decimal character and thousands separator character, used when parsing decimal // objects. // static void init_locale_info() { Object module = PyImport_ImportModule("locale"); if (!module) { PyErr_Clear(); return; } Object ldict = PyObject_CallMethod(module, "localeconv", 0); if (!ldict) { PyErr_Clear(); return; } PyObject* value = PyDict_GetItemString(ldict, "decimal_point"); if (value) { if (PyBytes_Check(value) && PyBytes_Size(value) == 1) chDecimal = (Py_UNICODE)PyBytes_AS_STRING(value)[0]; if (PyUnicode_Check(value) && PyUnicode_GET_SIZE(value) == 1) chDecimal = PyUnicode_AS_UNICODE(value)[0]; } } static bool import_types() { // In Python 2.5 final, PyDateTime_IMPORT no longer works unless the datetime module was previously // imported (among other problems). PyObject* pdt = PyImport_ImportModule("datetime"); if (!pdt) return false; PyDateTime_IMPORT; Cursor_init(); CnxnInfo_init(); GetData_init(); if (!Params_init()) return false; PyObject* decimalmod = PyImport_ImportModule("decimal"); if (!decimalmod) { PyErr_SetString(PyExc_RuntimeError, "Unable to import decimal"); return false; } decimal_type = PyObject_GetAttrString(decimalmod, "Decimal"); Py_DECREF(decimalmod); if (decimal_type == 0) PyErr_SetString(PyExc_RuntimeError, "Unable to import decimal.Decimal."); return decimal_type != 0; } static bool AllocateEnv() { PyObject* pooling = PyObject_GetAttrString(pModule, "pooling"); bool bPooling = pooling == Py_True; Py_DECREF(pooling); if (bPooling) { if (!SQL_SUCCEEDED(SQLSetEnvAttr(SQL_NULL_HANDLE, SQL_ATTR_CONNECTION_POOLING, (SQLPOINTER)SQL_CP_ONE_PER_HENV, sizeof(int)))) { Py_FatalError("Unable to set SQL_ATTR_CONNECTION_POOLING attribute."); return false; } } if (!SQL_SUCCEEDED(SQLAllocHandle(SQL_HANDLE_ENV, SQL_NULL_HANDLE, &henv))) { 
Py_FatalError("Can't initialize module pyodbc. SQLAllocEnv failed."); return false; } if (!SQL_SUCCEEDED(SQLSetEnvAttr(henv, SQL_ATTR_ODBC_VERSION, (SQLPOINTER)SQL_OV_ODBC3, sizeof(int)))) { Py_FatalError("Unable to set SQL_ATTR_ODBC_VERSION attribute."); return false; } return true; } // Map DB API recommended keywords to ODBC keywords. struct keywordmap { const char* oldname; const char* newname; PyObject* newnameObject; // PyString object version of newname, created as needed. }; static keywordmap keywordmaps[] = { { "user", "uid", 0 }, { "password", "pwd", 0 }, { "host", "server", 0 }, }; static PyObject* mod_connect(PyObject* self, PyObject* args, PyObject* kwargs) { UNUSED(self); Object pConnectString = 0; int fAutoCommit = 0; int fAnsi = 0; // force ansi int fUnicodeResults = 0; int fReadOnly = 0; long timeout = 0; Py_ssize_t size = args ? PyTuple_Size(args) : 0; if (size > 1) { PyErr_SetString(PyExc_TypeError, "function takes at most 1 non-keyword argument"); return 0; } if (size == 1) { if (!PyString_Check(PyTuple_GET_ITEM(args, 0)) && !PyUnicode_Check(PyTuple_GET_ITEM(args, 0))) return PyErr_Format(PyExc_TypeError, "argument 1 must be a string or unicode object"); pConnectString.Attach(PyUnicode_FromObject(PyTuple_GetItem(args, 0))); if (!pConnectString.IsValid()) return 0; } if (kwargs && PyDict_Size(kwargs) > 0) { Object partsdict(PyDict_New()); if (!partsdict.IsValid()) return 0; Py_ssize_t pos = 0; PyObject* key = 0; PyObject* value = 0; Object okey; // in case we need to allocate a new key while (PyDict_Next(kwargs, &pos, &key, &value)) { if (!Text_Check(key)) return PyErr_Format(PyExc_TypeError, "Dictionary items passed to connect must be strings"); // // Note: key and value are *borrowed*. // // // Check for the two non-connection string keywords we accept. (If we get many more of these, create something // // table driven. Are we sure there isn't a Python function to parse keywords but leave those it doesn't know?) 
// const char* szKey = PyString_AsString(key); if (Text_EqualsI(key, "autocommit")) { fAutoCommit = PyObject_IsTrue(value); continue; } if (Text_EqualsI(key, "ansi")) { fAnsi = PyObject_IsTrue(value); continue; } if (Text_EqualsI(key, "unicode_results")) { fUnicodeResults = PyObject_IsTrue(value); continue; } if (Text_EqualsI(key, "timeout")) { timeout = PyInt_AsLong(value); if (PyErr_Occurred()) return 0; continue; } if (Text_EqualsI(key, "readonly")) { fReadOnly = PyObject_IsTrue(value); continue; } // Map DB API recommended names to ODBC names (e.g. user --> uid). for (size_t i = 0; i < _countof(keywordmaps); i++) { if (Text_EqualsI(key, keywordmaps[i].oldname)) { if (keywordmaps[i].newnameObject == 0) { keywordmaps[i].newnameObject = PyString_FromString(keywordmaps[i].newname); if (keywordmaps[i].newnameObject == 0) return 0; } key = keywordmaps[i].newnameObject; break; } } PyObject* str = PyObject_Str(value); // convert if necessary if (!str) return 0; if (PyDict_SetItem(partsdict.Get(), key, str) == -1) { Py_XDECREF(str); return 0; } Py_XDECREF(str); } if (PyDict_Size(partsdict.Get())) pConnectString.Attach(MakeConnectionString(pConnectString.Get(), partsdict)); } if (!pConnectString.IsValid()) return PyErr_Format(PyExc_TypeError, "no connection information was passed"); if (henv == SQL_NULL_HANDLE) { if (!AllocateEnv()) return 0; } return (PyObject*)Connection_New(pConnectString.Get(), fAutoCommit != 0, fAnsi != 0, fUnicodeResults != 0, timeout, fReadOnly != 0); } static PyObject* mod_datasources(PyObject* self) { UNUSED(self); if (henv == SQL_NULL_HANDLE && !AllocateEnv()) return 0; PyObject* result = PyDict_New(); if (!result) return 0; SQLCHAR szDSN[SQL_MAX_DSN_LENGTH]; SWORD cbDSN; SQLCHAR szDesc[200]; SWORD cbDesc; SQLUSMALLINT nDirection = SQL_FETCH_FIRST; SQLRETURN ret; for (;;) { Py_BEGIN_ALLOW_THREADS ret = SQLDataSources(henv, nDirection, szDSN, _countof(szDSN), &cbDSN, szDesc, _countof(szDesc), &cbDesc); Py_END_ALLOW_THREADS if 
(!SQL_SUCCEEDED(ret)) break; PyDict_SetItemString(result, (const char*)szDSN, PyString_FromString((const char*)szDesc)); nDirection = SQL_FETCH_NEXT; } if (ret != SQL_NO_DATA) { Py_DECREF(result); return RaiseErrorFromHandle("SQLDataSources", SQL_NULL_HANDLE, SQL_NULL_HANDLE); } return result; } static PyObject* mod_timefromticks(PyObject* self, PyObject* args) { UNUSED(self); PyObject* num; if (!PyArg_ParseTuple(args, "O", &num)) return 0; if (!PyNumber_Check(num)) return PyErr_Format(PyExc_TypeError, "TimeFromTicks requires a number."); Object l(PyNumber_Long(num)); if (!l) return 0; time_t t = PyLong_AsLong(num); struct tm* fields = localtime(&t); return PyTime_FromTime(fields->tm_hour, fields->tm_min, fields->tm_sec, 0); } static PyObject* mod_datefromticks(PyObject* self, PyObject* args) { UNUSED(self); return PyDate_FromTimestamp(args); } static PyObject* mod_timestampfromticks(PyObject* self, PyObject* args) { UNUSED(self); return PyDateTime_FromTimestamp(args); } static char connect_doc[] = "connect(str, autocommit=False, ansi=False, timeout=0, **kwargs) --> Connection\n" "\n" "Accepts an ODBC connection string and returns a new Connection object.\n" "\n" "The connection string will be passed to SQLDriverConnect, so a DSN connection\n" "can be created using:\n" "\n" " cnxn = pyodbc.connect('DSN=DataSourceName;UID=user;PWD=password')\n" "\n" "To connect without requiring a DSN, specify the driver and connection\n" "information:\n" "\n" " DRIVER={SQL Server};SERVER=localhost;DATABASE=testdb;UID=user;PWD=password\n" "\n" "Note the use of braces when a value contains spaces. Refer to SQLDriverConnect\n" "documentation or the documentation of your ODBC driver for details.\n" "\n" "The connection string can be passed as the string `str`, as a list of keywords,\n" "or a combination of the two. 
Any keywords except autocommit, ansi, and timeout\n" "(see below) are simply added to the connection string.\n" "\n" " connect('server=localhost;user=me')\n" " connect(server='localhost', user='me')\n" " connect('server=localhost', user='me')\n" "\n" "The DB API recommends the keywords 'user', 'password', and 'host', but these\n" "are not valid ODBC keywords, so these will be converted to 'uid', 'pwd', and\n" "'server'.\n" "\n" "Special Keywords\n" "\n" "The following specal keywords are processed by pyodbc and are not added to the\n" "connection string. (If you must use these in your connection string, pass them\n" "as a string, not as keywords.)\n" "\n" " autocommit\n" " If False or zero, the default, transactions are created automatically as\n" " defined in the DB API 2. If True or non-zero, the connection is put into\n" " ODBC autocommit mode and statements are committed automatically.\n" " \n" " ansi\n" " By default, pyodbc first attempts to connect using the Unicode version of\n" " SQLDriverConnectW. If the driver returns IM001 indicating it does not\n" " support the Unicode version, the ANSI version is tried. Any other SQLSTATE\n" " is turned into an exception. Setting ansi to true skips the Unicode\n" " attempt and only connects using the ANSI version. This is useful for\n" " drivers that return the wrong SQLSTATE (or if pyodbc is out of date and\n" " should support other SQLSTATEs).\n" " \n" " timeout\n" " An integer login timeout in seconds, used to set the SQL_ATTR_LOGIN_TIMEOUT\n" " attribute of the connection. 
The default is 0 which means the database's\n" " default timeout, if any, is used.\n"; static char timefromticks_doc[] = "TimeFromTicks(ticks) --> datetime.time\n" "\n" "Returns a time object initialized from the given ticks value (number of seconds\n" "since the epoch; see the documentation of the standard Python time module for\n" "details)."; static char datefromticks_doc[] = "DateFromTicks(ticks) --> datetime.date\n" \ "\n" \ "Returns a date object initialized from the given ticks value (number of seconds\n" \ "since the epoch; see the documentation of the standard Python time module for\n" \ "details)."; static char timestampfromticks_doc[] = "TimestampFromTicks(ticks) --> datetime.datetime\n" \ "\n" \ "Returns a datetime object initialized from the given ticks value (number of\n" \ "seconds since the epoch; see the documentation of the standard Python time\n" \ "module for details"; static char datasources_doc[] = "dataSources() -> { DSN : Description }\n" \ "\n" \ "Returns a dictionary mapping available DSNs to their descriptions."; #ifdef PYODBC_LEAK_CHECK static PyObject* mod_leakcheck(PyObject* self, PyObject* args) { UNUSED(self, args); pyodbc_leak_check(); Py_RETURN_NONE; } #endif #ifdef WINVER static char drivers_doc[] = "drivers() -> [ driver, ... ]\n\nReturns a list of installed drivers"; static PyObject* mod_drivers(PyObject* self, PyObject* args) { UNUSED(self, args); RegKey key; long ret = RegOpenKeyEx(HKEY_LOCAL_MACHINE, "SOFTWARE\\ODBC\\ODBCINST.INI\\ODBC Drivers", 0, KEY_QUERY_VALUE, &key.hkey); if (ret != ERROR_SUCCESS) return PyErr_Format(PyExc_RuntimeError, "Unable to access the driver list in the registry. 
error=%ld", ret); Object results(PyList_New(0)); DWORD index = 0; char name[255]; DWORD length = _countof(name); while (RegEnumValue(key, index++, name, &length, 0, 0, 0, 0) == ERROR_SUCCESS) { if (ret != ERROR_SUCCESS) return PyErr_Format(PyExc_RuntimeError, "RegEnumKeyEx failed with error %ld\n", ret); PyObject* oname = PyString_FromStringAndSize(name, (Py_ssize_t)length); if (!oname) return 0; if (PyList_Append(results.Get(), oname) != 0) { Py_DECREF(oname); return 0; } length = _countof(name); } return results.Detach(); } #endif static PyMethodDef pyodbc_methods[] = { { "connect", (PyCFunction)mod_connect, METH_VARARGS|METH_KEYWORDS, connect_doc }, { "TimeFromTicks", (PyCFunction)mod_timefromticks, METH_VARARGS, timefromticks_doc }, { "DateFromTicks", (PyCFunction)mod_datefromticks, METH_VARARGS, datefromticks_doc }, { "TimestampFromTicks", (PyCFunction)mod_timestampfromticks, METH_VARARGS, timestampfromticks_doc }, { "dataSources", (PyCFunction)mod_datasources, METH_NOARGS, datasources_doc }, #ifdef WINVER { "drivers", (PyCFunction)mod_drivers, METH_NOARGS, drivers_doc }, #endif #ifdef PYODBC_LEAK_CHECK { "leakcheck", (PyCFunction)mod_leakcheck, METH_NOARGS, 0 }, #endif { 0, 0, 0, 0 } }; static void ErrorInit() { // Called during startup to initialize any variables that will be freed by ErrorCleanup. Error = 0; Warning = 0; InterfaceError = 0; DatabaseError = 0; InternalError = 0; OperationalError = 0; ProgrammingError = 0; IntegrityError = 0; DataError = 0; NotSupportedError = 0; decimal_type = 0; } static void ErrorCleanup() { // Called when an error occurs during initialization to release any objects we may have accessed. Make sure each // item released was initialized to zero. (Static objects are -- non-statics should be initialized in ErrorInit.) 
Py_XDECREF(Error); Py_XDECREF(Warning); Py_XDECREF(InterfaceError); Py_XDECREF(DatabaseError); Py_XDECREF(InternalError); Py_XDECREF(OperationalError); Py_XDECREF(ProgrammingError); Py_XDECREF(IntegrityError); Py_XDECREF(DataError); Py_XDECREF(NotSupportedError); Py_XDECREF(decimal_type); } struct ConstantDef { const char* szName; int value; }; #define MAKECONST(v) { #v, v } static const ConstantDef aConstants[] = { MAKECONST(SQL_UNKNOWN_TYPE), MAKECONST(SQL_CHAR), MAKECONST(SQL_VARCHAR), MAKECONST(SQL_LONGVARCHAR), MAKECONST(SQL_WCHAR), MAKECONST(SQL_WVARCHAR), MAKECONST(SQL_WLONGVARCHAR), MAKECONST(SQL_DECIMAL), MAKECONST(SQL_NUMERIC), MAKECONST(SQL_SMALLINT), MAKECONST(SQL_INTEGER), MAKECONST(SQL_REAL), MAKECONST(SQL_FLOAT), MAKECONST(SQL_DOUBLE), MAKECONST(SQL_BIT), MAKECONST(SQL_TINYINT), MAKECONST(SQL_BIGINT), MAKECONST(SQL_BINARY), MAKECONST(SQL_VARBINARY), MAKECONST(SQL_LONGVARBINARY), MAKECONST(SQL_TYPE_DATE), MAKECONST(SQL_TYPE_TIME), MAKECONST(SQL_TYPE_TIMESTAMP), MAKECONST(SQL_SS_TIME2), MAKECONST(SQL_SS_XML), MAKECONST(SQL_INTERVAL_MONTH), MAKECONST(SQL_INTERVAL_YEAR), MAKECONST(SQL_INTERVAL_YEAR_TO_MONTH), MAKECONST(SQL_INTERVAL_DAY), MAKECONST(SQL_INTERVAL_HOUR), MAKECONST(SQL_INTERVAL_MINUTE), MAKECONST(SQL_INTERVAL_SECOND), MAKECONST(SQL_INTERVAL_DAY_TO_HOUR), MAKECONST(SQL_INTERVAL_DAY_TO_MINUTE), MAKECONST(SQL_INTERVAL_DAY_TO_SECOND), MAKECONST(SQL_INTERVAL_HOUR_TO_MINUTE), MAKECONST(SQL_INTERVAL_HOUR_TO_SECOND), MAKECONST(SQL_INTERVAL_MINUTE_TO_SECOND), MAKECONST(SQL_GUID), MAKECONST(SQL_NULLABLE), MAKECONST(SQL_NO_NULLS), MAKECONST(SQL_NULLABLE_UNKNOWN), // MAKECONST(SQL_INDEX_BTREE), // MAKECONST(SQL_INDEX_CLUSTERED), // MAKECONST(SQL_INDEX_CONTENT), // MAKECONST(SQL_INDEX_HASHED), // MAKECONST(SQL_INDEX_OTHER), MAKECONST(SQL_SCOPE_CURROW), MAKECONST(SQL_SCOPE_TRANSACTION), MAKECONST(SQL_SCOPE_SESSION), MAKECONST(SQL_PC_UNKNOWN), MAKECONST(SQL_PC_NOT_PSEUDO), MAKECONST(SQL_PC_PSEUDO), // SQLGetInfo MAKECONST(SQL_ACCESSIBLE_PROCEDURES), 
MAKECONST(SQL_ACCESSIBLE_TABLES), MAKECONST(SQL_ACTIVE_ENVIRONMENTS), MAKECONST(SQL_AGGREGATE_FUNCTIONS), MAKECONST(SQL_ALTER_DOMAIN), MAKECONST(SQL_ALTER_TABLE), MAKECONST(SQL_ASYNC_MODE), MAKECONST(SQL_BATCH_ROW_COUNT), MAKECONST(SQL_BATCH_SUPPORT), MAKECONST(SQL_BOOKMARK_PERSISTENCE), MAKECONST(SQL_CATALOG_LOCATION), MAKECONST(SQL_CATALOG_NAME), MAKECONST(SQL_CATALOG_NAME_SEPARATOR), MAKECONST(SQL_CATALOG_TERM), MAKECONST(SQL_CATALOG_USAGE), MAKECONST(SQL_COLLATION_SEQ), MAKECONST(SQL_COLUMN_ALIAS), MAKECONST(SQL_CONCAT_NULL_BEHAVIOR), MAKECONST(SQL_CONVERT_FUNCTIONS), MAKECONST(SQL_CONVERT_VARCHAR), MAKECONST(SQL_CORRELATION_NAME), MAKECONST(SQL_CREATE_ASSERTION), MAKECONST(SQL_CREATE_CHARACTER_SET), MAKECONST(SQL_CREATE_COLLATION), MAKECONST(SQL_CREATE_DOMAIN), MAKECONST(SQL_CREATE_SCHEMA), MAKECONST(SQL_CREATE_TABLE), MAKECONST(SQL_CREATE_TRANSLATION), MAKECONST(SQL_CREATE_VIEW), MAKECONST(SQL_CURSOR_COMMIT_BEHAVIOR), MAKECONST(SQL_CURSOR_ROLLBACK_BEHAVIOR), // MAKECONST(SQL_CURSOR_ROLLBACK_SQL_CURSOR_SENSITIVITY), MAKECONST(SQL_DATABASE_NAME), MAKECONST(SQL_DATA_SOURCE_NAME), MAKECONST(SQL_DATA_SOURCE_READ_ONLY), MAKECONST(SQL_DATETIME_LITERALS), MAKECONST(SQL_DBMS_NAME), MAKECONST(SQL_DBMS_VER), MAKECONST(SQL_DDL_INDEX), MAKECONST(SQL_DEFAULT_TXN_ISOLATION), MAKECONST(SQL_DESCRIBE_PARAMETER), MAKECONST(SQL_DM_VER), MAKECONST(SQL_DRIVER_HDESC), MAKECONST(SQL_DRIVER_HENV), MAKECONST(SQL_DRIVER_HLIB), MAKECONST(SQL_DRIVER_HSTMT), MAKECONST(SQL_DRIVER_NAME), MAKECONST(SQL_DRIVER_ODBC_VER), MAKECONST(SQL_DRIVER_VER), MAKECONST(SQL_DROP_ASSERTION), MAKECONST(SQL_DROP_CHARACTER_SET), MAKECONST(SQL_DROP_COLLATION), MAKECONST(SQL_DROP_DOMAIN), MAKECONST(SQL_DROP_SCHEMA), MAKECONST(SQL_DROP_TABLE), MAKECONST(SQL_DROP_TRANSLATION), MAKECONST(SQL_DROP_VIEW), MAKECONST(SQL_DYNAMIC_CURSOR_ATTRIBUTES1), MAKECONST(SQL_DYNAMIC_CURSOR_ATTRIBUTES2), MAKECONST(SQL_EXPRESSIONS_IN_ORDERBY), MAKECONST(SQL_FILE_USAGE), MAKECONST(SQL_FORWARD_ONLY_CURSOR_ATTRIBUTES1), 
MAKECONST(SQL_FORWARD_ONLY_CURSOR_ATTRIBUTES2), MAKECONST(SQL_GETDATA_EXTENSIONS), MAKECONST(SQL_GROUP_BY), MAKECONST(SQL_IDENTIFIER_CASE), MAKECONST(SQL_IDENTIFIER_QUOTE_CHAR), MAKECONST(SQL_INDEX_KEYWORDS), MAKECONST(SQL_INFO_SCHEMA_VIEWS), MAKECONST(SQL_INSERT_STATEMENT), MAKECONST(SQL_INTEGRITY), MAKECONST(SQL_KEYSET_CURSOR_ATTRIBUTES1), MAKECONST(SQL_KEYSET_CURSOR_ATTRIBUTES2), MAKECONST(SQL_KEYWORDS), MAKECONST(SQL_LIKE_ESCAPE_CLAUSE), MAKECONST(SQL_MAX_ASYNC_CONCURRENT_STATEMENTS), MAKECONST(SQL_MAX_BINARY_LITERAL_LEN), MAKECONST(SQL_MAX_CATALOG_NAME_LEN), MAKECONST(SQL_MAX_CHAR_LITERAL_LEN), MAKECONST(SQL_MAX_COLUMNS_IN_GROUP_BY), MAKECONST(SQL_MAX_COLUMNS_IN_INDEX), MAKECONST(SQL_MAX_COLUMNS_IN_ORDER_BY), MAKECONST(SQL_MAX_COLUMNS_IN_SELECT), MAKECONST(SQL_MAX_COLUMNS_IN_TABLE), MAKECONST(SQL_MAX_COLUMN_NAME_LEN), MAKECONST(SQL_MAX_CONCURRENT_ACTIVITIES), MAKECONST(SQL_MAX_CURSOR_NAME_LEN), MAKECONST(SQL_MAX_DRIVER_CONNECTIONS), MAKECONST(SQL_MAX_IDENTIFIER_LEN), MAKECONST(SQL_MAX_INDEX_SIZE), MAKECONST(SQL_MAX_PROCEDURE_NAME_LEN), MAKECONST(SQL_MAX_ROW_SIZE), MAKECONST(SQL_MAX_ROW_SIZE_INCLUDES_LONG), MAKECONST(SQL_MAX_SCHEMA_NAME_LEN), MAKECONST(SQL_MAX_STATEMENT_LEN), MAKECONST(SQL_MAX_TABLES_IN_SELECT), MAKECONST(SQL_MAX_TABLE_NAME_LEN), MAKECONST(SQL_MAX_USER_NAME_LEN), MAKECONST(SQL_MULTIPLE_ACTIVE_TXN), MAKECONST(SQL_MULT_RESULT_SETS), MAKECONST(SQL_NEED_LONG_DATA_LEN), MAKECONST(SQL_NON_NULLABLE_COLUMNS), MAKECONST(SQL_NULL_COLLATION), MAKECONST(SQL_NUMERIC_FUNCTIONS), MAKECONST(SQL_ODBC_INTERFACE_CONFORMANCE), MAKECONST(SQL_ODBC_VER), MAKECONST(SQL_OJ_CAPABILITIES), MAKECONST(SQL_ORDER_BY_COLUMNS_IN_SELECT), MAKECONST(SQL_PARAM_ARRAY_ROW_COUNTS), MAKECONST(SQL_PARAM_ARRAY_SELECTS), MAKECONST(SQL_PARAM_TYPE_UNKNOWN), MAKECONST(SQL_PARAM_INPUT), MAKECONST(SQL_PARAM_INPUT_OUTPUT), MAKECONST(SQL_PARAM_OUTPUT), MAKECONST(SQL_RETURN_VALUE), MAKECONST(SQL_RESULT_COL), MAKECONST(SQL_PROCEDURES), MAKECONST(SQL_PROCEDURE_TERM), 
MAKECONST(SQL_QUOTED_IDENTIFIER_CASE), MAKECONST(SQL_ROW_UPDATES), MAKECONST(SQL_SCHEMA_TERM), MAKECONST(SQL_SCHEMA_USAGE), MAKECONST(SQL_SCROLL_OPTIONS), MAKECONST(SQL_SEARCH_PATTERN_ESCAPE), MAKECONST(SQL_SERVER_NAME), MAKECONST(SQL_SPECIAL_CHARACTERS), MAKECONST(SQL_SQL92_DATETIME_FUNCTIONS), MAKECONST(SQL_SQL92_FOREIGN_KEY_DELETE_RULE), MAKECONST(SQL_SQL92_FOREIGN_KEY_UPDATE_RULE), MAKECONST(SQL_SQL92_GRANT), MAKECONST(SQL_SQL92_NUMERIC_VALUE_FUNCTIONS), MAKECONST(SQL_SQL92_PREDICATES), MAKECONST(SQL_SQL92_RELATIONAL_JOIN_OPERATORS), MAKECONST(SQL_SQL92_REVOKE), MAKECONST(SQL_SQL92_ROW_VALUE_CONSTRUCTOR), MAKECONST(SQL_SQL92_STRING_FUNCTIONS), MAKECONST(SQL_SQL92_VALUE_EXPRESSIONS), MAKECONST(SQL_SQL_CONFORMANCE), MAKECONST(SQL_STANDARD_CLI_CONFORMANCE), MAKECONST(SQL_STATIC_CURSOR_ATTRIBUTES1), MAKECONST(SQL_STATIC_CURSOR_ATTRIBUTES2), MAKECONST(SQL_STRING_FUNCTIONS), MAKECONST(SQL_SUBQUERIES), MAKECONST(SQL_SYSTEM_FUNCTIONS), MAKECONST(SQL_TABLE_TERM), MAKECONST(SQL_TIMEDATE_ADD_INTERVALS), MAKECONST(SQL_TIMEDATE_DIFF_INTERVALS), MAKECONST(SQL_TIMEDATE_FUNCTIONS), MAKECONST(SQL_TXN_CAPABLE), MAKECONST(SQL_TXN_ISOLATION_OPTION), MAKECONST(SQL_UNION), MAKECONST(SQL_USER_NAME), MAKECONST(SQL_XOPEN_CLI_YEAR), }; static bool CreateExceptions() { for (unsigned int i = 0; i < _countof(aExcInfos); i++) { ExcInfo& info = aExcInfos[i]; PyObject* classdict = PyDict_New(); if (!classdict) return false; PyObject* doc = PyString_FromString(info.szDoc); if (!doc) { Py_DECREF(classdict); return false; } PyDict_SetItemString(classdict, "__doc__", doc); Py_DECREF(doc); *info.ppexc = PyErr_NewException((char*)info.szFullName, *info.ppexcParent, classdict); if (*info.ppexc == 0) { Py_DECREF(classdict); return false; } // Keep a reference for our internal (C++) use. 
Py_INCREF(*info.ppexc); PyModule_AddObject(pModule, (char*)info.szName, *info.ppexc); } return true; } #if PY_MAJOR_VERSION >= 3 static struct PyModuleDef moduledef = { PyModuleDef_HEAD_INIT, "pyodbc", // m_name module_doc, -1, // m_size pyodbc_methods, // m_methods 0, // m_reload 0, // m_traverse 0, // m_clear 0, // m_free }; #define MODRETURN(v) v #else #define MODRETURN(v) #endif PyMODINIT_FUNC #if PY_MAJOR_VERSION >= 3 PyInit_pyodbc() #else initpyodbc(void) #endif { ErrorInit(); if (PyType_Ready(&ConnectionType) < 0 || PyType_Ready(&CursorType) < 0 || PyType_Ready(&RowType) < 0 || PyType_Ready(&CnxnInfoType) < 0) return MODRETURN(0); Object module; #if PY_MAJOR_VERSION >= 3 module.Attach(PyModule_Create(&moduledef)); #else module.Attach(Py_InitModule4("pyodbc", pyodbc_methods, module_doc, NULL, PYTHON_API_VERSION)); #endif pModule = module.Get(); if (!module || !import_types() || !CreateExceptions()) return MODRETURN(0); init_locale_info(); const char* szVersion = TOSTRING(PYODBC_VERSION); PyModule_AddStringConstant(module, "version", (char*)szVersion); PyModule_AddIntConstant(module, "threadsafety", 1); PyModule_AddStringConstant(module, "apilevel", "2.0"); PyModule_AddStringConstant(module, "paramstyle", "qmark"); PyModule_AddObject(module, "pooling", Py_True); Py_INCREF(Py_True); PyModule_AddObject(module, "lowercase", Py_False); Py_INCREF(Py_False); PyModule_AddObject(module, "Connection", (PyObject*)&ConnectionType); Py_INCREF((PyObject*)&ConnectionType); PyModule_AddObject(module, "Cursor", (PyObject*)&CursorType); Py_INCREF((PyObject*)&CursorType); PyModule_AddObject(module, "Row", (PyObject*)&RowType); Py_INCREF((PyObject*)&RowType); // Add the SQL_XXX defines from ODBC. 
for (unsigned int i = 0; i < _countof(aConstants); i++) PyModule_AddIntConstant(module, (char*)aConstants[i].szName, aConstants[i].value); PyModule_AddObject(module, "Date", (PyObject*)PyDateTimeAPI->DateType); Py_INCREF((PyObject*)PyDateTimeAPI->DateType); PyModule_AddObject(module, "Time", (PyObject*)PyDateTimeAPI->TimeType); Py_INCREF((PyObject*)PyDateTimeAPI->TimeType); PyModule_AddObject(module, "Timestamp", (PyObject*)PyDateTimeAPI->DateTimeType); Py_INCREF((PyObject*)PyDateTimeAPI->DateTimeType); PyModule_AddObject(module, "DATETIME", (PyObject*)PyDateTimeAPI->DateTimeType); Py_INCREF((PyObject*)PyDateTimeAPI->DateTimeType); PyModule_AddObject(module, "STRING", (PyObject*)&PyString_Type); Py_INCREF((PyObject*)&PyString_Type); PyModule_AddObject(module, "NUMBER", (PyObject*)&PyFloat_Type); Py_INCREF((PyObject*)&PyFloat_Type); PyModule_AddObject(module, "ROWID", (PyObject*)&PyInt_Type); Py_INCREF((PyObject*)&PyInt_Type); PyObject* binary_type; #if PY_VERSION_HEX >= 0x02060000 binary_type = (PyObject*)&PyByteArray_Type; #else binary_type = (PyObject*)&PyBuffer_Type; #endif PyModule_AddObject(module, "BINARY", binary_type); Py_INCREF(binary_type); PyModule_AddObject(module, "Binary", binary_type); Py_INCREF(binary_type); I(null_binary != 0); // must be initialized first PyModule_AddObject(module, "BinaryNull", null_binary); PyModule_AddIntConstant(module, "UNICODE_SIZE", sizeof(Py_UNICODE)); PyModule_AddIntConstant(module, "SQLWCHAR_SIZE", sizeof(SQLWCHAR)); if (!PyErr_Occurred()) { module.Detach(); } else { ErrorCleanup(); } return MODRETURN(pModule); } #ifdef WINVER BOOL WINAPI DllMain( HINSTANCE hMod, DWORD fdwReason, LPVOID lpvReserved ) { UNUSED(hMod, fdwReason, lpvReserved); return TRUE; } #endif static PyObject* MakeConnectionString(PyObject* existing, PyObject* parts) { // Creates a connection string from an optional existing connection string plus a dictionary of keyword value // pairs. 
// // existing // Optional Unicode connection string we will be appending to. Used when a partial connection string is passed // in, followed by keyword parameters: // // connect("driver={x};database={y}", user='z') // // parts // A dictionary of text keywords and text values that will be appended. I(PyUnicode_Check(existing)); Py_ssize_t length = 0; // length in *characters* if (existing) length = Text_Size(existing) + 1; // + 1 to add a trailing semicolon Py_ssize_t pos = 0; PyObject* key = 0; PyObject* value = 0; while (PyDict_Next(parts, &pos, &key, &value)) { length += Text_Size(key) + 1 + Text_Size(value) + 1; // key=value; } PyObject* result = PyUnicode_FromUnicode(0, length); if (!result) return 0; Py_UNICODE* buffer = PyUnicode_AS_UNICODE(result); Py_ssize_t offset = 0; if (existing) { offset += TextCopyToUnicode(&buffer[offset], existing); buffer[offset++] = (Py_UNICODE)';'; } pos = 0; while (PyDict_Next(parts, &pos, &key, &value)) { offset += TextCopyToUnicode(&buffer[offset], key); buffer[offset++] = (Py_UNICODE)'='; offset += TextCopyToUnicode(&buffer[offset], value); buffer[offset++] = (Py_UNICODE)';'; } I(offset == length); return result; } pyodbc-3.0.7/src/row.h0000666000175000017500000000301212146217440013223 0ustar dokodoko /* * Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated * documentation files (the "Software"), to deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to * permit persons to whom the Software is furnished to do so. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE * WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS * OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #ifndef ROW_H #define ROW_H struct Row; /* * Used to make a new row from an array of column values. */ Row* Row_InternalNew(PyObject* description, PyObject* map_name_to_index, Py_ssize_t cValues, PyObject** apValues); /* * Dereferences each object in apValues and frees apValue. This is the internal format used by rows. * * cValues: The number of items to free in apValues. * * apValues: The array of values. This can be NULL. */ void FreeRowValues(Py_ssize_t cValues, PyObject** apValues); extern PyTypeObject RowType; #define Row_Check(op) PyObject_TypeCheck(op, &RowType) #define Row_CheckExact(op) (Py_TYPE(op) == &RowType) #endif pyodbc-3.0.7/src/pyodbccompat.cpp0000666000175000017500000000133312031131304015422 0ustar dokodoko #include "pyodbc.h" bool Text_EqualsI(PyObject* lhs, const char* rhs) { #if PY_MAJOR_VERSION < 3 // In Python 2, allow ANSI strings. 
if (lhs && PyString_Check(lhs)) return _strcmpi(PyString_AS_STRING(lhs), rhs) == 0; #endif if (lhs == 0 || !PyUnicode_Check(lhs)) return false; Py_ssize_t cchLHS = PyUnicode_GET_SIZE(lhs); Py_ssize_t cchRHS = (Py_ssize_t)strlen(rhs); if (cchLHS != cchRHS) return false; Py_UNICODE* p = PyUnicode_AS_UNICODE(lhs); for (Py_ssize_t i = 0; i < cchLHS; i++) { int chL = (int)Py_UNICODE_TOUPPER(p[i]); int chR = (int)toupper(rhs[i]); if (chL != chR) return false; } return true; } pyodbc-3.0.7/src/cnxninfo.h0000666000175000017500000000353612146217440014251 0ustar dokodoko // Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated // documentation files (the "Software"), to deal in the Software without restriction, including without limitation the // rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to // permit persons to whom the Software is furnished to do so. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE // WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS // OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR // OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #ifndef CNXNINFO_H #define CNXNINFO_H struct Connection; extern PyTypeObject CnxnInfoType; struct CnxnInfo { PyObject_HEAD // The description of these fields is in the connection structure. char odbc_major; char odbc_minor; bool supports_describeparam; int datetime_precision; // Do we need to use SQL_LEN_DATA_AT_EXEC? Some drivers (e.g. FreeTDS 0.91) have problems with long values, so // we'll use SQL_DATA_AT_EXEC when possible. If this is true, however, we'll need to pass the length. 
bool need_long_data_len; // These are from SQLGetTypeInfo.column_size, so the char ones are in characters, not bytes. int varchar_maxlength; int wvarchar_maxlength; int binary_maxlength; }; void CnxnInfo_init(); // Looks-up or creates a CnxnInfo object for the given connection string. The connection string can be a Unicode or // String object. PyObject* GetConnectionInfo(PyObject* pConnectionString, Connection* cnxn); #endif // CNXNINFO_H pyodbc-3.0.7/src/getdata.cpp0000666000175000017500000005604712034534654014405 0ustar dokodoko // The functions for reading a single value from the database using SQLGetData. There is a different function for // every data type. #include "pyodbc.h" #include "pyodbcmodule.h" #include "cursor.h" #include "connection.h" #include "errors.h" #include "dbspecific.h" #include "sqlwchar.h" #include "wrapper.h" #include void GetData_init() { PyDateTime_IMPORT; } class DataBuffer { // Manages memory that GetDataString uses to read data in chunks. We use the same function (GetDataString) to read // variable length data for 3 different types of data: binary, ANSI, and Unicode. This class abstracts out the // memory management details to keep the function simple. // // There are 3 potential data buffer types we deal with in GetDataString: // // 1) Binary, which is a simple array of 8-bit bytes. // 2) ANSI text, which is an array of chars with a NULL terminator. // 3) Unicode text, which is an array of SQLWCHARs with a NULL terminator. // // When dealing with Unicode, there are two widths we have to be aware of: (1) SQLWCHAR and (2) Py_UNICODE. If // these are the same we can use a PyUnicode object so we don't have to allocate our own buffer and then the // Unicode object. If they are not the same (e.g. OS/X where wchar_t-->4 Py_UNICODE-->2) then we need to maintain // our own buffer and pass it to the PyUnicode object later. Many Linux distros are now using UCS4, so Py_UNICODE // will be larger than SQLWCHAR. 
// // To reduce heap fragmentation, we perform the initial read into an array on the stack since we don't know the // length of the data. If the data doesn't fit, this class then allocates new memory. If the first read gives us // the length, then we create a Python object of the right size and read into its memory. private: SQLSMALLINT dataType; char* buffer; Py_ssize_t bufferSize; // How big is the buffer. int bytesUsed; // How many elements have been read into the buffer? PyObject* bufferOwner; // If possible, we bind into a PyString, PyUnicode, or PyByteArray object. int element_size; // How wide is each character: ASCII/ANSI -> 1, Unicode -> 2 or 4, binary -> 1 bool usingStack; // Is buffer pointing to the initial stack buffer? public: int null_size; // How much room, in bytes, to add for null terminator: binary -> 0, other -> same as a element_size DataBuffer(SQLSMALLINT dataType, char* stackBuffer, SQLLEN stackBufferSize) { // dataType // The type of data we will be reading: SQL_C_CHAR, SQL_C_WCHAR, or SQL_C_BINARY. this->dataType = dataType; element_size = (int)((dataType == SQL_C_WCHAR) ? sizeof(SQLWCHAR) : sizeof(char)); null_size = (dataType == SQL_C_BINARY) ? 0 : element_size; buffer = stackBuffer; bufferSize = stackBufferSize; usingStack = true; bufferOwner = 0; bytesUsed = 0; } ~DataBuffer() { if (!usingStack) { if (bufferOwner) { Py_DECREF(bufferOwner); } else { pyodbc_free(buffer); } } } char* GetBuffer() { if (!buffer) return 0; return buffer + bytesUsed; } SQLLEN GetRemaining() { // Returns the amount of data remaining in the buffer, ready to be passed to SQLGetData. return bufferSize - bytesUsed; } void AddUsed(SQLLEN cbRead) { I(cbRead <= GetRemaining()); bytesUsed += (int)cbRead; } bool AllocateMore(SQLLEN cbAdd) { // cbAdd // The number of bytes (cb --> count of bytes) to add. if (cbAdd == 0) return true; SQLLEN newSize = bufferSize + cbAdd; if (usingStack) { // This is the first call and `buffer` points to stack memory. 
Allocate a new object and copy the stack // data into it. char* stackBuffer = buffer; if (dataType == SQL_C_CHAR) { bufferOwner = PyBytes_FromStringAndSize(0, newSize); buffer = bufferOwner ? PyBytes_AS_STRING(bufferOwner) : 0; } else if (dataType == SQL_C_BINARY) { #if PY_VERSION_HEX >= 0x02060000 bufferOwner = PyByteArray_FromStringAndSize(0, newSize); buffer = bufferOwner ? PyByteArray_AS_STRING(bufferOwner) : 0; #else bufferOwner = PyBytes_FromStringAndSize(0, newSize); buffer = bufferOwner ? PyBytes_AS_STRING(bufferOwner) : 0; #endif } else if (sizeof(SQLWCHAR) == Py_UNICODE_SIZE) { // Allocate directly into a Unicode object. bufferOwner = PyUnicode_FromUnicode(0, newSize / element_size); buffer = bufferOwner ? (char*)PyUnicode_AsUnicode(bufferOwner) : 0; } else { // We're Unicode, but SQLWCHAR and Py_UNICODE don't match, so maintain our own SQLWCHAR buffer. bufferOwner = 0; buffer = (char*)pyodbc_malloc((size_t)newSize); } if (buffer == 0) return false; usingStack = false; memcpy(buffer, stackBuffer, (size_t)bufferSize); bufferSize = newSize; return true; } if (bufferOwner && PyUnicode_CheckExact(bufferOwner)) { if (PyUnicode_Resize(&bufferOwner, newSize / element_size) == -1) return false; buffer = (char*)PyUnicode_AsUnicode(bufferOwner); } #if PY_VERSION_HEX >= 0x02060000 else if (bufferOwner && PyByteArray_CheckExact(bufferOwner)) { if (PyByteArray_Resize(bufferOwner, newSize) == -1) return false; buffer = PyByteArray_AS_STRING(bufferOwner); } #endif else if (bufferOwner && PyBytes_CheckExact(bufferOwner)) { if (_PyBytes_Resize(&bufferOwner, newSize) == -1) return false; buffer = PyBytes_AS_STRING(bufferOwner); } else { char* tmp = (char*)realloc(buffer, (size_t)newSize); if (tmp == 0) return false; buffer = tmp; } bufferSize = newSize; return true; } PyObject* DetachValue() { // At this point, Trim should have been called by PostRead. 
if (bytesUsed == SQL_NULL_DATA || buffer == 0) Py_RETURN_NONE; if (usingStack) { if (dataType == SQL_C_CHAR) return PyBytes_FromStringAndSize(buffer, bytesUsed); if (dataType == SQL_C_BINARY) { #if PY_VERSION_HEX >= 0x02060000 return PyByteArray_FromStringAndSize(buffer, bytesUsed); #else return PyBytes_FromStringAndSize(buffer, bytesUsed); #endif } if (sizeof(SQLWCHAR) == Py_UNICODE_SIZE) return PyUnicode_FromUnicode((const Py_UNICODE*)buffer, bytesUsed / element_size); return PyUnicode_FromSQLWCHAR((const SQLWCHAR*)buffer, bytesUsed / element_size); } if (bufferOwner && PyUnicode_CheckExact(bufferOwner)) { if (PyUnicode_Resize(&bufferOwner, bytesUsed / element_size) == -1) return 0; PyObject* tmp = bufferOwner; bufferOwner = 0; buffer = 0; return tmp; } if (bufferOwner && PyBytes_CheckExact(bufferOwner)) { if (_PyBytes_Resize(&bufferOwner, bytesUsed) == -1) return 0; PyObject* tmp = bufferOwner; bufferOwner = 0; buffer = 0; return tmp; } #if PY_VERSION_HEX >= 0x02060000 if (bufferOwner && PyByteArray_CheckExact(bufferOwner)) { if (PyByteArray_Resize(bufferOwner, bytesUsed) == -1) return 0; PyObject* tmp = bufferOwner; bufferOwner = 0; buffer = 0; return tmp; } #endif // We have allocated our own SQLWCHAR buffer and must now copy it to a Unicode object. I(bufferOwner == 0); PyObject* result = PyUnicode_FromSQLWCHAR((const SQLWCHAR*)buffer, bytesUsed / element_size); if (result == 0) return 0; pyodbc_free(buffer); buffer = 0; return result; } }; static PyObject* GetDataString(Cursor* cur, Py_ssize_t iCol) { // Returns a string, unicode, or bytearray object for character and binary data. // // In Python 2.6+, binary data is returned as a byte array. Earlier versions will return an ASCII str object here // which will be wrapped in a buffer object by the caller. // // NULL terminator notes: // // * pinfo->column_size, from SQLDescribeCol, does not include a NULL terminator. For example, column_size for a // char(10) column would be 10. 
(Also, when dealing with SQLWCHAR, it is the number of *characters*, not bytes.) // // * When passing a length to PyString_FromStringAndSize and similar Unicode functions, do not add the NULL // terminator -- it will be added automatically. See objects/stringobject.c // // * SQLGetData does not return the NULL terminator in the length indicator. (Therefore, you can pass this value // directly to the Python string functions.) // // * SQLGetData will write a NULL terminator in the output buffer, so you must leave room for it. You must also // include the NULL terminator in the buffer length passed to SQLGetData. // // ODBC generalization: // 1) Include NULL terminators in input buffer lengths. // 2) NULL terminators are not used in data lengths. ColumnInfo* pinfo = &cur->colinfos[iCol]; // Some Unix ODBC drivers do not return the correct length. if (pinfo->sql_type == SQL_GUID) pinfo->column_size = 36; SQLSMALLINT nTargetType; switch (pinfo->sql_type) { case SQL_CHAR: case SQL_VARCHAR: case SQL_LONGVARCHAR: case SQL_GUID: case SQL_SS_XML: #if PY_MAJOR_VERSION < 3 if (cur->cnxn->unicode_results) nTargetType = SQL_C_WCHAR; else nTargetType = SQL_C_CHAR; #else nTargetType = SQL_C_WCHAR; #endif break; case SQL_WCHAR: case SQL_WVARCHAR: case SQL_WLONGVARCHAR: nTargetType = SQL_C_WCHAR; break; default: nTargetType = SQL_C_BINARY; break; } char tempBuffer[1026]; // Pad with 2 bytes for driver bugs DataBuffer buffer(nTargetType, tempBuffer, sizeof(tempBuffer)-2); for (int iDbg = 0; iDbg < 10; iDbg++) // failsafe { SQLRETURN ret; SQLLEN cbData = 0; Py_BEGIN_ALLOW_THREADS ret = SQLGetData(cur->hstmt, (SQLUSMALLINT)(iCol+1), nTargetType, buffer.GetBuffer(), buffer.GetRemaining(), &cbData); Py_END_ALLOW_THREADS; if (cbData == SQL_NULL_DATA || (ret == SQL_SUCCESS && cbData < 0)) { // HACK: FreeTDS 0.91 on OS/X returns -4 for NULL data instead of SQL_NULL_DATA (-1). 
I've traced into the // code and it appears to be the result of assigning -1 to a SQLLEN: // // if (colinfo->column_cur_size < 0) { // /* TODO check what should happen if pcbValue was NULL */ // *pcbValue = SQL_NULL_DATA; // // I believe it will be fine to treat all negative values as NULL for now. Py_RETURN_NONE; } if (!SQL_SUCCEEDED(ret) && ret != SQL_NO_DATA) return RaiseErrorFromHandle("SQLGetData", cur->cnxn->hdbc, cur->hstmt); // The SQLGetData behavior is incredibly quirky. It doesn't tell us the total, the total we've read, or even // the amount just read. It returns the amount just read, plus any remaining. Unfortunately, the only way to // pick them apart is to subtract out the amount of buffer we supplied. SQLLEN cbBuffer = buffer.GetRemaining(); // how much we gave SQLGetData if (ret == SQL_SUCCESS_WITH_INFO) { // There is more data than fits in the buffer. The amount of data equals the amount of data in the buffer // minus a NULL terminator. SQLLEN cbRead; SQLLEN cbMore; if (cbData == SQL_NO_TOTAL) { // We don't know how much more, so just guess. cbRead = cbBuffer - buffer.null_size; cbMore = 2048; } else if (cbData >= cbBuffer) { // There is more data. We supplied cbBuffer, but there was cbData (more). We received cbBuffer, so we // need to subtract that, allocate enough to read the rest (cbData-cbBuffer). cbRead = cbBuffer - buffer.null_size; cbMore = cbData - cbRead; } else { // I'm not really sure why I would be here ... I would have expected SQL_SUCCESS cbRead = cbData - buffer.null_size; cbMore = 0; } buffer.AddUsed(cbRead); if (!buffer.AllocateMore(cbMore)) return PyErr_NoMemory(); } else if (ret == SQL_SUCCESS) { // For some reason, the NULL terminator is used in intermediate buffers but not in this final one. buffer.AddUsed(cbData); } if (ret == SQL_SUCCESS || ret == SQL_NO_DATA) return buffer.DetachValue(); } // REVIEW: Add an error message. 
return 0; } static PyObject* GetDataUser(Cursor* cur, Py_ssize_t iCol, int conv) { // conv // The index into the connection's user-defined conversions `conv_types`. PyObject* value = GetDataString(cur, iCol); if (value == 0) return 0; PyObject* result = PyObject_CallFunction(cur->cnxn->conv_funcs[conv], "(O)", value); Py_DECREF(value); return result; } #if PY_VERSION_HEX < 0x02060000 static PyObject* GetDataBuffer(Cursor* cur, Py_ssize_t iCol) { PyObject* str = GetDataString(cur, iCol); if (str == Py_None) return str; PyObject* buffer = 0; if (str) { buffer = PyBuffer_FromObject(str, 0, PyString_GET_SIZE(str)); Py_DECREF(str); // If no buffer, release it. If buffer, the buffer owns it. } return buffer; } #endif static PyObject* GetDataDecimal(Cursor* cur, Py_ssize_t iCol) { // The SQL_NUMERIC_STRUCT support is hopeless (SQL Server ignores scale on input parameters and output columns, // Oracle does something else weird, and many drivers don't support it at all), so we'll rely on the Decimal's // string parsing. Unfortunately, the Decimal author does not pay attention to the locale, so we have to modify // the string ourselves. // // Oracle inserts group separators (commas in US, periods in some countries), so leave room for that too. // // Some databases support a 'money' type which also inserts currency symbols. Since we don't want to keep track of // all these, we'll ignore all characters we don't recognize. We will look for digits, negative sign (which I hope // is universal), and a decimal point ('.' or ',' usually). We'll do everything as Unicode in case currencies, // etc. are too far out. // TODO: Is Unicode a good idea for Python 2.7? We need to know which drivers support Unicode. SQLWCHAR buffer[100]; SQLLEN cbFetched = 0; // Note: will not include the NULL terminator. 
SQLRETURN ret; Py_BEGIN_ALLOW_THREADS ret = SQLGetData(cur->hstmt, (SQLUSMALLINT)(iCol+1), SQL_C_WCHAR, buffer, sizeof(buffer), &cbFetched); Py_END_ALLOW_THREADS if (!SQL_SUCCEEDED(ret)) return RaiseErrorFromHandle("SQLGetData", cur->cnxn->hdbc, cur->hstmt); if (cbFetched == SQL_NULL_DATA) Py_RETURN_NONE; // Remove non-digits and convert the databases decimal to a '.' (required by decimal ctor). // // We are assuming that the decimal point and digits fit within the size of SQLWCHAR. int cch = (int)(cbFetched / sizeof(SQLWCHAR)); for (int i = (cch - 1); i >= 0; i--) { if (buffer[i] == chDecimal) { // Must force it to use '.' since the Decimal class doesn't pay attention to the locale. buffer[i] = '.'; } else if ((buffer[i] < '0' || buffer[i] > '9') && buffer[i] != '-') { memmove(&buffer[i], &buffer[i] + 1, (cch - i) * sizeof(SQLWCHAR)); cch--; } } I(buffer[cch] == 0); Object str(PyUnicode_FromSQLWCHAR(buffer, cch)); if (!str) return 0; return PyObject_CallFunction(decimal_type, "O", str.Get()); } static PyObject* GetDataBit(Cursor* cur, Py_ssize_t iCol) { SQLCHAR ch; SQLLEN cbFetched; SQLRETURN ret; Py_BEGIN_ALLOW_THREADS ret = SQLGetData(cur->hstmt, (SQLUSMALLINT)(iCol+1), SQL_C_BIT, &ch, sizeof(ch), &cbFetched); Py_END_ALLOW_THREADS if (!SQL_SUCCEEDED(ret)) return RaiseErrorFromHandle("SQLGetData", cur->cnxn->hdbc, cur->hstmt); if (cbFetched == SQL_NULL_DATA) Py_RETURN_NONE; if (ch == SQL_TRUE) Py_RETURN_TRUE; Py_RETURN_FALSE; } static PyObject* GetDataLong(Cursor* cur, Py_ssize_t iCol) { ColumnInfo* pinfo = &cur->colinfos[iCol]; SQLINTEGER value; SQLLEN cbFetched; SQLRETURN ret; SQLSMALLINT nCType = pinfo->is_unsigned ? 
SQL_C_ULONG : SQL_C_LONG; Py_BEGIN_ALLOW_THREADS ret = SQLGetData(cur->hstmt, (SQLUSMALLINT)(iCol+1), nCType, &value, sizeof(value), &cbFetched); Py_END_ALLOW_THREADS if (!SQL_SUCCEEDED(ret)) return RaiseErrorFromHandle("SQLGetData", cur->cnxn->hdbc, cur->hstmt); if (cbFetched == SQL_NULL_DATA) Py_RETURN_NONE; if (pinfo->is_unsigned) return PyInt_FromLong(*(SQLINTEGER*)&value); return PyInt_FromLong(value); } static PyObject* GetDataLongLong(Cursor* cur, Py_ssize_t iCol) { ColumnInfo* pinfo = &cur->colinfos[iCol]; SQLSMALLINT nCType = pinfo->is_unsigned ? SQL_C_UBIGINT : SQL_C_SBIGINT; SQLBIGINT value; SQLLEN cbFetched; SQLRETURN ret; Py_BEGIN_ALLOW_THREADS ret = SQLGetData(cur->hstmt, (SQLUSMALLINT)(iCol+1), nCType, &value, sizeof(value), &cbFetched); Py_END_ALLOW_THREADS if (!SQL_SUCCEEDED(ret)) return RaiseErrorFromHandle("SQLGetData", cur->cnxn->hdbc, cur->hstmt); if (cbFetched == SQL_NULL_DATA) Py_RETURN_NONE; if (pinfo->is_unsigned) return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG)(SQLUBIGINT)value); return PyLong_FromLongLong((PY_LONG_LONG)value); } static PyObject* GetDataDouble(Cursor* cur, Py_ssize_t iCol) { double value; SQLLEN cbFetched = 0; SQLRETURN ret; Py_BEGIN_ALLOW_THREADS ret = SQLGetData(cur->hstmt, (SQLUSMALLINT)(iCol+1), SQL_C_DOUBLE, &value, sizeof(value), &cbFetched); Py_END_ALLOW_THREADS if (!SQL_SUCCEEDED(ret)) return RaiseErrorFromHandle("SQLGetData", cur->cnxn->hdbc, cur->hstmt); if (cbFetched == SQL_NULL_DATA) Py_RETURN_NONE; return PyFloat_FromDouble(value); } static PyObject* GetSqlServerTime(Cursor* cur, Py_ssize_t iCol) { SQL_SS_TIME2_STRUCT value; SQLLEN cbFetched = 0; SQLRETURN ret; Py_BEGIN_ALLOW_THREADS ret = SQLGetData(cur->hstmt, (SQLUSMALLINT)(iCol+1), SQL_C_BINARY, &value, sizeof(value), &cbFetched); Py_END_ALLOW_THREADS if (!SQL_SUCCEEDED(ret)) return RaiseErrorFromHandle("SQLGetData", cur->cnxn->hdbc, cur->hstmt); if (cbFetched == SQL_NULL_DATA) Py_RETURN_NONE; int micros = (int)(value.fraction / 1000); // nanos 
--> micros return PyTime_FromTime(value.hour, value.minute, value.second, micros); } static PyObject* GetDataTimestamp(Cursor* cur, Py_ssize_t iCol) { TIMESTAMP_STRUCT value; SQLLEN cbFetched = 0; SQLRETURN ret; Py_BEGIN_ALLOW_THREADS ret = SQLGetData(cur->hstmt, (SQLUSMALLINT)(iCol+1), SQL_C_TYPE_TIMESTAMP, &value, sizeof(value), &cbFetched); Py_END_ALLOW_THREADS if (!SQL_SUCCEEDED(ret)) return RaiseErrorFromHandle("SQLGetData", cur->cnxn->hdbc, cur->hstmt); if (cbFetched == SQL_NULL_DATA) Py_RETURN_NONE; switch (cur->colinfos[iCol].sql_type) { case SQL_TYPE_TIME: { int micros = (int)(value.fraction / 1000); // nanos --> micros return PyTime_FromTime(value.hour, value.minute, value.second, micros); } case SQL_TYPE_DATE: return PyDate_FromDate(value.year, value.month, value.day); } int micros = (int)(value.fraction / 1000); // nanos --> micros return PyDateTime_FromDateAndTime(value.year, value.month, value.day, value.hour, value.minute, value.second, micros); } int GetUserConvIndex(Cursor* cur, SQLSMALLINT sql_type) { // If this sql type has a user-defined conversion, the index into the connection's `conv_funcs` array is returned. // Otherwise -1 is returned. for (int i = 0; i < cur->cnxn->conv_count; i++) if (cur->cnxn->conv_types[i] == sql_type) return i; return -1; } PyObject* GetData(Cursor* cur, Py_ssize_t iCol) { // Returns an object representing the value in the row/field. If 0 is returned, an exception has already been set. // // The data is assumed to be the default C type for the column's SQL type. ColumnInfo* pinfo = &cur->colinfos[iCol]; // First see if there is a user-defined conversion. 
int conv_index = GetUserConvIndex(cur, pinfo->sql_type); if (conv_index != -1) return GetDataUser(cur, iCol, conv_index); switch (pinfo->sql_type) { case SQL_WCHAR: case SQL_WVARCHAR: case SQL_WLONGVARCHAR: case SQL_CHAR: case SQL_VARCHAR: case SQL_LONGVARCHAR: case SQL_GUID: case SQL_SS_XML: #if PY_VERSION_HEX >= 0x02060000 case SQL_BINARY: case SQL_VARBINARY: case SQL_LONGVARBINARY: #endif return GetDataString(cur, iCol); #if PY_VERSION_HEX < 0x02060000 case SQL_BINARY: case SQL_VARBINARY: case SQL_LONGVARBINARY: return GetDataBuffer(cur, iCol); #endif case SQL_DECIMAL: case SQL_NUMERIC: { if (decimal_type == 0) break; return GetDataDecimal(cur, iCol); } case SQL_BIT: return GetDataBit(cur, iCol); case SQL_TINYINT: case SQL_SMALLINT: case SQL_INTEGER: return GetDataLong(cur, iCol); case SQL_BIGINT: return GetDataLongLong(cur, iCol); case SQL_REAL: case SQL_FLOAT: case SQL_DOUBLE: return GetDataDouble(cur, iCol); case SQL_TYPE_DATE: case SQL_TYPE_TIME: case SQL_TYPE_TIMESTAMP: return GetDataTimestamp(cur, iCol); case SQL_SS_TIME2: return GetSqlServerTime(cur, iCol); } return RaiseErrorV("HY106", ProgrammingError, "ODBC SQL type %d is not yet supported. column-index=%zd type=%d", (int)pinfo->sql_type, iCol, (int)pinfo->sql_type); } pyodbc-3.0.7/src/buffer.h0000666000175000017500000000420612031131304013656 0ustar dokodoko // Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated // documentation files (the "Software"), to deal in the Software without restriction, including without limitation the // rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to // permit persons to whom the Software is furnished to do so. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE // WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS // OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR // OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #ifndef _BUFFER_H #define _BUFFER_H #if PY_MAJOR_VERSION < 3 // If the buffer object has a single, accessible segment, returns the length of the buffer. If 'pp' is not NULL, the // address of the segment is also returned. If there is more than one segment or if it cannot be accessed, -1 is // returned and 'pp' is not modified. Py_ssize_t PyBuffer_GetMemory(PyObject* buffer, const char** pp); // Returns the size of a Python buffer. // // If an error occurs, zero is returned, but zero is a valid buffer size (I guess), so use PyErr_Occurred to determine // if it represents a failure. Py_ssize_t PyBuffer_Size(PyObject* self); class BufferSegmentIterator { PyObject* pBuffer; Py_ssize_t iSegment; Py_ssize_t cSegments; public: BufferSegmentIterator(PyObject* _pBuffer) { pBuffer = _pBuffer; PyBufferProcs* procs = Py_TYPE(pBuffer)->tp_as_buffer; iSegment = 0; cSegments = procs->bf_getsegcount(pBuffer, 0); } bool Next(byte*& pb, SQLLEN &cb) { if (iSegment >= cSegments) return false; PyBufferProcs* procs = Py_TYPE(pBuffer)->tp_as_buffer; cb = procs->bf_getreadbuffer(pBuffer, iSegment++, (void**)&pb); return true; } }; #endif // PY_MAJOR_VERSION #endif pyodbc-3.0.7/src/buffer.cpp0000666000175000017500000000366212031131304014216 0ustar dokodoko // Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated // documentation files (the "Software"), to deal in the Software without restriction, including without limitation the // rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to // permit persons to whom the Software is furnished to do so. 
// // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE // WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS // OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR // OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #include "pyodbc.h" #if PY_MAJOR_VERSION < 3 #include "buffer.h" #include "pyodbcmodule.h" Py_ssize_t PyBuffer_GetMemory(PyObject* buffer, const char** pp) { PyBufferProcs* procs = Py_TYPE(buffer)->tp_as_buffer; if (!procs || !PyType_HasFeature(Py_TYPE(buffer), Py_TPFLAGS_HAVE_GETCHARBUFFER)) { // Can't access the memory directly because the buffer object doesn't support it. return -1; } if (procs->bf_getsegcount(buffer, 0) != 1) { // Can't access the memory directly because there is more than one segment. return -1; } #if PY_VERSION_HEX >= 0x02050000 char* pT = 0; #else const char* pT = 0; #endif Py_ssize_t cb = procs->bf_getcharbuffer(buffer, 0, &pT); if (pp) *pp = pT; return cb; } Py_ssize_t PyBuffer_Size(PyObject* self) { if (!PyBuffer_Check(self)) { PyErr_SetString(PyExc_TypeError, "Not a buffer!"); return 0; } Py_ssize_t total_len = 0; Py_TYPE(self)->tp_as_buffer->bf_getsegcount(self, &total_len); return total_len; } #endif pyodbc-3.0.7/src/resource.h0000666000175000017500000000060212031131304014230 0ustar dokodoko//{{NO_DEPENDENCIES}} // Microsoft Visual C++ generated include file. 
// Used by pyodbc.rc // Next default values for new objects // #ifdef APSTUDIO_INVOKED #ifndef APSTUDIO_READONLY_SYMBOLS #define _APS_NEXT_RESOURCE_VALUE 101 #define _APS_NEXT_COMMAND_VALUE 40001 #define _APS_NEXT_CONTROL_VALUE 1001 #define _APS_NEXT_SYMED_VALUE 101 #endif #endif pyodbc-3.0.7/setup.py0000666000175000017500000002372312034534654013206 0ustar dokodoko#!/usr/bin/python import sys, os, re, platform from os.path import exists, abspath, dirname, join, isdir try: # Allow use of setuptools so eggs can be built. from setuptools import setup, Command except ImportError: from distutils.core import setup, Command from distutils.extension import Extension from distutils.errors import * OFFICIAL_BUILD = 9999 def _print(s): # Python 2/3 compatibility sys.stdout.write(s + '\n') class VersionCommand(Command): description = "prints the pyodbc version, determined from git" user_options = [] def initialize_options(self): self.verbose = 0 def finalize_options(self): pass def run(self): version_str, version = get_version() sys.stdout.write(version_str + '\n') class TagsCommand(Command): description = 'runs etags' user_options = [] def initialize_options(self): pass def finalize_options(self): pass def run(self): # Windows versions of etag do not seem to expand wildcards (which Unix shells normally do for Unix utilities), # so find all of the files ourselves. files = [ join('src', f) for f in os.listdir('src') if f.endswith(('.h', '.cpp')) ] cmd = 'etags %s' % ' '.join(files) return os.system(cmd) def main(): version_str, version = get_version() settings = get_compiler_settings(version_str) files = [ abspath(join('src', f)) for f in os.listdir('src') if f.endswith('.cpp') ] if exists('MANIFEST'): os.remove('MANIFEST') kwargs = { 'name': "pyodbc", 'version': version_str, 'description': "DB API Module for ODBC", 'long_description': ('A Python DB API 2 module for ODBC. 
This project provides an up-to-date, ' 'convenient interface to ODBC using native data types like datetime and decimal.'), 'maintainer': "Michael Kleehammer", 'maintainer_email': "michael@kleehammer.com", 'ext_modules': [Extension('pyodbc', files, **settings)], 'license': 'MIT', 'classifiers': ['Development Status :: 5 - Production/Stable', 'Intended Audience :: Developers', 'Intended Audience :: System Administrators', 'License :: OSI Approved :: MIT License', 'Operating System :: Microsoft :: Windows', 'Operating System :: POSIX', 'Programming Language :: Python', 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 3', 'Topic :: Database', ], 'url': 'http://code.google.com/p/pyodbc', 'download_url': 'http://code.google.com/p/pyodbc/downloads/list', 'cmdclass': { 'version' : VersionCommand, 'tags' : TagsCommand } } if sys.hexversion >= 0x02060000: kwargs['options'] = { 'bdist_wininst': {'user_access_control' : 'auto'} } setup(**kwargs) def get_compiler_settings(version_str): settings = { 'libraries': [], 'define_macros' : [ ('PYODBC_VERSION', version_str) ] } # This isn't the best or right way to do this, but I don't see how someone is supposed to sanely subclass the build # command. 
for option in ['assert', 'trace', 'leak-check']: try: sys.argv.remove('--%s' % option) settings['define_macros'].append(('PYODBC_%s' % option.replace('-', '_').upper(), 1)) except ValueError: pass if os.name == 'nt': settings['extra_compile_args'] = ['/Wall', '/wd4668', '/wd4820', '/wd4711', # function selected for automatic inline expansion '/wd4100', # unreferenced formal parameter '/wd4127', # "conditional expression is constant" testing compilation constants '/wd4191', # casts to PYCFunction which doesn't have the keywords parameter ] settings['libraries'].append('odbc32') settings['libraries'].append('advapi32') if '--debug' in sys.argv: sys.argv.remove('--debug') settings['extra_compile_args'].extend('/Od /Ge /GS /GZ /RTC1 /Wp64 /Yd'.split()) elif os.environ.get("OS", '').lower().startswith('windows'): # Windows Cygwin (posix on windows) # OS name not windows, but still on Windows settings['libraries'].append('odbc32') elif sys.platform == 'darwin': # OS/X now ships with iODBC. settings['libraries'].append('iodbc') # Apple has decided they won't maintain the iODBC system in OS/X and has added deprecation warnings in 10.8. # For now target 10.7 to eliminate the warnings. # Python functions take a lot of 'char *' that really should be const. gcc complains about this *a lot* settings['extra_compile_args'] = ['-Wno-write-strings', '-Wno-deprecated-declarations'] settings['define_macros'].append( ('MAC_OS_X_VERSION_10_7',) ) else: # Other posix-like: Linux, Solaris, etc. # Python functions take a lot of 'char *' that really should be const. gcc complains about this *a lot* settings['extra_compile_args'] = ['-Wno-write-strings'] # What is the proper way to detect iODBC, MyODBC, unixODBC, etc.? settings['libraries'].append('odbc') return settings def add_to_path(): """ Prepends the build directory to the path so pyodbcconf can be imported without installing it. 
""" # Now run the utility import imp library_exts = [ t[0] for t in imp.get_suffixes() if t[-1] == imp.C_EXTENSION ] library_names = [ 'pyodbcconf%s' % ext for ext in library_exts ] # Only go into directories that match our version number. dir_suffix = '-%s.%s' % (sys.version_info[0], sys.version_info[1]) build = join(dirname(abspath(__file__)), 'build') for top, dirs, files in os.walk(build): dirs = [ d for d in dirs if d.endswith(dir_suffix) ] for name in library_names: if name in files: sys.path.insert(0, top) return raise SystemExit('Did not find pyodbcconf') def get_version(): """ Returns the version of the product as (description, [major,minor,micro,beta]). If the release is official, `beta` will be 9999 (OFFICIAL_BUILD). 1. If in a git repository, use the latest tag (git describe). 2. If in an unzipped source directory (from setup.py sdist), read the version from the PKG-INFO file. 3. Use 3.0.0.0 and complain a lot. """ # My goal is to (1) provide accurate tags for official releases but (2) not have to manage tags for every test # release. # # Official versions are tagged using 3 numbers: major, minor, micro. A build of a tagged version should produce # the version using just these pieces, such as 2.1.4. # # Unofficial versions are "working towards" the next version. So the next unofficial build after 2.1.4 would be a # beta for 2.1.5. Using 'git describe' we can find out how many changes have been made after 2.1.4 and we'll use # this count as the beta id (beta1, beta2, etc.) # # Since the 4 numbers are put into the Windows DLL, we want to make sure the beta versions sort *before* the # official, so we set the official build number to 9999, but we don't show it. name = None # branch/feature name. Should be None for official builds. numbers = None # The 4 integers that make up the version. # If this is a source release the version will have already been assigned and be in the PKG-INFO file. 
name, numbers = _get_version_pkginfo() # If not a source release, we should be in a git repository. Look for the latest tag. if not numbers: name, numbers = _get_version_git() if not numbers: _print('WARNING: Unable to determine version. Using 3.0.0.0') name, numbers = '3.0.0-unsupported', [3,0,0,0] return name, numbers def _get_version_pkginfo(): filename = join(dirname(abspath(__file__)), 'PKG-INFO') if exists(filename): re_ver = re.compile(r'^Version: \s+ (\d+)\.(\d+)\.(\d+) (?: -beta(\d+))?', re.VERBOSE) for line in open(filename): match = re_ver.search(line) if match: name = line.split(':', 1)[1].strip() numbers = [int(n or 0) for n in match.groups()[:3]] numbers.append(int(match.group(4) or OFFICIAL_BUILD)) # don't use 0 as a default for build return name, numbers return None, None def _get_version_git(): n, result = getoutput('git describe --tags --match 3.*') if n: _print('WARNING: git describe failed with: %s %s' % (n, result)) return None, None match = re.match(r'(\d+).(\d+).(\d+) (?: -(\d+)-g[0-9a-z]+)?', result, re.VERBOSE) if not match: return None, None numbers = [int(n or OFFICIAL_BUILD) for n in match.groups()] if numbers[-1] == OFFICIAL_BUILD: name = '%s.%s.%s' % tuple(numbers[:3]) if numbers[-1] != OFFICIAL_BUILD: # This is a beta of the next micro release, so increment the micro number to reflect this. numbers[-2] += 1 name = '%s.%s.%s-beta%02d' % tuple(numbers) n, result = getoutput('git branch') branch = re.search(r'\* (\w+)', result).group(1) if branch != 'master' and not re.match('^v\d+$', branch): name = branch + '-' + name return name, numbers def getoutput(cmd): pipe = os.popen(cmd, 'r') text = pipe.read().rstrip('\n') status = pipe.close() or 0 return status, text if __name__ == '__main__': main() pyodbc-3.0.7/README.rst0000666000175000017500000001313712031131304013137 0ustar dokodoko Overview ======== This project is a Python database module for ODBC that implements the Python DB API 2.0 specification. 
:homepage: http://code.google.com/p/pyodbc :source: http://github.com/mkleehammer/pyodbc :source: http://code.google.com/p/pyodbc/source/list This module requires: * Python 2.4 or greater * ODBC 3.0 or greater On Windows, the easiest way to install is to use the Windows installers from: http://code.google.com/p/pyodbc/downloads/list Source can be obtained at http://github.com/mkleehammer/pyodbc/tree or http://code.google.com/p/pyodbc/source/list To build from source, either check the source out of version control or download a source extract and run:: python setup.py build install Module Specific Behavior ======================== General ------- * The pyodbc.connect function accepts a single parameter: the ODBC connection string. This string is not read or modified by pyodbc, so consult the ODBC documentation or your ODBC driver's documentation for details. The general format is:: cnxn = pyodbc.connect('DSN=mydsn;UID=userid;PWD=pwd') * Connection caching in the ODBC driver manager is automatically enabled. * Call cnxn.commit() since the DB API specification requires a rollback when a connection is closed that was not specifically committed. * When a connection is closed, all cursors created from the connection are closed. Data Types ---------- * Dates, times, and timestamps use the Python datetime module's date, time, and datetime classes. These classes can be passed directly as parameters and will be returned when querying date/time columns. * Binary data is passed and returned in Python buffer objects. * Decimal and numeric columns are passed and returned using the Python 2.4 decimal class. Convenient Additions -------------------- * Cursors are iterable and returns Row objects. :: cursor.execute("select a,b from tmp") for row in cursor: print row * The DB API specifies that results must be tuple-like, so columns are normally accessed by indexing into the sequence (e.g. row[0]) and pyodbc supports this. 
However, columns can also be accessed by name:: cursor.execute("select album_id, photo_id from photos where user_id=1") row = cursor.fetchone() print row.album_id, row.photo_id print row[0], row[1] # same as above, but less readable This makes the code easier to maintain when modifying SQL, more readable, and allows rows to be used where a custom class might otherwise be used. All rows from a single execute share the same dictionary of column names, so using Row objects to hold a large result set may also use less memory than creating a object for each row. The SQL "as" keyword allows the name of a column in the result set to be specified. This is useful if a column name has spaces or if there is no name:: cursor.execute("select count(*) as photo_count from photos where user_id < 100") row = cursor.fetchone() print row.photo_count * The DB API specification does not specify the return value of Cursor.execute. Previous versions of pyodbc (2.0.x) returned different values, but the 2.1 versions always return the Cursor itself. This allows for compact code such as:: for row in cursor.execute("select album_id, photo_id from photos where user_id=1"): print row.album_id, row.photo_id row = cursor.execute("select * from tmp").fetchone() rows = cursor.execute("select * from tmp").fetchall() count = cursor.execute("update photos set processed=1 where user_id=1").rowcount count = cursor.execute("delete from photos where user_id=1").rowcount * Though SQL is very powerful, values sometimes need to be modified before they can be used. Rows allow their values to be replaced, which makes them even more convenient ad-hoc data structures. :: # Replace the 'start_date' datetime in each row with one that has a time zone. rows = cursor.fetchall() for row in rows: row.start_date = row.start_date.astimezone(tz) Note that columns cannot be added to rows; only values for existing columns can be modified. 
* As specified in the DB API, Cursor.execute accepts an optional sequence of parameters:: cursor.execute("select a from tbl where b=? and c=?", (x, y)) However, this seems complicated for something as simple as passing parameters, so pyodbc also accepts the parameters directly. Note in this example that x & y are not in a tuple:: cursor.execute("select a from tbl where b=? and c=?", x, y) * The DB API specifies that connections require a manual commit and pyodbc complies with this. However, connections also support autocommit, using the autocommit keyword of the connection function or the autocommit attribute of the Connection object:: cnxn = pyodbc.connect(cstring, autocommit=True) or :: cnxn.autocommit = True cnxn.autocommit = False Goals / Design ============== * This module should not require any 3rd party modules other than ODBC. * Only built-in data types should be used where possible. a) Reduces the number of libraries to learn. b) Reduces the number of modules and libraries to install. c) Eventually a standard is usually introduced. For example, many previous database drivers used the mxDate classes. Now that Python 2.3 has introduced built-in date/time classes, using those modules is more complicated than using the built-ins. * It should adhere to the DB API specification, but be more "Pythonic" when convenient. The most common usages should be optimized for convenience and speed. * All ODBC functionality should (eventually) be exposed.