././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1629404586.3039672 pyodbc-4.0.32/0000775000175000017500000000000000000000000014423 5ustar00mkleehammermkleehammer././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1619054768.0 pyodbc-4.0.32/.flake80000664000175000017500000000021700000000000015576 0ustar00mkleehammermkleehammer[flake8] max_line_length: 95 ignore = E221, # multi spaces before op - I line up assignments often E401, # multiple imports on one line././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1619054768.0 pyodbc-4.0.32/.gitignore0000664000175000017500000000121600000000000016413 0ustar00mkleehammermkleehammer# This file is based largely on the GitHub .gitignore template for Python: # https://github.com/github/gitignore/blob/master/Python.gitignore # Python __pycache__/ *.py[cod] setup.cfg # C extensions *.so *.dll # Distribution / packaging build/ dist/ lib/ lib64/ MANIFEST /PKG-INFO sdist/ wheelhouse/ *.egg-info/ *.egg # Unit test / coverage reports .tox/ .coverage # Virtual Environments /venv* /.env/ /.venv # JetBrains .idea/ # Visual Studio .vscode/ .vs/ *.sln *.vcxproj* x64/ x86/ *.pdb # Executables *.exe # Other pyodbc.conf tmp tags # The Access unit tests copy empty.accdb and empty.mdb to these names and use them. test.accdb test.mdb ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1619054768.0 pyodbc-4.0.32/LICENSE.txt0000664000175000017500000000154400000000000016252 0ustar00mkleehammermkleehammerPermission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so. 
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1619054768.0 pyodbc-4.0.32/MANIFEST.in0000664000175000017500000000026100000000000016160 0ustar00mkleehammermkleehammerinclude src/*.h include src/*.cpp include tests2/* include tests3/* include README.* include LICENSE.txt # Include this file, needed for bdist_rpm include MANIFEST.in ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1629404586.3039672 pyodbc-4.0.32/PKG-INFO0000664000175000017500000000461400000000000015525 0ustar00mkleehammermkleehammerMetadata-Version: 2.1 Name: pyodbc Version: 4.0.32 Summary: DB API Module for ODBC Home-page: https://github.com/mkleehammer/pyodbc Maintainer: Michael Kleehammer Maintainer-email: michael@kleehammer.com License: MIT Description: # pyodbc [![Windows Status](https://ci.appveyor.com/api/projects/status/github/mkleehammer/pyodbc?branch=master&svg=true&passingText=Windows%20build)](https://ci.appveyor.com/project/mkleehammer/pyodbc) [![Ubuntu build](https://github.com/mkleehammer/pyodbc/actions/workflows/ubuntu_build.yml/badge.svg)](https://github.com/mkleehammer/pyodbc/actions/workflows/ubuntu_build.yml) [![PyPI](https://img.shields.io/pypi/v/pyodbc?color=brightgreen)](https://pypi.org/project/pyodbc/) pyodbc is an open source Python module that makes accessing ODBC databases simple. It implements the [DB API 2.0](https://www.python.org/dev/peps/pep-0249) specification but is packed with even more Pythonic convenience. 
The easiest way to install is to use pip: pip install pyodbc Precompiled binary wheels are provided for most Python versions on Windows and macOS. On other operating systems this will build from source. Note, pyodbc contains C++ extensions so you will need a suitable C++ compiler on your computer to install pyodbc, for all operating systems. See the [docs](https://github.com/mkleehammer/pyodbc/wiki/Install) for details. [Documentation](https://github.com/mkleehammer/pyodbc/wiki) [Release Notes](https://github.com/mkleehammer/pyodbc/releases) Platform: UNKNOWN Classifier: Development Status :: 5 - Production/Stable Classifier: Intended Audience :: Developers Classifier: Intended Audience :: System Administrators Classifier: License :: OSI Approved :: MIT License Classifier: Operating System :: Microsoft :: Windows Classifier: Operating System :: POSIX Classifier: Programming Language :: Python Classifier: Programming Language :: Python :: 2 Classifier: Programming Language :: Python :: 2.7 Classifier: Programming Language :: Python :: 3 Classifier: Programming Language :: Python :: 3.5 Classifier: Programming Language :: Python :: 3.6 Classifier: Programming Language :: Python :: 3.7 Classifier: Programming Language :: Python :: 3.8 Classifier: Topic :: Database Description-Content-Type: text/markdown ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1629144457.0 pyodbc-4.0.32/README.md0000664000175000017500000000235700000000000015711 0ustar00mkleehammermkleehammer# pyodbc [![Windows Status](https://ci.appveyor.com/api/projects/status/github/mkleehammer/pyodbc?branch=master&svg=true&passingText=Windows%20build)](https://ci.appveyor.com/project/mkleehammer/pyodbc) [![Ubuntu build](https://github.com/mkleehammer/pyodbc/actions/workflows/ubuntu_build.yml/badge.svg)](https://github.com/mkleehammer/pyodbc/actions/workflows/ubuntu_build.yml) [![PyPI](https://img.shields.io/pypi/v/pyodbc?color=brightgreen)](https://pypi.org/project/pyodbc/) 
pyodbc is an open source Python module that makes accessing ODBC databases simple. It implements the [DB API 2.0](https://www.python.org/dev/peps/pep-0249) specification but is packed with even more Pythonic convenience. The easiest way to install is to use pip: pip install pyodbc Precompiled binary wheels are provided for most Python versions on Windows and macOS. On other operating systems this will build from source. Note, pyodbc contains C++ extensions so you will need a suitable C++ compiler on your computer to install pyodbc, for all operating systems. See the [docs](https://github.com/mkleehammer/pyodbc/wiki/Install) for details. [Documentation](https://github.com/mkleehammer/pyodbc/wiki) [Release Notes](https://github.com/mkleehammer/pyodbc/releases) ././@PaxHeader0000000000000000000000000000003300000000000010211 xustar0027 mtime=1629404586.295967 pyodbc-4.0.32/appveyor/0000775000175000017500000000000000000000000016270 5ustar00mkleehammermkleehammer././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1619054768.0 pyodbc-4.0.32/appveyor/after_test.cmd0000664000175000017500000000061200000000000021114 0ustar00mkleehammermkleehammerIF "%APVYR_GENERATE_WHEELS%" == "true" ( ECHO *** pip install the "wheel" module "%PYTHON_HOME%\python" -m pip install wheel --quiet --no-warn-script-location ECHO. ECHO *** Generate the wheel file %WITH_COMPILER% "%PYTHON_HOME%\python" setup.py bdist_wheel ECHO. ECHO *** \dist directory listing: DIR /B dist ) ELSE ( ECHO *** Skipping generation of the wheel file ECHO. 
) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1619054768.0 pyodbc-4.0.32/appveyor/build_script.cmd0000664000175000017500000000326300000000000021444 0ustar00mkleehammermkleehammerECHO *** Environment variables: ECHO APPVEYOR_BUILD_FOLDER : %APPVEYOR_BUILD_FOLDER% ECHO APPVEYOR_BUILD_WORKER_IMAGE: %APPVEYOR_BUILD_WORKER_IMAGE% ECHO APPVEYOR_JOB_NUMBER: %APPVEYOR_JOB_NUMBER% ECHO APPVEYOR_JOB_ID : %APPVEYOR_JOB_ID% ECHO APPVEYOR_JOB_NAME : %APPVEYOR_JOB_NAME% ECHO APVYR_RUN_TESTS : %APVYR_RUN_TESTS% ECHO APVYR_RUN_MSSQL_TESTS : %APVYR_RUN_MSSQL_TESTS% ECHO APVYR_RUN_POSTGRES_TESTS: %APVYR_RUN_POSTGRES_TESTS% ECHO APVYR_RUN_MYSQL_TESTS : %APVYR_RUN_MYSQL_TESTS% ECHO APVYR_GENERATE_WHEELS : %APVYR_GENERATE_WHEELS% ECHO APVYR_VERBOSE : %APVYR_VERBOSE% ECHO PYTHON_HOME : %PYTHON_HOME% ECHO MSSQL_INSTANCE: %MSSQL_INSTANCE% ECHO POSTGRES_PATH : %POSTGRES_PATH% ECHO MYSQL_PATH : %MYSQL_PATH% ECHO. ECHO *** Get build info and compiler for the current Python installation: "%PYTHON_HOME%\python" -c "import platform; print(platform.python_build(), platform.python_compiler())" ECHO. ECHO *** Update pip and setuptools... "%PYTHON_HOME%\python" -m pip install --upgrade pip setuptools --quiet --no-warn-script-location IF ERRORLEVEL 1 ( ECHO *** ERROR: pip/setuptools update failed EXIT 1 ) "%PYTHON_HOME%\python" -m pip freeze --all ECHO. ECHO *** Building the pyodbc module... %WITH_COMPILER% "%PYTHON_HOME%\python" setup.py build IF ERRORLEVEL 1 ( ECHO *** ERROR: pyodbc build failed EXIT 1 ) ECHO. ECHO *** Installing pyodbc... "%PYTHON_HOME%\python" setup.py install IF ERRORLEVEL 1 ( ECHO *** ERROR: pyodbc install failed EXIT 1 ) ECHO. ECHO *** pip freeze... "%PYTHON_HOME%\python" -m pip freeze --all ECHO. ECHO *** Get version of the built pyodbc module: "%PYTHON_HOME%\python" -c "import pyodbc; print(pyodbc.version)" ECHO. 
././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1619054768.0 pyodbc-4.0.32/appveyor/compile.cmd0000664000175000017500000000660000000000000020407 0ustar00mkleehammermkleehammer:: To build extensions for 64 bit Python 2, we need to configure environment :: variables to use the MSVC 2008 C++ compilers from GRMSDKX_EN_DVD.iso of: :: MS Windows SDK for Windows 7 and .NET Framework 3.5 (SDK v7.0) :: :: To build extensions for 64 bit Python 3, we need to configure environment :: variables to use the MSVC 2010 C++ compilers from GRMSDKX_EN_DVD.iso of: :: MS Windows SDK for Windows 7 and .NET Framework 4 (SDK v7.1) :: :: 32 bit builds, and 64-bit builds for 3.5 and beyond, do not require specific :: environment configurations. :: :: Note: this script needs to be run with the /E:ON and /V:ON flags for the :: cmd interpreter, at least for (SDK v7.0) :: :: More details at: :: https://github.com/cython/cython/wiki/64BitCythonExtensionsOnWindows :: http://stackoverflow.com/a/13751649/163740 :: :: Author: Olivier Grisel :: License: CC0 1.0 Universal: http://creativecommons.org/publicdomain/zero/1.0/ :: :: The repeated CALL commands at the end of this file look redundant, but :: if you move them outside the IF clauses, they do not run properly in :: the SET_SDK_64==Y case, I don't know why. 
@ECHO OFF SET COMMAND_TO_RUN=%* SET WIN_SDK_ROOT=C:\Program Files\Microsoft SDKs\Windows SET WIN_WDK=C:\Program Files (x86)\Windows Kits\10\Include\wdf :: Extract the major and minor versions of the current Python interpreter, and bitness FOR /F "tokens=* USEBACKQ" %%F IN (`%PYTHON_HOME%\python -c "import sys; sys.stdout.write(str(sys.version_info.major))"`) DO ( SET PYTHON_MAJOR_VERSION=%%F ) FOR /F "tokens=* USEBACKQ" %%F IN (`%PYTHON_HOME%\python -c "import sys; sys.stdout.write(str(sys.version_info.minor))"`) DO ( SET PYTHON_MINOR_VERSION=%%F ) FOR /F "tokens=* USEBACKQ" %%F IN (`%PYTHON_HOME%\python -c "import sys; sys.stdout.write('64' if sys.maxsize > 2**32 else '32')"`) DO ( SET PYTHON_ARCH=%%F ) ECHO Inferred Python version (major, minor, arch): %PYTHON_MAJOR_VERSION% %PYTHON_MINOR_VERSION% %PYTHON_ARCH% :: Based on the Python version, determine what SDK version to use, and whether :: to set the SDK for 64-bit. IF %PYTHON_MAJOR_VERSION% EQU 2 ( SET WINDOWS_SDK_VERSION="v7.0" SET SET_SDK_64=Y ) ELSE ( IF %PYTHON_MAJOR_VERSION% EQU 3 ( SET WINDOWS_SDK_VERSION="v7.1" IF %PYTHON_MINOR_VERSION% LEQ 4 ( SET SET_SDK_64=Y ) ELSE ( SET SET_SDK_64=N IF EXIST "%WIN_WDK%" ( :: See: https://connect.microsoft.com/VisualStudio/feedback/details/1610302/ REN "%WIN_WDK%" 0wdf ) ) ) ELSE ( ECHO Unsupported Python version: "%PYTHON_MAJOR_VERSION%" EXIT 1 ) ) IF %PYTHON_ARCH% EQU 64 ( IF %SET_SDK_64% == Y ( ECHO Configuring Windows SDK %WINDOWS_SDK_VERSION% for Python %PYTHON_MAJOR_VERSION% on a 64 bit architecture SET DISTUTILS_USE_SDK=1 SET MSSdk=1 "%WIN_SDK_ROOT%\%WINDOWS_SDK_VERSION%\Setup\WindowsSdkVer.exe" -q -version:%WINDOWS_SDK_VERSION% "%WIN_SDK_ROOT%\%WINDOWS_SDK_VERSION%\Bin\SetEnv.cmd" /x64 /release ECHO Executing: %COMMAND_TO_RUN% CALL %COMMAND_TO_RUN% || EXIT 1 ) ELSE ( ECHO Using default MSVC build environment for 64 bit architecture ECHO Executing: %COMMAND_TO_RUN% CALL %COMMAND_TO_RUN% || EXIT 1 ) ) ELSE ( ECHO Using default MSVC build environment for 32 bit 
architecture ECHO Executing: %COMMAND_TO_RUN% CALL %COMMAND_TO_RUN% || EXIT 1 ) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1619054768.0 pyodbc-4.0.32/appveyor/install.ps10000664000175000017500000002377300000000000020377 0ustar00mkleehammermkleehammer# check that all the required ODBC drivers are available, and install any that are missing Function DownloadFileFromUrl ($url, $file_path) { # try multiple times to download the file $success = $false $attempt_number = 1 $max_attempts = 5 while ($true) { try { Start-FileDownload -Url $url -FileName $file_path $success = $true } catch { Write-Error $_ Write-Output "WARNING: download attempt number $attempt_number of $max_attempts failed" } if ($success) {return} if ($attempt_number -ge $max_attempts) {break} Start-Sleep -Seconds 10 $attempt_number += 1 } # delete the file, just in case, to indicate failure if (Test-Path $file_path) { Remove-Item $file_path } } Function CheckAndInstallMsiFromUrl ($driver_name, $driver_bitness, $driver_url, $msifile_path, $msiexec_paras) { Write-Output "" # check whether the driver is already installed $d = Get-OdbcDriver -Name $driver_name -Platform $driver_bitness -ErrorAction:SilentlyContinue if ($?) { Write-Output "*** Driver ""$driver_name"" ($driver_bitness) already installed: $($d.Attribute.Driver)" return } else { Write-Output "*** Driver ""$driver_name"" ($driver_bitness) not found" } # get the driver's msi file, check the AppVeyor cache first if (Test-Path $msifile_path) { Write-Output "Driver's msi file found in the cache" } else { DownloadFileFromUrl -url $driver_url -file_path $msifile_path If (-Not (Test-Path $msifile_path)) { Write-Output "ERROR: Could not download the msi file from ""$driver_url""" return } } # install the driver's msi file # Note, there is an alternate method of calling msiexec.exe using cmd: # cmd /c start /wait msiexec.exe /i "$msifile_path" /quiet /qn /norestart # if (!$?) 
{...} Write-Output "Installing the driver..." $msi_args = @("/quiet", "/passive", "/qn", "/norestart", "/i", ('"{0}"' -f $msifile_path)) if ($msiexec_paras) { $msi_args += $msiexec_paras } $result = Start-Process "msiexec.exe" -ArgumentList $msi_args -Wait -PassThru if ($result.ExitCode -ne 0) { Write-Output "ERROR: Driver installation failed" Write-Output $result return } Write-Output "...driver installed successfully" } Function CheckAndInstallZippedMsiFromUrl ($driver_name, $driver_bitness, $driver_url, $zipfile_path, $zip_internal_msi_file, $msifile_path) { Write-Output "" # check whether the driver is already installed if ($d = Get-OdbcDriver -Name $driver_name -Platform $driver_bitness -ErrorAction:SilentlyContinue) { Write-Output "*** Driver ""$driver_name"" ($driver_bitness) already installed: $($d.Attribute.Driver)" return } else { Write-Output "*** Driver ""$driver_name"" ($driver_bitness) not found" } if (Test-Path $msifile_path) { Write-Output "Driver's msi file found in the cache" } else { DownloadFileFromUrl -url $driver_url -file_path $zipfile_path If (-Not (Test-Path $zipfile_path)) { Write-Output "ERROR: Could not download the zip file from $driver_url" return } Write-Output "Unzipping..." Expand-Archive -Path $zipfile_path -DestinationPath $temp_dir Copy-Item -Path "$temp_dir\$zip_internal_msi_file" -Destination $msifile_path -Force } Write-Output "Installing the driver..." 
$msi_args = @("/i", ('"{0}"' -f $msifile_path), "/quiet", "/qn", "/norestart") $result = Start-Process "msiexec.exe" -ArgumentList $msi_args -Wait -PassThru if ($result.ExitCode -ne 0) { Write-Output "ERROR: Driver installation failed" Write-Output $result return } Write-Output "...driver installed successfully" } # get Python version and bitness $python_major_version = cmd /c "${env:PYTHON_HOME}\python" -c "import sys; sys.stdout.write(str(sys.version_info.major))" $python_minor_version = cmd /c "${env:PYTHON_HOME}\python" -c "import sys; sys.stdout.write(str(sys.version_info.minor))" $python_arch = cmd /c "${env:PYTHON_HOME}\python" -c "import sys; sys.stdout.write('64' if sys.maxsize > 2**32 else '32')" # directories used exclusively by AppVeyor $cache_dir = "$env:APPVEYOR_BUILD_FOLDER\apvyr_cache" If (Test-Path $cache_dir) { Write-Output "*** Contents of the cache directory: $cache_dir" Get-ChildItem $cache_dir } else { Write-Output "*** Creating directory ""$cache_dir""..." New-Item -ItemType Directory -Path $cache_dir | out-null } $temp_dir = "$env:APPVEYOR_BUILD_FOLDER\apvyr_tmp" If (-Not (Test-Path $temp_dir)) { Write-Output "*** Creating directory ""$temp_dir""..." New-Item -ItemType Directory -Path $temp_dir | out-null } # output the already available ODBC drivers before installation If (${env:APVYR_VERBOSE} -eq "true") { Write-Output "" Write-Output "*** Installed ODBC drivers:" Get-OdbcDriver } # Microsoft SQL Server # AppVeyor build servers are always 64-bit and therefore only the 64-bit # SQL Server ODBC driver msi files can be installed on them. However, # the 64-bit msi files include both 32-bit and 64-bit drivers anyway. # The "SQL Server Native Client 10.0" and "SQL Server Native Client 11.0" driver # downloads do not appear to be available, hence cannot be installed. 
CheckAndInstallMsiFromUrl ` -driver_name "ODBC Driver 11 for SQL Server" ` -driver_bitness "64-bit" ` -driver_url "https://download.microsoft.com/download/5/7/2/57249A3A-19D6-4901-ACCE-80924ABEB267/ENU/x64/msodbcsql.msi" ` -msifile_path "$cache_dir\msodbcsql_11.0.0.0_x64.msi" ` -msiexec_paras @("IACCEPTMSODBCSQLLICENSETERMS=YES", "ADDLOCAL=ALL"); # with the 13.0 driver, some tests fail for Python 2.7 so using version 13.1 # 13.0: https://download.microsoft.com/download/1/E/7/1E7B1181-3974-4B29-9A47-CC857B271AA2/English/X64/msodbcsql.msi # 13.1: https://download.microsoft.com/download/D/5/E/D5EEF288-A277-45C8-855B-8E2CB7E25B96/x64/msodbcsql.msi CheckAndInstallMsiFromUrl ` -driver_name "ODBC Driver 13 for SQL Server" ` -driver_bitness "64-bit" ` -driver_url "https://download.microsoft.com/download/D/5/E/D5EEF288-A277-45C8-855B-8E2CB7E25B96/x64/msodbcsql.msi" ` -msifile_path "$cache_dir\msodbcsql_13.1.0.0_x64.msi" ` -msiexec_paras @("IACCEPTMSODBCSQLLICENSETERMS=YES", "ADDLOCAL=ALL"); CheckAndInstallMsiFromUrl ` -driver_name "ODBC Driver 17 for SQL Server" ` -driver_bitness "64-bit" ` -driver_url "https://download.microsoft.com/download/E/6/B/E6BFDC7A-5BCD-4C51-9912-635646DA801E/en-US/17.5.2.1/x64/msodbcsql.msi" ` -msifile_path "$cache_dir\msodbcsql_17.5.1.1_x64.msi" ` -msiexec_paras @("IACCEPTMSODBCSQLLICENSETERMS=YES", "ADDLOCAL=ALL"); # some drivers must be installed in alignment with Python's bitness if ($python_arch -eq "64") { CheckAndInstallZippedMsiFromUrl ` -driver_name "PostgreSQL Unicode(x64)" ` -driver_bitness "64-bit" ` -driver_url "https://ftp.postgresql.org/pub/odbc/versions/msi/psqlodbc_11_01_0000-x64.zip" ` -zipfile_path "$temp_dir\psqlodbc_11_01_0000-x64.zip" ` -zip_internal_msi_file "psqlodbc_x64.msi" ` -msifile_path "$cache_dir\psqlodbc_11_01_0000-x64.msi"; # MySQL 8.0 drivers apparently don't work on Python 2.7 ("system error 126"). # Note, installing MySQL 8.0 ODBC drivers causes the 5.3 drivers to be uninstalled. 
if ($python_major_version -eq "2") { CheckAndInstallMsiFromUrl ` -driver_name "MySQL ODBC 5.3 ANSI Driver" ` -driver_bitness "64-bit" ` -driver_url "https://dev.mysql.com/get/Downloads/Connector-ODBC/5.3/mysql-connector-odbc-5.3.14-winx64.msi" ` -msifile_path "$cache_dir\mysql-connector-odbc-5.3.14-winx64.msi"; } else { CheckAndInstallMsiFromUrl ` -driver_name "MySQL ODBC 8.0 ANSI Driver" ` -driver_bitness "64-bit" ` -driver_url "https://dev.mysql.com/get/Downloads/Connector-ODBC/8.0/mysql-connector-odbc-8.0.19-winx64.msi" ` -msifile_path "$cache_dir\mysql-connector-odbc-8.0.19-winx64.msi"; } } elseif ($python_arch -eq "32") { CheckAndInstallZippedMsiFromUrl ` -driver_name "PostgreSQL Unicode" ` -driver_bitness "32-bit" ` -driver_url "https://ftp.postgresql.org/pub/odbc/versions/msi/psqlodbc_11_01_0000-x86.zip" ` -zipfile_path "$temp_dir\psqlodbc_11_01_0000-x86.zip" ` -zip_internal_msi_file "psqlodbc_x86.msi" ` -msifile_path "$cache_dir\psqlodbc_11_01_0000-x86.msi"; # MySQL 8.0 drivers apparently don't work on Python 2.7 ("system error 126") so install 5.3 instead. # Note, installing MySQL 8.0 ODBC drivers causes the 5.3 drivers to be uninstalled. 
if ($python_major_version -eq 2) { CheckAndInstallMsiFromUrl ` -driver_name "MySQL ODBC 5.3 ANSI Driver" ` -driver_bitness "32-bit" ` -driver_url "https://dev.mysql.com/get/Downloads/Connector-ODBC/5.3/mysql-connector-odbc-5.3.14-win32.msi" ` -msifile_path "$cache_dir\mysql-connector-odbc-5.3.14-win32.msi"; } else { CheckAndInstallMsiFromUrl ` -driver_name "MySQL ODBC 8.0 ANSI Driver" ` -driver_bitness "32-bit" ` -driver_url "https://dev.mysql.com/get/Downloads/Connector-ODBC/8.0/mysql-connector-odbc-8.0.19-win32.msi" ` -msifile_path "$cache_dir\mysql-connector-odbc-8.0.19-win32.msi"; } } else { Write-Output "ERROR: Unexpected Python architecture:" Write-Output $python_arch } # output the contents of the temporary AppVeyor directories and # the ODBC drivers now available after installation If (${env:APVYR_VERBOSE} -eq "true") { Write-Output "" Write-Output "*** Contents of the cache directory: $cache_dir" Get-ChildItem $cache_dir Write-Output "" Write-Output "*** Contents of the temporary directory: $temp_dir" Get-ChildItem $temp_dir Write-Output "" Write-Output "*** Installed ODBC drivers:" Get-OdbcDriver } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1619054768.0 pyodbc-4.0.32/appveyor/test_connect.py0000664000175000017500000000010300000000000021324 0ustar00mkleehammermkleehammerimport sys import pyodbc c = pyodbc.connect(sys.argv[1]) c.close() ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1619054768.0 pyodbc-4.0.32/appveyor/test_script.cmd0000664000175000017500000001640400000000000021325 0ustar00mkleehammermkleehammerREM 0 = success, 1 = failure SET OVERALL_RESULT=0 REM Output a list of the ODBC drivers available to pyodbc ECHO *** Available ODBC Drivers: "%PYTHON_HOME%\python" -c "import pyodbc; print('\n'.join(sorted(pyodbc.drivers())))" REM check if any testing should be done at all IF NOT "%APVYR_RUN_TESTS%" == "true" ( ECHO *** Skipping all the unit tests GOTO :end ) REM Extract the 
major version of the current Python interpreter, and bitness FOR /F "tokens=* USEBACKQ" %%F IN (`%PYTHON_HOME%\python -c "import sys; sys.stdout.write(str(sys.version_info.major))"`) DO ( SET PYTHON_MAJOR_VERSION=%%F ) FOR /F "tokens=* USEBACKQ" %%F IN (`%PYTHON_HOME%\python -c "import sys; sys.stdout.write('64' if sys.maxsize > 2**32 else '32')"`) DO ( SET PYTHON_ARCH=%%F ) IF %PYTHON_MAJOR_VERSION% EQU 2 ( SET TESTS_DIR=tests2 ) ELSE ( SET TESTS_DIR=tests3 ) :mssql ECHO. ECHO ############################################################ ECHO # MS SQL Server ECHO ############################################################ IF NOT "%APVYR_RUN_MSSQL_TESTS%" == "true" ( ECHO *** Skipping the MS SQL Server unit tests GOTO :postgresql ) ECHO *** Get MS SQL Server version: sqlcmd -S "%MSSQL_INSTANCE%" -U sa -P "Password12!" -Q "SELECT @@VERSION" IF ERRORLEVEL 1 ( ECHO *** ERROR: Could not connect to instance GOTO :postgresql ) ECHO *** Create test database sqlcmd -S "%MSSQL_INSTANCE%" -U sa -P "Password12!" -Q "CREATE DATABASE test_db" IF ERRORLEVEL 1 ( ECHO *** ERROR: Could not create the test database GOTO :postgresql ) :mssql1 REM Native Client 10.0 is so old, it might not be available on the server SET DRIVER={SQL Server Native Client 10.0} SET CONN_STR=Driver=%DRIVER%;Server=%MSSQL_INSTANCE%;Database=test_db;UID=sa;PWD=Password12!; ECHO. 
ECHO *** Run tests using driver: "%DRIVER%" "%PYTHON_HOME%\python" appveyor\test_connect.py "%CONN_STR%" IF ERRORLEVEL 1 ( REM Don't fail the tests if the driver can't be found ECHO *** INFO: Could not connect using the connection string: ECHO "%CONN_STR%" GOTO :mssql2 ) SET PYTHON_ARGS="%CONN_STR:"=\"%" IF "%APVYR_VERBOSE%" == "true" ( SET PYTHON_ARGS=%PYTHON_ARGS% --verbose ) "%PYTHON_HOME%\python" "%TESTS_DIR%\sqlservertests.py" %PYTHON_ARGS% IF ERRORLEVEL 1 SET OVERALL_RESULT=1 :mssql2 REM Native Client 11.0 is so old, it might not be available on the server SET DRIVER={SQL Server Native Client 11.0} SET CONN_STR=Driver=%DRIVER%;Server=%MSSQL_INSTANCE%;Database=test_db;UID=sa;PWD=Password12!; ECHO. ECHO *** Run tests using driver: "%DRIVER%" "%PYTHON_HOME%\python" appveyor\test_connect.py "%CONN_STR%" IF ERRORLEVEL 1 ( REM Don't fail the tests if the driver can't be found ECHO *** INFO: Could not connect using the connection string: ECHO "%CONN_STR%" GOTO :mssql3 ) SET PYTHON_ARGS="%CONN_STR:"=\"%" IF "%APVYR_VERBOSE%" == "true" ( SET PYTHON_ARGS=%PYTHON_ARGS% --verbose ) "%PYTHON_HOME%\python" "%TESTS_DIR%\sqlservertests.py" %PYTHON_ARGS% IF ERRORLEVEL 1 SET OVERALL_RESULT=1 :mssql3 SET DRIVER={ODBC Driver 11 for SQL Server} SET CONN_STR=Driver=%DRIVER%;Server=%MSSQL_INSTANCE%;Database=test_db;UID=sa;PWD=Password12!; ECHO. ECHO *** Run tests using driver: "%DRIVER%" "%PYTHON_HOME%\python" appveyor\test_connect.py "%CONN_STR%" IF ERRORLEVEL 1 ( ECHO *** ERROR: Could not connect using the connection string: ECHO "%CONN_STR%" SET OVERALL_RESULT=1 GOTO :mssql4 ) SET PYTHON_ARGS="%CONN_STR:"=\"%" IF "%APVYR_VERBOSE%" == "true" ( SET PYTHON_ARGS=%PYTHON_ARGS% --verbose ) "%PYTHON_HOME%\python" "%TESTS_DIR%\sqlservertests.py" %PYTHON_ARGS% IF ERRORLEVEL 1 SET OVERALL_RESULT=1 :mssql4 SET DRIVER={ODBC Driver 13 for SQL Server} SET CONN_STR=Driver=%DRIVER%;Server=%MSSQL_INSTANCE%;Database=test_db;UID=sa;PWD=Password12!; ECHO. 
ECHO *** Run tests using driver: "%DRIVER%" "%PYTHON_HOME%\python" appveyor\test_connect.py "%CONN_STR%" IF ERRORLEVEL 1 ( ECHO *** ERROR: Could not connect using the connection string: ECHO "%CONN_STR%" SET OVERALL_RESULT=1 GOTO :mssql5 ) SET PYTHON_ARGS="%CONN_STR:"=\"%" IF "%APVYR_VERBOSE%" == "true" ( SET PYTHON_ARGS=%PYTHON_ARGS% --verbose ) "%PYTHON_HOME%\python" "%TESTS_DIR%\sqlservertests.py" %PYTHON_ARGS% IF ERRORLEVEL 1 SET OVERALL_RESULT=1 :mssql5 SET DRIVER={ODBC Driver 17 for SQL Server} SET CONN_STR=Driver=%DRIVER%;Server=%MSSQL_INSTANCE%;Database=test_db;UID=sa;PWD=Password12!; ECHO. ECHO *** Run tests using driver: "%DRIVER%" "%PYTHON_HOME%\python" appveyor\test_connect.py "%CONN_STR%" IF ERRORLEVEL 1 ( ECHO *** ERROR: Could not connect using the connection string: ECHO "%CONN_STR%" SET OVERALL_RESULT=1 GOTO :postgresql ) SET PYTHON_ARGS="%CONN_STR:"=\"%" IF "%APVYR_VERBOSE%" == "true" ( SET PYTHON_ARGS=%PYTHON_ARGS% --verbose ) "%PYTHON_HOME%\python" "%TESTS_DIR%\sqlservertests.py" %PYTHON_ARGS% IF ERRORLEVEL 1 SET OVERALL_RESULT=1 :postgresql REM TODO: create a separate database for the tests? ECHO. ECHO ############################################################ ECHO # PostgreSQL ECHO ############################################################ IF NOT "%APVYR_RUN_POSTGRES_TESTS%" == "true" ( ECHO *** Skipping the PostgreSQL unit tests GOTO :mysql ) ECHO *** Get PostgreSQL version SET PGPASSWORD=Password12! "%POSTGRES_PATH%\bin\psql" -U postgres -d postgres -c "SELECT version()" IF %PYTHON_ARCH% EQU 32 ( SET DRIVER={PostgreSQL Unicode} ) ELSE ( SET DRIVER={PostgreSQL Unicode^(x64^)} ) SET CONN_STR=Driver=%DRIVER%;Server=localhost;Port=5432;Database=postgres;Uid=postgres;Pwd=Password12!; ECHO. 
ECHO *** Run tests using driver: "%DRIVER%" "%PYTHON_HOME%\python" appveyor\test_connect.py "%CONN_STR%" IF ERRORLEVEL 1 ( ECHO *** ERROR: Could not connect using the connection string: ECHO "%CONN_STR%" SET OVERALL_RESULT=1 GOTO :mysql ) SET PYTHON_ARGS="%CONN_STR:"=\"%" IF "%APVYR_VERBOSE%" == "true" ( SET PYTHON_ARGS=%PYTHON_ARGS% --verbose ) "%PYTHON_HOME%\python" "%TESTS_DIR%\pgtests.py" %PYTHON_ARGS% IF ERRORLEVEL 1 SET OVERALL_RESULT=1 :mysql REM TODO: create a separate database for the tests? (with the right collation) REM https://dev.mysql.com/doc/refman/5.7/en/charset-charsets.html REM e.g. CREATE DATABASE test_db CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci; ECHO. ECHO ############################################################ ECHO # MySQL ECHO ############################################################ IF NOT "%APVYR_RUN_MYSQL_TESTS%" == "true" ( ECHO *** Skipping the MySQL unit tests GOTO :end ) ECHO *** Get MySQL version "%MYSQL_PATH%\bin\mysql" -u root -pPassword12! -e "STATUS" :mysql1 REM MySQL 8.0 drivers apparently don't work on Python 2.7 ("system error 126") so use 5.3 instead. IF %PYTHON_MAJOR_VERSION% EQU 2 ( SET DRIVER={MySQL ODBC 5.3 ANSI Driver} ) ELSE ( SET DRIVER={MySQL ODBC 8.0 ANSI Driver} ) SET CONN_STR=Driver=%DRIVER%;Charset=utf8mb4;Server=localhost;Port=3306;Database=mysql;Uid=root;Pwd=Password12!; ECHO. ECHO *** Run tests using driver: "%DRIVER%" "%PYTHON_HOME%\python" appveyor\test_connect.py "%CONN_STR%" IF ERRORLEVEL 1 ( ECHO *** ERROR: Could not connect using the connection string: ECHO "%CONN_STR%" SET OVERALL_RESULT=1 GOTO :end ) SET PYTHON_ARGS="%CONN_STR:"=\"%" IF "%APVYR_VERBOSE%" == "true" ( SET PYTHON_ARGS=%PYTHON_ARGS% --verbose ) "%PYTHON_HOME%\python" "%TESTS_DIR%\mysqltests.py" %PYTHON_ARGS% IF ERRORLEVEL 1 SET OVERALL_RESULT=1 :end ECHO. 
EXIT /B %OVERALL_RESULT% ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1619054768.0 pyodbc-4.0.32/appveyor.yml0000664000175000017500000001050600000000000017015 0ustar00mkleehammermkleehammer# This AppVeyor CI configuration file: # - builds pyodbc with multiple versions of Python # - tests the generated pyodbc module against various databases and drivers # - creates "wheel" files for distribution, and stores them as appveyor # artifacts which can be downloaded from the AppVeyor UI # # Various aspects of this file's behavior can be controlled by setting environment # variables in the AppVeyor UI. You will need an AppVeyor account for this (see # the Settings tab -> Environment -> Environment variables). Here are # the relevant variables and their possible string values. Note, "*" indicates the # defaults: # - APVYR_RUN_TESTS - run the unit tests, overall control (true*/false) # - APVYR_RUN_MSSQL_TESTS - run the MS SQL Server unit tests (true*/false) # - APVYR_RUN_POSTGRES_TESTS - run the PostgreSQL unit tests (true*/false) # - APVYR_RUN_MYSQL_TESTS - run the MySQL unit tests (true*/false) # - APVYR_GENERATE_WHEELS - generate distributable wheel files (true/false*) # - APVYR_VERBOSE - output more information to the logs (true/false*) # # For more information about appveyor.yml files, see: https://www.appveyor.com/docs/appveyor-yml/ # the AppVeyor cache is used to carry files between jobs, so make sure the jobs are serialized max_jobs: 1 # the default AppVeyor image for the jobs, but the images are also set in the matrix image: Visual Studio 2019 environment: global: # the following variables can be overridden as necessary through the AppVeyor UI APVYR_RUN_TESTS: "true" APVYR_RUN_MSSQL_TESTS: "true" APVYR_RUN_POSTGRES_TESTS: "true" APVYR_RUN_MYSQL_TESTS: "true" APVYR_GENERATE_WHEELS: "false" APVYR_VERBOSE: "false" # SDK v7.0 MSVC Express 2008's SetEnv.cmd script will fail if the # /E:ON and /V:ON options are not enabled in the batch 
script intepreter # http://stackoverflow.com/a/13751649/163740 # http://docs.microsoft.com/en-us/cpp/preprocessor/predefined-macros#microsoft-specific-predefined-macros WITH_COMPILER: "cmd /E:ON /V:ON /C .\\appveyor\\compile.cmd" # database-related variables, which must match the "services:" section below # ref: https://www.appveyor.com/docs/services-databases/ MSSQL_INSTANCE: "(local)\\SQL2017" POSTGRES_PATH: "C:\\Program Files\\PostgreSQL\\11" MYSQL_PATH: "C:\\Program Files\\MySQL\\MySQL Server 5.7" matrix: # all the Python versions to be tested, both 32-bit and 64-bit # ref: https://www.appveyor.com/docs/windows-images-software/#python # Python 2.7 must be built with Visual Studio 9.0, which is available only # on AppVeyor Windows images Visual Studio 2013 and Visual Studio 2015 - APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2015 PYTHON_HOME: "C:\\Python27" - APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2015 PYTHON_HOME: "C:\\Python27-x64" # - APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2015 # PYTHON_HOME: "C:\\Python33" # - APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2015 # PYTHON_HOME: "C:\\Python33-x64" # - APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2015 # PYTHON_HOME: "C:\\Python34" # - APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2015 # PYTHON_HOME: "C:\\Python34-x64" # Python 3.5+ need at least the Visual Studio 2015 image - APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2019 PYTHON_HOME: "C:\\Python35" - APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2019 PYTHON_HOME: "C:\\Python35-x64" - APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2019 PYTHON_HOME: "C:\\Python36" - APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2019 PYTHON_HOME: "C:\\Python36-x64" - APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2019 PYTHON_HOME: "C:\\Python37" - APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2019 PYTHON_HOME: "C:\\Python37-x64" - APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2019 PYTHON_HOME: "C:\\Python38" - APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2019 PYTHON_HOME: "C:\\Python38-x64" cache: - 
apvyr_cache -> appveyor\install.ps1 install: - ps: .\appveyor\install.ps1 # ref: https://www.appveyor.com/docs/services-databases/ services: - mssql2017 - postgresql11 - mysql build_script: - call .\appveyor\build_script.cmd test_script: - call .\appveyor\test_script.cmd after_test: - call .\appveyor\after_test.cmd artifacts: - path: 'dist\*.whl' ././@PaxHeader0000000000000000000000000000003300000000000010211 xustar0027 mtime=1629404586.295967 pyodbc-4.0.32/docs/0000775000175000017500000000000000000000000015353 5ustar00mkleehammermkleehammer././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1619054768.0 pyodbc-4.0.32/docs/_config.yml0000664000175000017500000000003100000000000017474 0ustar00mkleehammermkleehammertheme: jekyll-theme-slate././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1619054768.0 pyodbc-4.0.32/docs/index.md0000664000175000017500000000115400000000000017005 0ustar00mkleehammermkleehammer# pyodbc pyodbc is an open source Python module that makes accessing ODBC databases simple. It implements the [DB API 2.0](https://www.python.org/dev/peps/pep-0249) specification but is packed with even more Pythonic convenience. The easiest way to install is to use pip: pip install pyodbc Precompiled binary wheels are provided for most Python versions on Windows and macOS. On other operating systems this will build from source. The rest of the pyodbc documentation is now on a Wiki so it can be maintained by the entire community. [pyodbc Documentation](https://github.com/mkleehammer/pyodbc/wiki) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1619054768.0 pyodbc-4.0.32/issue_template.md0000664000175000017500000000064600000000000017776 0ustar00mkleehammermkleehammerPlease first make sure you have looked at: * Documentation: https://github.com/mkleehammer/pyodbc/wiki * Other issues ### Environment To diagnose, we usually need to know the following, including version numbers. 
On Windows, be sure to specify 32-bit Python or 64-bit: - Python: - pyodbc: - OS: - DB: - driver: ### Issue Often it is easiest to describe your issue as "expected behavior" and "observed behavior". ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1619054768.0 pyodbc-4.0.32/notes.txt0000664000175000017500000000444200000000000016320 0ustar00mkleehammermkleehammer Unicode ------- http://support.microsoft.com/default.aspx?scid=kb;EN-US;q294169 """ For many of these Unicode functions, the ODBC Programmer's Reference provides incorrect or ambiguous descriptions for some of the function arguments. Specifically, this problem relates to arguments that are used to specify the length of character string input and output values." Regardless of what the documentation says for each ODBC function, the following paragraph from the Unicode section of "Chapter 17: Programming Considerations" in the ODBC Programmer's Reference is the ultimate rule to use for length arguments in Unicode functions: "Unicode functions that always return or take strings or length arguments are passed as count-of-characters. For functions that return length information for server data, the display size and precision are described in number of characters. When a length (transfer size of the data) could refer to string or nonstring data, the length is described in octet lengths. For example, SQLGetInfoW will still take the length as count-of-bytes, but SQLExecDirectW will use count-of-characters." This means that if the argument in question describes the length of another argument that is always a string (typically represented as a SQLCHAR), then the length reflects the number of characters in the string. If the length argument describes another argument that could be a string or some other data type (typically represented as a SQLPOINTER), the length is in bytes. """ Driver Support" * PostgreSQL seems to correct use UCS-2. 
http://archives.postgresql.org/pgsql-odbc/2006-02/msg00112.php * MS SQL Server on Windows & Linux. Obviously correctly uses UCS-2. SQL Server 2012 and higher now support UTF16. * mysql: Seems to be broken. To handle this, probably need to provide a 'charset' option that causes us to convert to the given charset and use the ANSI/ASCII calls and data types. http://mysqlworkbench.org/?p=1399 * FreeTDS http://www.freetds.org/userguide/unicodefreetds.htm Definitely use 0.91 or later. Have seen reference to a new --wide-unicode flag for 0.92+ (broken in 0.91) which causes SQL_WCHAR to equal wchar_t instead of UTF16. ././@PaxHeader0000000000000000000000000000003300000000000010211 xustar0027 mtime=1629404586.295967 pyodbc-4.0.32/pyodbc.egg-info/0000775000175000017500000000000000000000000017375 5ustar00mkleehammermkleehammer././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1629404586.0 pyodbc-4.0.32/pyodbc.egg-info/PKG-INFO0000664000175000017500000000461400000000000020477 0ustar00mkleehammermkleehammerMetadata-Version: 2.1 Name: pyodbc Version: 4.0.32 Summary: DB API Module for ODBC Home-page: https://github.com/mkleehammer/pyodbc Maintainer: Michael Kleehammer Maintainer-email: michael@kleehammer.com License: MIT Description: # pyodbc [![Windows Status](https://ci.appveyor.com/api/projects/status/github/mkleehammer/pyodbc?branch=master&svg=true&passingText=Windows%20build)](https://ci.appveyor.com/project/mkleehammer/pyodbc) [![Ubuntu build](https://github.com/mkleehammer/pyodbc/actions/workflows/ubuntu_build.yml/badge.svg)](https://github.com/mkleehammer/pyodbc/actions/workflows/ubuntu_build.yml) [![PyPI](https://img.shields.io/pypi/v/pyodbc?color=brightgreen)](https://pypi.org/project/pyodbc/) pyodbc is an open source Python module that makes accessing ODBC databases simple. It implements the [DB API 2.0](https://www.python.org/dev/peps/pep-0249) specification but is packed with even more Pythonic convenience. 
The easiest way to install is to use pip: pip install pyodbc Precompiled binary wheels are provided for most Python versions on Windows and macOS. On other operating systems this will build from source. Note, pyodbc contains C++ extensions so you will need a suitable C++ compiler on your computer to install pyodbc, for all operating systems. See the [docs](https://github.com/mkleehammer/pyodbc/wiki/Install) for details. [Documentation](https://github.com/mkleehammer/pyodbc/wiki) [Release Notes](https://github.com/mkleehammer/pyodbc/releases) Platform: UNKNOWN Classifier: Development Status :: 5 - Production/Stable Classifier: Intended Audience :: Developers Classifier: Intended Audience :: System Administrators Classifier: License :: OSI Approved :: MIT License Classifier: Operating System :: Microsoft :: Windows Classifier: Operating System :: POSIX Classifier: Programming Language :: Python Classifier: Programming Language :: Python :: 2 Classifier: Programming Language :: Python :: 2.7 Classifier: Programming Language :: Python :: 3 Classifier: Programming Language :: Python :: 3.5 Classifier: Programming Language :: Python :: 3.6 Classifier: Programming Language :: Python :: 3.7 Classifier: Programming Language :: Python :: 3.8 Classifier: Topic :: Database Description-Content-Type: text/markdown ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1629404586.0 pyodbc-4.0.32/pyodbc.egg-info/SOURCES.txt0000664000175000017500000000307000000000000021261 0ustar00mkleehammermkleehammer.flake8 .gitignore LICENSE.txt MANIFEST.in README.md appveyor.yml issue_template.md notes.txt setup.py appveyor/after_test.cmd appveyor/build_script.cmd appveyor/compile.cmd appveyor/install.ps1 appveyor/test_connect.py appveyor/test_script.cmd docs/_config.yml docs/index.md pyodbc.egg-info/PKG-INFO pyodbc.egg-info/SOURCES.txt pyodbc.egg-info/dependency_links.txt pyodbc.egg-info/top_level.txt src/buffer.cpp src/buffer.h src/cnxninfo.cpp src/cnxninfo.h 
src/connection.cpp src/connection.h src/cursor.cpp src/cursor.h src/dbspecific.h src/errors.cpp src/errors.h src/getdata.cpp src/getdata.h src/params.cpp src/params.h src/pyodbc.h src/pyodbc.pyi src/pyodbc.rc src/pyodbccompat.cpp src/pyodbccompat.h src/pyodbcdbg.cpp src/pyodbcmodule.cpp src/pyodbcmodule.h src/resource.h src/row.cpp src/row.h src/textenc.cpp src/textenc.h src/wrapper.h tests2/accesstests.py tests2/dbapi20.py tests2/dbapitests.py tests2/empty.accdb tests2/empty.mdb tests2/exceltests.py tests2/informixtests.py tests2/mysqltests.py tests2/pgtests.py tests2/sqldwtests.py tests2/sqlite.db tests2/sqlitetests.py tests2/sqlservertests.py tests2/test.py tests2/test.xls tests2/testbase.py tests2/testutils.py tests2/testutils.pyc tests3/accesstests.py tests3/dbapi20.py tests3/dbapitests.py tests3/empty.accdb tests3/empty.mdb tests3/exceltests.py tests3/informixtests.py tests3/issue802.py tests3/mysqltests.py tests3/pgtests.py tests3/sparktests.py tests3/sqldwtests.py tests3/sqlitetests.py tests3/sqlservertests.py tests3/test.py tests3/testbase.py tests3/testutils.py utils/build-releases.cmd utils/build-releases.sh././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1629404586.0 pyodbc-4.0.32/pyodbc.egg-info/dependency_links.txt0000664000175000017500000000000100000000000023443 0ustar00mkleehammermkleehammer ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1629404586.0 pyodbc-4.0.32/pyodbc.egg-info/top_level.txt0000664000175000017500000000000700000000000022124 0ustar00mkleehammermkleehammerpyodbc ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1629404586.3039672 pyodbc-4.0.32/setup.cfg0000664000175000017500000000004600000000000016244 0ustar00mkleehammermkleehammer[egg_info] tag_build = tag_date = 0 ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1622310926.0 pyodbc-4.0.32/setup.py0000775000175000017500000002772100000000000016151 
0ustar00mkleehammermkleehammer#!/usr/bin/env python import sys, os, re from os.path import exists, abspath, dirname, join, isdir, relpath, expanduser try: # Allow use of setuptools so eggs can be built. from setuptools import setup, Command except ImportError: from distutils.core import setup, Command from distutils.extension import Extension from distutils.errors import * if sys.hexversion >= 0x03000000: from configparser import ConfigParser else: from ConfigParser import ConfigParser OFFICIAL_BUILD = 9999 def _print(s): # Python 2/3 compatibility sys.stdout.write(s + '\n') class VersionCommand(Command): description = "prints the pyodbc version, determined from git" user_options = [] def initialize_options(self): self.verbose = 0 def finalize_options(self): pass def run(self): version_str, _version = get_version() sys.stdout.write(version_str + '\n') class TagsCommand(Command): description = 'runs etags' user_options = [] def initialize_options(self): pass def finalize_options(self): pass def run(self): # Windows versions of etag do not seem to expand wildcards (which Unix shells normally do for Unix utilities), # so find all of the files ourselves. 
files = [ join('src', f) for f in os.listdir('src') if f.endswith(('.h', '.cpp')) ] cmd = 'etags %s' % ' '.join(files) return os.system(cmd) def main(): version_str, version = get_version() with open(join(dirname(abspath(__file__)), 'README.md')) as f: long_description = f.read() settings = get_compiler_settings(version_str) files = [ relpath(join('src', f)) for f in os.listdir('src') if f.endswith('.cpp') ] if exists('MANIFEST'): os.remove('MANIFEST') kwargs = { 'name': "pyodbc", 'version': version_str, 'description': "DB API Module for ODBC", 'long_description': long_description, 'long_description_content_type': 'text/markdown', 'maintainer': "Michael Kleehammer", 'maintainer_email': "michael@kleehammer.com", 'ext_modules': [Extension('pyodbc', sorted(files), **settings)], 'data_files': [ ('', ['src/pyodbc.pyi']) # places pyodbc.pyi alongside pyodbc.py in site-packages ], 'license': 'MIT', 'classifiers': ['Development Status :: 5 - Production/Stable', 'Intended Audience :: Developers', 'Intended Audience :: System Administrators', 'License :: OSI Approved :: MIT License', 'Operating System :: Microsoft :: Windows', 'Operating System :: POSIX', 'Programming Language :: Python', 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', 'Programming Language :: Python :: 3.8', 'Topic :: Database', ], 'url': 'https://github.com/mkleehammer/pyodbc', 'cmdclass': { 'version' : VersionCommand, 'tags' : TagsCommand } } if sys.hexversion >= 0x02060000: kwargs['options'] = { 'bdist_wininst': {'user_access_control' : 'auto'} } setup(**kwargs) def get_compiler_settings(version_str): settings = { 'extra_compile_args' : [], 'extra_link_args': [], 'libraries': [], 'include_dirs': [], 'define_macros' : [ ('PYODBC_VERSION', version_str) ] } # This isn't the best or right way to do this, but I don't 
see how someone is supposed to sanely subclass the build # command. for option in ['assert', 'trace', 'leak-check']: try: sys.argv.remove('--%s' % option) settings['define_macros'].append(('PYODBC_%s' % option.replace('-', '_').upper(), 1)) except ValueError: pass if os.name == 'nt': settings['extra_compile_args'].extend([ '/Wall', '/wd4514', # unreference inline function removed '/wd4820', # padding after struct member '/wd4668', # is not defined as a preprocessor macro '/wd4711', # function selected for automatic inline expansion '/wd4100', # unreferenced formal parameter '/wd4127', # "conditional expression is constant" testing compilation constants '/wd4191', # casts to PYCFunction which doesn't have the keywords parameter ]) if '--windbg' in sys.argv: # Used only temporarily to add some debugging flags to get better stack traces in # the debugger. This is not related to building debug versions of Python which use # "--debug". sys.argv.remove('--windbg') settings['extra_compile_args'].extend('/Od /Ge /GS /GZ /RTC1 /Wp64 /Yd'.split()) # Visual Studio 2019 defaults to using __CxxFrameHandler4 which is in # VCRUNTIME140_1.DLL which Python 3.7 and earlier are not linked to. This requirement # means pyodbc will not load unless the user has installed a UCRT update. Turn this # off to match the Python 3.7 settings. # # Unfortunately these are *hidden* settings. I guess we should be glad they actually # made the settings. 
# https://lectem.github.io/msvc/reverse-engineering/build/2019/01/21/MSVC-hidden-flags.html if sys.hexversion >= 0x03050000: settings['extra_compile_args'].append('/d2FH4-') settings['extra_link_args'].append('/d2:-FH4-') settings['libraries'].append('odbc32') settings['libraries'].append('advapi32') elif os.environ.get("OS", '').lower().startswith('windows'): # Windows Cygwin (posix on windows) # OS name not windows, but still on Windows settings['libraries'].append('odbc32') elif sys.platform == 'darwin': # The latest versions of OS X no longer ship with iodbc. Assume # unixODBC for now. settings['libraries'].append('odbc') # Python functions take a lot of 'char *' that really should be const. gcc complains about this *a lot* settings['extra_compile_args'].extend([ '-Wno-write-strings', '-Wno-deprecated-declarations' ]) # Apple has decided they won't maintain the iODBC system in OS/X and has added # deprecation warnings in 10.8. For now target 10.7 to eliminate the warnings. settings['define_macros'].append(('MAC_OS_X_VERSION_10_7',)) # Add directories for MacPorts and Homebrew. dirs = ['/usr/local/include', '/opt/local/include', expanduser('~/homebrew/include')] settings['include_dirs'].extend(dir for dir in dirs if isdir(dir)) # unixODBC make/install places libodbc.dylib in /usr/local/lib/ by default # ( also OS/X since El Capitan prevents /usr/lib from being accessed ) settings['library_dirs'] = ['/usr/local/lib'] else: # Other posix-like: Linux, Solaris, etc. # Python functions take a lot of 'char *' that really should be const. 
gcc complains about this *a lot* settings['extra_compile_args'].append('-Wno-write-strings') cflags = os.popen('odbc_config --cflags 2>/dev/null').read().strip() if cflags: settings['extra_compile_args'].extend(cflags.split()) ldflags = os.popen('odbc_config --libs 2>/dev/null').read().strip() if ldflags: settings['extra_link_args'].extend(ldflags.split()) from array import array UNICODE_WIDTH = array('u').itemsize # if UNICODE_WIDTH == 4: # # This makes UnixODBC use UCS-4 instead of UCS-2, which works better with sizeof(wchar_t)==4. # # Thanks to Marc-Antoine Parent # settings['define_macros'].append(('SQL_WCHART_CONVERT', '1')) # What is the proper way to detect iODBC, MyODBC, unixODBC, etc.? settings['libraries'].append('odbc') return settings def get_version(): """ Returns the version of the product as (description, [major,minor,micro,beta]). If the release is official, `beta` will be 9999 (OFFICIAL_BUILD). 1. If in a git repository, use the latest tag (git describe). 2. If in an unzipped source directory (from setup.py sdist), read the version from the PKG-INFO file. 3. Use 4.0.0.0 and complain a lot. """ # My goal is to (1) provide accurate tags for official releases but (2) not have to manage tags for every test # release. # # Official versions are tagged using 3 numbers: major, minor, micro. A build of a tagged version should produce # the version using just these pieces, such as 2.1.4. # # Unofficial versions are "working towards" the next version. So the next unofficial build after 2.1.4 would be a # beta for 2.1.5. Using 'git describe' we can find out how many changes have been made after 2.1.4 and we'll use # this count as the beta id (beta1, beta2, etc.) # # Since the 4 numbers are put into the Windows DLL, we want to make sure the beta versions sort *before* the # official, so we set the official build number to 9999, but we don't show it. name = None # branch/feature name. Should be None for official builds. 
numbers = None # The 4 integers that make up the version. # If this is a source release the version will have already been assigned and be in the PKG-INFO file. name, numbers = _get_version_pkginfo() # If not a source release, we should be in a git repository. Look for the latest tag. if not numbers: name, numbers = _get_version_git() if not numbers: _print('WARNING: Unable to determine version. Using 4.0.0.0') name, numbers = '4.0.0-unsupported', [4,0,0,0] return name, numbers def _get_version_pkginfo(): filename = join(dirname(abspath(__file__)), 'PKG-INFO') if exists(filename): re_ver = re.compile(r'^Version: \s+ (\d+)\.(\d+)\.(\d+) (?: b(\d+))?', re.VERBOSE) for line in open(filename): match = re_ver.search(line) if match: name = line.split(':', 1)[1].strip() numbers = [int(n or 0) for n in match.groups()[:3]] numbers.append(int(match.group(4) or OFFICIAL_BUILD)) # don't use 0 as a default for build return name, numbers return None, None def _get_version_git(): n, result = getoutput("git describe --tags --match [0-9]*") if n: _print('WARNING: git describe failed with: %s %s' % (n, result)) return None, None match = re.match(r'(\d+).(\d+).(\d+) (?: -(\d+)-g[0-9a-z]+)?', result, re.VERBOSE) if not match: return None, None numbers = [int(n or OFFICIAL_BUILD) for n in match.groups()] if numbers[-1] == OFFICIAL_BUILD: name = '%s.%s.%s' % tuple(numbers[:3]) if numbers[-1] != OFFICIAL_BUILD: # This is a beta of the next micro release, so increment the micro number to reflect this. 
numbers[-2] += 1 name = '%s.%s.%sb%d' % tuple(numbers) n, result = getoutput('git rev-parse --abbrev-ref HEAD') if result == 'HEAD': # We are not on a branch, so use the last revision instead n, result = getoutput('git rev-parse --short HEAD') name = name + '+commit' + result else: if result != 'master' and not re.match(r'^v\d+$', result): name = name + '+' + result.replace('-', '') return name, numbers def getoutput(cmd): pipe = os.popen(cmd, 'r') text = pipe.read().rstrip('\n') status = pipe.close() or 0 return status, text if __name__ == '__main__': main() ././@PaxHeader0000000000000000000000000000003300000000000010211 xustar0027 mtime=1629404586.295967 pyodbc-4.0.32/src/0000775000175000017500000000000000000000000015212 5ustar00mkleehammermkleehammer././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1619054768.0 pyodbc-4.0.32/src/buffer.cpp0000664000175000017500000000366200000000000017176 0ustar00mkleehammermkleehammer // Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated // documentation files (the "Software"), to deal in the Software without restriction, including without limitation the // rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to // permit persons to whom the Software is furnished to do so. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE // WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS // OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR // OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
#include "pyodbc.h" #if PY_MAJOR_VERSION < 3 #include "buffer.h" #include "pyodbcmodule.h" Py_ssize_t PyBuffer_GetMemory(PyObject* buffer, const char** pp) { PyBufferProcs* procs = Py_TYPE(buffer)->tp_as_buffer; if (!procs || !PyType_HasFeature(Py_TYPE(buffer), Py_TPFLAGS_HAVE_GETCHARBUFFER)) { // Can't access the memory directly because the buffer object doesn't support it. return -1; } if (procs->bf_getsegcount(buffer, 0) != 1) { // Can't access the memory directly because there is more than one segment. return -1; } #if PY_VERSION_HEX >= 0x02050000 char* pT = 0; #else const char* pT = 0; #endif Py_ssize_t cb = procs->bf_getcharbuffer(buffer, 0, &pT); if (pp) *pp = pT; return cb; } Py_ssize_t PyBuffer_Size(PyObject* self) { if (!PyBuffer_Check(self)) { PyErr_SetString(PyExc_TypeError, "Not a buffer!"); return 0; } Py_ssize_t total_len = 0; Py_TYPE(self)->tp_as_buffer->bf_getsegcount(self, &total_len); return total_len; } #endif ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1619054768.0 pyodbc-4.0.32/src/buffer.h0000664000175000017500000000420600000000000016636 0ustar00mkleehammermkleehammer // Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated // documentation files (the "Software"), to deal in the Software without restriction, including without limitation the // rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to // permit persons to whom the Software is furnished to do so. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE // WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS // OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR // OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #ifndef _BUFFER_H #define _BUFFER_H #if PY_MAJOR_VERSION < 3 // If the buffer object has a single, accessible segment, returns the length of the buffer. If 'pp' is not NULL, the // address of the segment is also returned. If there is more than one segment or if it cannot be accessed, -1 is // returned and 'pp' is not modified. Py_ssize_t PyBuffer_GetMemory(PyObject* buffer, const char** pp); // Returns the size of a Python buffer. // // If an error occurs, zero is returned, but zero is a valid buffer size (I guess), so use PyErr_Occurred to determine // if it represents a failure. Py_ssize_t PyBuffer_Size(PyObject* self); class BufferSegmentIterator { PyObject* pBuffer; Py_ssize_t iSegment; Py_ssize_t cSegments; public: BufferSegmentIterator(PyObject* _pBuffer) { pBuffer = _pBuffer; PyBufferProcs* procs = Py_TYPE(pBuffer)->tp_as_buffer; iSegment = 0; cSegments = procs->bf_getsegcount(pBuffer, 0); } bool Next(byte*& pb, SQLLEN &cb) { if (iSegment >= cSegments) return false; PyBufferProcs* procs = Py_TYPE(pBuffer)->tp_as_buffer; cb = procs->bf_getreadbuffer(pBuffer, iSegment++, (void**)&pb); return true; } }; #endif // PY_MAJOR_VERSION #endif ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1619054768.0 pyodbc-4.0.32/src/cnxninfo.cpp0000664000175000017500000001557600000000000017556 0ustar00mkleehammermkleehammer// There is a bunch of information we want from connections which requires calls to SQLGetInfo when we first connect. // However, this isn't something we really want to do for every connection, so we cache it by the hash of the // connection string. When we create a new connection, we copy the values into the connection structure. 
// // We hash the connection string since it may contain sensitive information we wouldn't want exposed in a core dump. #include "pyodbc.h" #include "wrapper.h" #include "textenc.h" #include "cnxninfo.h" #include "connection.h" // Maps from a Python string of the SHA1 hash to a CnxnInfo object. // static PyObject* map_hash_to_info; static PyObject* hashlib; // The hashlib module static PyObject* update; // The string 'update', used in GetHash. bool CnxnInfo_init() { // Called during startup to give us a chance to import the hash code. If we can't find it, we'll print a warning // to the console and not cache anything. // First try hashlib which was added in 2.5. 2.6 complains using warnings which we don't want affecting the // caller. map_hash_to_info = PyDict_New(); update = PyString_FromString("update"); if (!map_hash_to_info || !update) return false; hashlib = PyImport_ImportModule("hashlib"); if (!hashlib) return false; return true; } static PyObject* GetHash(PyObject* p) { #if PY_MAJOR_VERSION >= 3 Object bytes(PyUnicode_EncodeUTF8(PyUnicode_AS_UNICODE(p), PyUnicode_GET_SIZE(p), 0)); if (!bytes) return 0; p = bytes.Get(); #else Object bytes(PyUnicode_Check(p) ? PyUnicode_EncodeUTF8(PyUnicode_AS_UNICODE(p), PyUnicode_GET_SIZE(p), 0) : 0); if (PyUnicode_Check(p)) { if (!bytes) return 0; p = bytes.Get(); } #endif Object hash(PyObject_CallMethod(hashlib, "new", "s", "sha1")); if (!hash.IsValid()) return 0; Object result(PyObject_CallMethodObjArgs(hash, update, p, 0)); if (!result.IsValid()) return 0; return PyObject_CallMethod(hash, "hexdigest", 0); } inline void GetColumnSize(Connection* cnxn, SQLSMALLINT sqltype, int* psize) { // For some reason I can't seem to reuse the HSTMT multiple times in a row here. Until I // figure it out I'll simply allocate a new one each time. 
HSTMT hstmt; if (!SQL_SUCCEEDED(SQLAllocHandle(SQL_HANDLE_STMT, cnxn->hdbc, &hstmt))) return; SQLINTEGER columnsize; if (SQL_SUCCEEDED(SQLGetTypeInfo(hstmt, sqltype)) && SQL_SUCCEEDED(SQLFetch(hstmt)) && SQL_SUCCEEDED(SQLGetData(hstmt, 3, SQL_INTEGER, &columnsize, sizeof(columnsize), 0))) { // I believe some drivers are returning negative numbers for "unlimited" text fields, // such as FileMaker. Ignore anything that seems too small. if (columnsize >= 1) *psize = (int)columnsize; } SQLFreeStmt(hstmt, SQL_CLOSE); SQLFreeHandle(SQL_HANDLE_STMT, hstmt); } static PyObject* CnxnInfo_New(Connection* cnxn) { #ifdef _MSC_VER #pragma warning(disable : 4365) #endif CnxnInfo* p = PyObject_NEW(CnxnInfo, &CnxnInfoType); if (!p) return 0; Object info((PyObject*)p); // set defaults p->odbc_major = 0; p->odbc_minor = 0; p->supports_describeparam = false; p->datetime_precision = 19; // default: "yyyy-mm-dd hh:mm:ss" p->need_long_data_len = false; p->varchar_maxlength = 1 * 1024 * 1024 * 1024; p->wvarchar_maxlength = 1 * 1024 * 1024 * 1024; p->binary_maxlength = 1 * 1024 * 1024 * 1024; // WARNING: The GIL lock is released for the *entire* function here. Do not // touch any objects, call Python APIs, etc. We are simply making ODBC // calls and setting atomic values (ints & chars). Also, make sure the lock // gets released -- do not add an early exit. 
SQLRETURN ret; Py_BEGIN_ALLOW_THREADS char szVer[20]; SQLSMALLINT cch = 0; ret = SQLGetInfo(cnxn->hdbc, SQL_DRIVER_ODBC_VER, szVer, _countof(szVer), &cch); if (SQL_SUCCEEDED(ret)) { char* dot = strchr(szVer, '.'); if (dot) { *dot = '\0'; p->odbc_major=(char)atoi(szVer); p->odbc_minor=(char)atoi(dot + 1); } } char szYN[2]; if (SQL_SUCCEEDED(SQLGetInfo(cnxn->hdbc, SQL_DESCRIBE_PARAMETER, szYN, _countof(szYN), &cch))) p->supports_describeparam = szYN[0] == 'Y'; if (SQL_SUCCEEDED(SQLGetInfo(cnxn->hdbc, SQL_NEED_LONG_DATA_LEN, szYN, _countof(szYN), &cch))) p->need_long_data_len = (szYN[0] == 'Y'); GetColumnSize(cnxn, SQL_VARCHAR, &p->varchar_maxlength); GetColumnSize(cnxn, SQL_WVARCHAR, &p->wvarchar_maxlength); GetColumnSize(cnxn, SQL_VARBINARY, &p->binary_maxlength); GetColumnSize(cnxn, SQL_TYPE_TIMESTAMP, &p->datetime_precision); Py_END_ALLOW_THREADS return info.Detach(); } PyObject* GetConnectionInfo(PyObject* pConnectionString, Connection* cnxn) { // Looks-up or creates a CnxnInfo object for the given connection string. The connection string can be a Unicode // or String object. 
Object hash(GetHash(pConnectionString)); if (hash.IsValid()) { PyObject* info = PyDict_GetItem(map_hash_to_info, hash); if (info) { Py_INCREF(info); return info; } } PyObject* info = CnxnInfo_New(cnxn); if (info != 0 && hash.IsValid()) PyDict_SetItem(map_hash_to_info, hash, info); return info; } PyTypeObject CnxnInfoType = { PyVarObject_HEAD_INIT(0, 0) "pyodbc.CnxnInfo", // tp_name sizeof(CnxnInfo), // tp_basicsize 0, // tp_itemsize 0, // destructor tp_dealloc 0, // tp_print 0, // tp_getattr 0, // tp_setattr 0, // tp_compare 0, // tp_repr 0, // tp_as_number 0, // tp_as_sequence 0, // tp_as_mapping 0, // tp_hash 0, // tp_call 0, // tp_str 0, // tp_getattro 0, // tp_setattro 0, // tp_as_buffer Py_TPFLAGS_DEFAULT, // tp_flags }; ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1619054768.0 pyodbc-4.0.32/src/cnxninfo.h0000664000175000017500000000353600000000000017214 0ustar00mkleehammermkleehammer // Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated // documentation files (the "Software"), to deal in the Software without restriction, including without limitation the // rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to // permit persons to whom the Software is furnished to do so. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE // WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS // OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR // OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
#ifndef CNXNINFO_H
#define CNXNINFO_H

struct Connection;
extern PyTypeObject CnxnInfoType;

// Cached per-connection-string driver capabilities.  GetConnectionInfo looks these up by a
// hash of the connection string so repeated connections do not have to re-query the driver.
struct CnxnInfo
{
    PyObject_HEAD

    // The description of these fields is in the connection structure.

    // Parsed from SQLGetInfo(SQL_DRIVER_ODBC_VER), e.g. "03.80" -> major 3, minor 80.
    char odbc_major;
    char odbc_minor;

    // From SQLGetInfo(SQL_DESCRIBE_PARAMETER): true when the driver answered 'Y'.
    bool supports_describeparam;

    // Column size reported by the driver for SQL_TYPE_TIMESTAMP.
    int datetime_precision;

    // Do we need to use SQL_LEN_DATA_AT_EXEC?  Some drivers (e.g. FreeTDS 0.91) have problems with long values, so
    // we'll use SQL_DATA_AT_EXEC when possible.  If this is true, however, we'll need to pass the length.
    bool need_long_data_len;

    // These are from SQLGetTypeInfo.column_size, so the char ones are in characters, not bytes.
    int varchar_maxlength;
    int wvarchar_maxlength;
    int binary_maxlength;
};

bool CnxnInfo_init();

// Looks-up or creates a CnxnInfo object for the given connection string.  The connection string can be a Unicode or
// String object.
PyObject* GetConnectionInfo(PyObject* pConnectionString, Connection* cnxn);

#endif // CNXNINFO_H
././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1629144457.0 pyodbc-4.0.32/src/connection.cpp0000664000175000017500000015016300000000000020063 0ustar00mkleehammermkleehammer// Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
// documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
// permit persons to whom the Software is furnished to do so.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
// WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS // OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR // OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #include "pyodbc.h" #include "buffer.h" #include "wrapper.h" #include "textenc.h" #include "connection.h" #include "cursor.h" #include "pyodbcmodule.h" #include "errors.h" #include "cnxninfo.h" #if PY_MAJOR_VERSION < 3 static bool IsStringType(PyObject* t) { return (void*)t == (void*)&PyString_Type; } static bool IsUnicodeType(PyObject* t) { return (void*)t == (void*)&PyUnicode_Type; } #endif static char connection_doc[] = "Connection objects manage connections to the database.\n" "\n" "Each manages a single ODBC HDBC."; static Connection* Connection_Validate(PyObject* self) { Connection* cnxn; if (self == 0 || !Connection_Check(self)) { PyErr_SetString(PyExc_TypeError, "Connection object required"); return 0; } cnxn = (Connection*)self; if (cnxn->hdbc == SQL_NULL_HANDLE) { PyErr_SetString(ProgrammingError, "Attempt to use a closed connection."); return 0; } return cnxn; } static bool Connect(PyObject* pConnectString, HDBC hdbc, bool fAnsi, long timeout, Object& encoding) { // This should have been checked by the global connect function. I(PyString_Check(pConnectString) || PyUnicode_Check(pConnectString)); // The driver manager determines if the app is a Unicode app based on whether we call SQLDriverConnectA or // SQLDriverConnectW. Some drivers, notably Microsoft Access/Jet, change their behavior based on this, so we try // the Unicode version first. (The Access driver only supports Unicode text, but SQLDescribeCol returns SQL_CHAR // instead of SQL_WCHAR if we connect with the ANSI version. Obviously this causes lots of errors since we believe // what it tells us (SQL_CHAR).) // Python supports only UCS-2 and UCS-4, so we shouldn't need to worry about receiving surrogate pairs. 
However, // Windows does use UCS-16, so it is possible something would be misinterpreted as one. We may need to examine // this more. SQLRETURN ret; if (timeout > 0) { Py_BEGIN_ALLOW_THREADS ret = SQLSetConnectAttr(hdbc, SQL_ATTR_LOGIN_TIMEOUT, (SQLPOINTER)(uintptr_t)timeout, SQL_IS_UINTEGER); Py_END_ALLOW_THREADS if (!SQL_SUCCEEDED(ret)) RaiseErrorFromHandle(0, "SQLSetConnectAttr(SQL_ATTR_LOGIN_TIMEOUT)", hdbc, SQL_NULL_HANDLE); } const char* szEncoding = 0; Object encBytes; if (encoding) { #if PY_MAJOR_VERSION < 3 if (PyString_Check(encoding)) { szEncoding = PyString_AsString(encoding); if (!szEncoding) return false; } #endif if (PyUnicode_Check(encoding)) { #if PY_MAJOR_VERSION < 3 encBytes = PyUnicode_AsUTF8String(encoding); if (!encBytes) return false; szEncoding = PyString_AS_STRING(encBytes.Get()); #else szEncoding = PyUnicode_AsUTF8(encoding); #endif } } if (!fAnsi) { // I want to call the W version when possible since the driver can use it as an // indication that we can handle Unicode. SQLWChar wchar(pConnectString, szEncoding ? szEncoding : ENCSTR_UTF16NE); if (!wchar.isValid()) return false; Py_BEGIN_ALLOW_THREADS ret = SQLDriverConnectW(hdbc, 0, wchar.psz, SQL_NTS, 0, 0, 0, SQL_DRIVER_NOPROMPT); Py_END_ALLOW_THREADS if (SQL_SUCCEEDED(ret)) return true; } SQLWChar wchar(pConnectString, szEncoding ? 
szEncoding : "utf-8"); if (!wchar.isValid()) return false; Py_BEGIN_ALLOW_THREADS ret = SQLDriverConnect(hdbc, 0, (SQLCHAR*)wchar.psz, SQL_NTS, 0, 0, 0, SQL_DRIVER_NOPROMPT); Py_END_ALLOW_THREADS if (SQL_SUCCEEDED(ret)) return true; RaiseErrorFromHandle(0, "SQLDriverConnect", hdbc, SQL_NULL_HANDLE); return false; } static bool ApplyPreconnAttrs(HDBC hdbc, SQLINTEGER ikey, PyObject *value, char *strencoding) { SQLRETURN ret; SQLPOINTER ivalue = 0; SQLINTEGER vallen = 0; if (PyLong_Check(value)) { if (_PyLong_Sign(value) >= 0) { ivalue = (SQLPOINTER)PyLong_AsUnsignedLong(value); vallen = SQL_IS_UINTEGER; } else { ivalue = (SQLPOINTER)PyLong_AsLong(value); vallen = SQL_IS_INTEGER; } } #if PY_MAJOR_VERSION < 3 else if (PyInt_Check(value)) { ivalue = (SQLPOINTER)PyInt_AsLong(value); vallen = SQL_IS_INTEGER; } else if (PyBuffer_Check(value)) { // We can only assume and take the first segment. PyBuffer_GetMemory(value, (const char**)&ivalue); vallen = SQL_IS_POINTER; } #endif #if PY_VERSION_HEX >= 0x02060000 else if (PyByteArray_Check(value)) { ivalue = (SQLPOINTER)PyByteArray_AsString(value); vallen = SQL_IS_POINTER; } #endif else if (PyBytes_Check(value)) { ivalue = PyBytes_AS_STRING(value); #if PY_MAJOR_VERSION < 3 vallen = SQL_NTS; #else vallen = SQL_IS_POINTER; #endif } else if (PyUnicode_Check(value)) { Object stringholder; if (sizeof(Py_UNICODE) == 2 // This part should be compile-time. && (!strencoding || !strcmp(strencoding, "utf-16le"))) { // default or utf-16le is set, pass through directly ivalue = PyUnicode_AS_UNICODE(value); } else { // use strencoding to convert, default to utf-16le if not set. stringholder = PyCodec_Encode(value, strencoding ? 
strencoding : "utf-16le", "strict"); ivalue = PyBytes_AS_STRING(stringholder.Get()); } vallen = SQL_NTS; Py_BEGIN_ALLOW_THREADS ret = SQLSetConnectAttrW(hdbc, ikey, ivalue, vallen); Py_END_ALLOW_THREADS goto checkSuccess; } else if (PySequence_Check(value)) { // To allow for possibility of setting multiple attributes more than once. Py_ssize_t len = PySequence_Size(value); for (Py_ssize_t i = 0; i < len; i++) { Object v(PySequence_GetItem(value, i)); if (!ApplyPreconnAttrs(hdbc, ikey, v.Get(), strencoding)) return false; } return true; } Py_BEGIN_ALLOW_THREADS ret = SQLSetConnectAttr(hdbc, ikey, ivalue, vallen); Py_END_ALLOW_THREADS checkSuccess: if (!SQL_SUCCEEDED(ret)) { RaiseErrorFromHandle(0, "SQLSetConnectAttr", hdbc, SQL_NULL_HANDLE); Py_BEGIN_ALLOW_THREADS SQLFreeHandle(SQL_HANDLE_DBC, hdbc); Py_END_ALLOW_THREADS return false; } return true; } PyObject* Connection_New(PyObject* pConnectString, bool fAutoCommit, bool fAnsi, long timeout, bool fReadOnly, PyObject* attrs_before, Object& encoding) { // pConnectString // A string or unicode object. (This must be checked by the caller.) // // fAnsi // If true, do not attempt a Unicode connection. // // Allocate HDBC and connect // Object attrs_before_o(attrs_before); HDBC hdbc = SQL_NULL_HANDLE; SQLRETURN ret; Py_BEGIN_ALLOW_THREADS ret = SQLAllocHandle(SQL_HANDLE_DBC, henv, &hdbc); Py_END_ALLOW_THREADS if (!SQL_SUCCEEDED(ret)) return RaiseErrorFromHandle(0, "SQLAllocHandle", SQL_NULL_HANDLE, SQL_NULL_HANDLE); // // Attributes that must be set before connecting. // if (attrs_before) { Py_ssize_t pos = 0; PyObject* key = 0; PyObject* value = 0; Object encodingholder; char *strencoding = encoding.Get() ? (PyUnicode_Check(encoding) ? PyBytes_AsString(encodingholder = PyCodec_Encode(encoding, "utf-8", "strict")) : PyBytes_Check(encoding) ? 
PyBytes_AsString(encoding) : 0) : 0; while (PyDict_Next(attrs_before, &pos, &key, &value)) { SQLINTEGER ikey = 0; if (PyLong_Check(key)) ikey = (int)PyLong_AsLong(key); #if PY_MAJOR_VERSION < 3 else if (PyInt_Check(key)) ikey = (int)PyInt_AsLong(key); #endif if (!ApplyPreconnAttrs(hdbc, ikey, value, strencoding)) { return 0; } } } if (!Connect(pConnectString, hdbc, fAnsi, timeout, encoding)) { // Connect has already set an exception. Py_BEGIN_ALLOW_THREADS SQLFreeHandle(SQL_HANDLE_DBC, hdbc); Py_END_ALLOW_THREADS return 0; } // // Connected, so allocate the Connection object. // // Set all variables to something valid, so we don't crash in dealloc if this function fails. #ifdef _MSC_VER #pragma warning(disable : 4365) #endif Connection* cnxn = PyObject_NEW(Connection, &ConnectionType); #ifdef _MSC_VER #pragma warning(default : 4365) #endif if (cnxn == 0) { Py_BEGIN_ALLOW_THREADS SQLFreeHandle(SQL_HANDLE_DBC, hdbc); Py_END_ALLOW_THREADS return 0; } cnxn->hdbc = hdbc; cnxn->nAutoCommit = fAutoCommit ? SQL_AUTOCOMMIT_ON : SQL_AUTOCOMMIT_OFF; cnxn->searchescape = 0; cnxn->maxwrite = 0; cnxn->timeout = 0; cnxn->conv_count = 0; cnxn->conv_types = 0; cnxn->conv_funcs = 0; cnxn->attrs_before = attrs_before_o.Detach(); // This is an inefficient default, but should work all the time. When we are offered // single-byte text we don't actually know what the encoding is. For example, with SQL // Server the encoding is based on the database's collation. We ask the driver / DB to // convert to SQL_C_WCHAR and use the ODBC default of UTF-16LE. 
    // Default the SQL_CHAR, SQL_WCHAR, and metadata encodings to native-endian UTF-16 fetched
    // through SQL_C_WCHAR (see the comment above: the driver's single-byte encoding is unknown,
    // so asking the driver to convert to wide characters is the safe default).
    cnxn->sqlchar_enc.optenc = OPTENC_UTF16NE;
    cnxn->sqlchar_enc.name   = _strdup(ENCSTR_UTF16NE);
    cnxn->sqlchar_enc.ctype  = SQL_C_WCHAR;

    cnxn->sqlwchar_enc.optenc = OPTENC_UTF16NE;
    cnxn->sqlwchar_enc.name   = _strdup(ENCSTR_UTF16NE);
    cnxn->sqlwchar_enc.ctype  = SQL_C_WCHAR;

    cnxn->metadata_enc.optenc = OPTENC_UTF16NE;
    cnxn->metadata_enc.name   = _strdup(ENCSTR_UTF16NE);
    cnxn->metadata_enc.ctype  = SQL_C_WCHAR;

    // Note: I attempted to use UTF-8 here too since it can hold any type, but SQL Server fails
    // with a data truncation error if we send something encoded in 2 bytes to a column with 1
    // character.  I don't know if this is a bug in SQL Server's driver or if I'm missing
    // something, so we'll stay with the default ODBC conversions.
    cnxn->unicode_enc.optenc = OPTENC_UTF16NE;
    cnxn->unicode_enc.name   = _strdup(ENCSTR_UTF16NE);
    cnxn->unicode_enc.ctype  = SQL_C_WCHAR;

#if PY_MAJOR_VERSION < 3
    cnxn->str_enc.optenc = OPTENC_UTF8;
    cnxn->str_enc.name   = _strdup("utf-8");
    cnxn->str_enc.ctype  = SQL_C_CHAR;

    cnxn->sqlchar_enc.to  = TO_UNICODE;
    cnxn->sqlwchar_enc.to = TO_UNICODE;
    cnxn->metadata_enc.to = TO_UNICODE;
#endif

    // Every encoding name above came from _strdup, which can return NULL on OOM, so check them
    // all before continuing.  (Py_DECREF triggers Connection_dealloc, which frees whichever
    // names did get allocated.)
    if (!cnxn->sqlchar_enc.name || !cnxn->sqlwchar_enc.name || !cnxn->metadata_enc.name || !cnxn->unicode_enc.name
#if PY_MAJOR_VERSION < 3
        || !cnxn->str_enc.name
#endif
        )
    {
        PyErr_NoMemory();
        Py_DECREF(cnxn);
        return 0;
    }

    //
    // Initialize autocommit mode.
    //

    // The DB API says we have to default to manual-commit, but ODBC defaults to auto-commit.  We also provide a
    // keyword parameter that allows the user to override the DB API and force us to start in auto-commit (in which
    // case we don't have to do anything).
if (fAutoCommit == false) { Py_BEGIN_ALLOW_THREADS ret = SQLSetConnectAttr(cnxn->hdbc, SQL_ATTR_AUTOCOMMIT, (SQLPOINTER)cnxn->nAutoCommit, SQL_IS_UINTEGER); Py_END_ALLOW_THREADS if (!SQL_SUCCEEDED(ret)) { RaiseErrorFromHandle(cnxn, "SQLSetConnectAttr(SQL_ATTR_AUTOCOMMIT)", cnxn->hdbc, SQL_NULL_HANDLE); Py_DECREF(cnxn); return 0; } } if (fReadOnly) { Py_BEGIN_ALLOW_THREADS ret = SQLSetConnectAttr(cnxn->hdbc, SQL_ATTR_ACCESS_MODE, (SQLPOINTER)SQL_MODE_READ_ONLY, 0); Py_END_ALLOW_THREADS if (!SQL_SUCCEEDED(ret)) { RaiseErrorFromHandle(cnxn, "SQLSetConnectAttr(SQL_ATTR_ACCESS_MODE)", cnxn->hdbc, SQL_NULL_HANDLE); Py_DECREF(cnxn); return 0; } } TRACE("cnxn.new cnxn=%p hdbc=%d\n", cnxn, cnxn->hdbc); // // Gather connection-level information we'll need later. // Object info(GetConnectionInfo(pConnectString, cnxn)); if (!info.IsValid()) { Py_DECREF(cnxn); return 0; } CnxnInfo* p = (CnxnInfo*)info.Get(); cnxn->odbc_major = p->odbc_major; cnxn->odbc_minor = p->odbc_minor; cnxn->supports_describeparam = p->supports_describeparam; cnxn->datetime_precision = p->datetime_precision; cnxn->need_long_data_len = p->need_long_data_len; cnxn->varchar_maxlength = p->varchar_maxlength; cnxn->wvarchar_maxlength = p->wvarchar_maxlength; cnxn->binary_maxlength = p->binary_maxlength; return reinterpret_cast(cnxn); } static void _clear_conv(Connection* cnxn) { if (cnxn->conv_count != 0) { pyodbc_free(cnxn->conv_types); cnxn->conv_types = 0; for (int i = 0; i < cnxn->conv_count; i++) Py_XDECREF(cnxn->conv_funcs[i]); pyodbc_free(cnxn->conv_funcs); cnxn->conv_funcs = 0; cnxn->conv_count = 0; } } static char set_attr_doc[] = "set_attr(attr_id, value) -> None\n\n" "Calls SQLSetConnectAttr with the given values.\n\n" "attr_id\n" " The attribute id (integer) to set. 
These are ODBC or driver constants.\n\n" "value\n" " An integer value.\n\n" "At this time, only integer values are supported and are always passed as SQLUINTEGER."; static PyObject* Connection_set_attr(PyObject* self, PyObject* args) { int id; int value; if (!PyArg_ParseTuple(args, "ii", &id, &value)) return 0; Connection* cnxn = (Connection*)self; SQLRETURN ret; Py_BEGIN_ALLOW_THREADS ret = SQLSetConnectAttr(cnxn->hdbc, id, (SQLPOINTER)(intptr_t)value, SQL_IS_INTEGER); Py_END_ALLOW_THREADS if (!SQL_SUCCEEDED(ret)) return RaiseErrorFromHandle(cnxn, "SQLSetConnectAttr", cnxn->hdbc, SQL_NULL_HANDLE); Py_RETURN_NONE; } static char conv_clear_doc[] = "clear_output_converters() --> None\n\n" "Remove all output converter functions."; static PyObject* Connection_conv_clear(PyObject* self, PyObject* args) { UNUSED(args); Connection* cnxn = (Connection*)self; _clear_conv(cnxn); Py_RETURN_NONE; } static int Connection_clear(PyObject* self) { // Internal method for closing the connection. (Not called close so it isn't confused with the external close // method.) 
Connection* cnxn = (Connection*)self; if (cnxn->hdbc != SQL_NULL_HANDLE) { TRACE("cnxn.clear cnxn=%p hdbc=%d\n", cnxn, cnxn->hdbc); HDBC hdbc = cnxn->hdbc; cnxn->hdbc = SQL_NULL_HANDLE; Py_BEGIN_ALLOW_THREADS if (cnxn->nAutoCommit == SQL_AUTOCOMMIT_OFF) SQLEndTran(SQL_HANDLE_DBC, hdbc, SQL_ROLLBACK); SQLDisconnect(hdbc); SQLFreeHandle(SQL_HANDLE_DBC, hdbc); Py_END_ALLOW_THREADS } Py_XDECREF(cnxn->searchescape); cnxn->searchescape = 0; free((void*)cnxn->sqlchar_enc.name); cnxn->sqlchar_enc.name = 0; free((void*)cnxn->sqlwchar_enc.name); cnxn->sqlwchar_enc.name = 0; free((void*)cnxn->metadata_enc.name); cnxn->metadata_enc.name = 0; free((void*)cnxn->unicode_enc.name); cnxn->unicode_enc.name = 0; #if PY_MAJOR_VERSION < 3 free((void*)cnxn->str_enc.name); cnxn->str_enc.name = 0; #endif Py_XDECREF(cnxn->attrs_before); cnxn->attrs_before = 0; _clear_conv(cnxn); return 0; } static void Connection_dealloc(PyObject* self) { Connection_clear(self); PyObject_Del(self); } static char close_doc[] = "Close the connection now (rather than whenever __del__ is called).\n" "\n" "The connection will be unusable from this point forward and a ProgrammingError\n" "will be raised if any operation is attempted with the connection. 
The same\n" "applies to all cursor objects trying to use the connection.\n" "\n" "Note that closing a connection without committing the changes first will cause\n" "an implicit rollback to be performed."; static PyObject* Connection_close(PyObject* self, PyObject* args) { UNUSED(args); Connection* cnxn = Connection_Validate(self); if (!cnxn) return 0; Connection_clear(self); Py_RETURN_NONE; } static PyObject* Connection_cursor(PyObject* self, PyObject* args) { UNUSED(args); Connection* cnxn = Connection_Validate(self); if (!cnxn) return 0; return (PyObject*)Cursor_New(cnxn); } static PyObject* Connection_execute(PyObject* self, PyObject* args) { PyObject* result = 0; Cursor* cursor; Connection* cnxn = Connection_Validate(self); if (!cnxn) return 0; cursor = Cursor_New(cnxn); if (!cursor) return 0; result = Cursor_execute((PyObject*)cursor, args); Py_DECREF((PyObject*)cursor); return result; } enum { GI_YESNO, GI_STRING, GI_UINTEGER, GI_USMALLINT, }; struct GetInfoType { SQLUSMALLINT infotype; int datatype; // GI_XXX }; static const GetInfoType aInfoTypes[] = { // SQL_CONVERT_X { SQL_CONVERT_FUNCTIONS, GI_UINTEGER }, { SQL_CONVERT_BIGINT, GI_UINTEGER }, { SQL_CONVERT_BINARY, GI_UINTEGER }, { SQL_CONVERT_BIT, GI_UINTEGER }, { SQL_CONVERT_CHAR, GI_UINTEGER }, { SQL_CONVERT_DATE, GI_UINTEGER }, { SQL_CONVERT_DECIMAL, GI_UINTEGER }, { SQL_CONVERT_DOUBLE, GI_UINTEGER }, { SQL_CONVERT_FLOAT, GI_UINTEGER }, { SQL_CONVERT_INTEGER, GI_UINTEGER }, { SQL_CONVERT_LONGVARCHAR, GI_UINTEGER }, { SQL_CONVERT_NUMERIC, GI_UINTEGER }, { SQL_CONVERT_REAL, GI_UINTEGER }, { SQL_CONVERT_SMALLINT, GI_UINTEGER }, { SQL_CONVERT_TIME, GI_UINTEGER }, { SQL_CONVERT_TIMESTAMP, GI_UINTEGER }, { SQL_CONVERT_TINYINT, GI_UINTEGER }, { SQL_CONVERT_VARBINARY, GI_UINTEGER }, { SQL_CONVERT_VARCHAR, GI_UINTEGER }, { SQL_CONVERT_LONGVARBINARY, GI_UINTEGER }, { SQL_CONVERT_WCHAR, GI_UINTEGER }, { SQL_CONVERT_INTERVAL_DAY_TIME, GI_UINTEGER }, { SQL_CONVERT_INTERVAL_YEAR_MONTH, GI_UINTEGER }, { 
SQL_CONVERT_WLONGVARCHAR, GI_UINTEGER }, { SQL_CONVERT_WVARCHAR, GI_UINTEGER }, { SQL_CONVERT_GUID, GI_UINTEGER }, { SQL_ACCESSIBLE_PROCEDURES, GI_YESNO }, { SQL_ACCESSIBLE_TABLES, GI_YESNO }, { SQL_ACTIVE_ENVIRONMENTS, GI_USMALLINT }, { SQL_AGGREGATE_FUNCTIONS, GI_UINTEGER }, { SQL_ALTER_DOMAIN, GI_UINTEGER }, { SQL_ALTER_TABLE, GI_UINTEGER }, { SQL_ASYNC_MODE, GI_UINTEGER }, { SQL_BATCH_ROW_COUNT, GI_UINTEGER }, { SQL_BATCH_SUPPORT, GI_UINTEGER }, { SQL_BOOKMARK_PERSISTENCE, GI_UINTEGER }, { SQL_CATALOG_LOCATION, GI_USMALLINT }, { SQL_CATALOG_NAME, GI_YESNO }, { SQL_CATALOG_NAME_SEPARATOR, GI_STRING }, { SQL_CATALOG_TERM, GI_STRING }, { SQL_CATALOG_USAGE, GI_UINTEGER }, { SQL_COLLATION_SEQ, GI_STRING }, { SQL_COLUMN_ALIAS, GI_YESNO }, { SQL_CONCAT_NULL_BEHAVIOR, GI_USMALLINT }, { SQL_CORRELATION_NAME, GI_USMALLINT }, { SQL_CREATE_ASSERTION, GI_UINTEGER }, { SQL_CREATE_CHARACTER_SET, GI_UINTEGER }, { SQL_CREATE_COLLATION, GI_UINTEGER }, { SQL_CREATE_DOMAIN, GI_UINTEGER }, { SQL_CREATE_SCHEMA, GI_UINTEGER }, { SQL_CREATE_TABLE, GI_UINTEGER }, { SQL_CREATE_TRANSLATION, GI_UINTEGER }, { SQL_CREATE_VIEW, GI_UINTEGER }, { SQL_CURSOR_COMMIT_BEHAVIOR, GI_USMALLINT }, { SQL_CURSOR_ROLLBACK_BEHAVIOR, GI_USMALLINT }, { SQL_DATABASE_NAME, GI_STRING }, { SQL_DATA_SOURCE_NAME, GI_STRING }, { SQL_DATA_SOURCE_READ_ONLY, GI_YESNO }, { SQL_DATETIME_LITERALS, GI_UINTEGER }, { SQL_DBMS_NAME, GI_STRING }, { SQL_DBMS_VER, GI_STRING }, { SQL_DDL_INDEX, GI_UINTEGER }, { SQL_DEFAULT_TXN_ISOLATION, GI_UINTEGER }, { SQL_DESCRIBE_PARAMETER, GI_YESNO }, { SQL_DM_VER, GI_STRING }, { SQL_DRIVER_NAME, GI_STRING }, { SQL_DRIVER_ODBC_VER, GI_STRING }, { SQL_DRIVER_VER, GI_STRING }, { SQL_DROP_ASSERTION, GI_UINTEGER }, { SQL_DROP_CHARACTER_SET, GI_UINTEGER }, { SQL_DROP_COLLATION, GI_UINTEGER }, { SQL_DROP_DOMAIN, GI_UINTEGER }, { SQL_DROP_SCHEMA, GI_UINTEGER }, { SQL_DROP_TABLE, GI_UINTEGER }, { SQL_DROP_TRANSLATION, GI_UINTEGER }, { SQL_DROP_VIEW, GI_UINTEGER }, { SQL_DYNAMIC_CURSOR_ATTRIBUTES1, 
GI_UINTEGER }, { SQL_DYNAMIC_CURSOR_ATTRIBUTES2, GI_UINTEGER }, { SQL_EXPRESSIONS_IN_ORDERBY, GI_YESNO }, { SQL_FILE_USAGE, GI_USMALLINT }, { SQL_FORWARD_ONLY_CURSOR_ATTRIBUTES1, GI_UINTEGER }, { SQL_FORWARD_ONLY_CURSOR_ATTRIBUTES2, GI_UINTEGER }, { SQL_GETDATA_EXTENSIONS, GI_UINTEGER }, { SQL_GROUP_BY, GI_USMALLINT }, { SQL_IDENTIFIER_CASE, GI_USMALLINT }, { SQL_IDENTIFIER_QUOTE_CHAR, GI_STRING }, { SQL_INDEX_KEYWORDS, GI_UINTEGER }, { SQL_INFO_SCHEMA_VIEWS, GI_UINTEGER }, { SQL_INSERT_STATEMENT, GI_UINTEGER }, { SQL_INTEGRITY, GI_YESNO }, { SQL_KEYSET_CURSOR_ATTRIBUTES1, GI_UINTEGER }, { SQL_KEYSET_CURSOR_ATTRIBUTES2, GI_UINTEGER }, { SQL_KEYWORDS, GI_STRING }, { SQL_LIKE_ESCAPE_CLAUSE, GI_YESNO }, { SQL_MAX_ASYNC_CONCURRENT_STATEMENTS, GI_UINTEGER }, { SQL_MAX_BINARY_LITERAL_LEN, GI_UINTEGER }, { SQL_MAX_CATALOG_NAME_LEN, GI_USMALLINT }, { SQL_MAX_CHAR_LITERAL_LEN, GI_UINTEGER }, { SQL_MAX_COLUMNS_IN_GROUP_BY, GI_USMALLINT }, { SQL_MAX_COLUMNS_IN_INDEX, GI_USMALLINT }, { SQL_MAX_COLUMNS_IN_ORDER_BY, GI_USMALLINT }, { SQL_MAX_COLUMNS_IN_SELECT, GI_USMALLINT }, { SQL_MAX_COLUMNS_IN_TABLE, GI_USMALLINT }, { SQL_MAX_COLUMN_NAME_LEN, GI_USMALLINT }, { SQL_MAX_CONCURRENT_ACTIVITIES, GI_USMALLINT }, { SQL_MAX_CURSOR_NAME_LEN, GI_USMALLINT }, { SQL_MAX_DRIVER_CONNECTIONS, GI_USMALLINT }, { SQL_MAX_IDENTIFIER_LEN, GI_USMALLINT }, { SQL_MAX_INDEX_SIZE, GI_UINTEGER }, { SQL_MAX_PROCEDURE_NAME_LEN, GI_USMALLINT }, { SQL_MAX_ROW_SIZE, GI_UINTEGER }, { SQL_MAX_ROW_SIZE_INCLUDES_LONG, GI_YESNO }, { SQL_MAX_SCHEMA_NAME_LEN, GI_USMALLINT }, { SQL_MAX_STATEMENT_LEN, GI_UINTEGER }, { SQL_MAX_TABLES_IN_SELECT, GI_USMALLINT }, { SQL_MAX_TABLE_NAME_LEN, GI_USMALLINT }, { SQL_MAX_USER_NAME_LEN, GI_USMALLINT }, { SQL_MULTIPLE_ACTIVE_TXN, GI_YESNO }, { SQL_MULT_RESULT_SETS, GI_YESNO }, { SQL_NEED_LONG_DATA_LEN, GI_YESNO }, { SQL_NON_NULLABLE_COLUMNS, GI_USMALLINT }, { SQL_NULL_COLLATION, GI_USMALLINT }, { SQL_NUMERIC_FUNCTIONS, GI_UINTEGER }, { SQL_ODBC_INTERFACE_CONFORMANCE, 
GI_UINTEGER }, { SQL_ODBC_VER, GI_STRING }, { SQL_OJ_CAPABILITIES, GI_UINTEGER }, { SQL_ORDER_BY_COLUMNS_IN_SELECT, GI_YESNO }, { SQL_PARAM_ARRAY_ROW_COUNTS, GI_UINTEGER }, { SQL_PARAM_ARRAY_SELECTS, GI_UINTEGER }, { SQL_PROCEDURES, GI_YESNO }, { SQL_PROCEDURE_TERM, GI_STRING }, { SQL_QUOTED_IDENTIFIER_CASE, GI_USMALLINT }, { SQL_ROW_UPDATES, GI_YESNO }, { SQL_SCHEMA_TERM, GI_STRING }, { SQL_SCHEMA_USAGE, GI_UINTEGER }, { SQL_SCROLL_OPTIONS, GI_UINTEGER }, { SQL_SEARCH_PATTERN_ESCAPE, GI_STRING }, { SQL_SERVER_NAME, GI_STRING }, { SQL_SPECIAL_CHARACTERS, GI_STRING }, { SQL_SQL92_DATETIME_FUNCTIONS, GI_UINTEGER }, { SQL_SQL92_FOREIGN_KEY_DELETE_RULE, GI_UINTEGER }, { SQL_SQL92_FOREIGN_KEY_UPDATE_RULE, GI_UINTEGER }, { SQL_SQL92_GRANT, GI_UINTEGER }, { SQL_SQL92_NUMERIC_VALUE_FUNCTIONS, GI_UINTEGER }, { SQL_SQL92_PREDICATES, GI_UINTEGER }, { SQL_SQL92_RELATIONAL_JOIN_OPERATORS, GI_UINTEGER }, { SQL_SQL92_REVOKE, GI_UINTEGER }, { SQL_SQL92_ROW_VALUE_CONSTRUCTOR, GI_UINTEGER }, { SQL_SQL92_STRING_FUNCTIONS, GI_UINTEGER }, { SQL_SQL92_VALUE_EXPRESSIONS, GI_UINTEGER }, { SQL_SQL_CONFORMANCE, GI_UINTEGER }, { SQL_STANDARD_CLI_CONFORMANCE, GI_UINTEGER }, { SQL_STATIC_CURSOR_ATTRIBUTES1, GI_UINTEGER }, { SQL_STATIC_CURSOR_ATTRIBUTES2, GI_UINTEGER }, { SQL_STRING_FUNCTIONS, GI_UINTEGER }, { SQL_SUBQUERIES, GI_UINTEGER }, { SQL_SYSTEM_FUNCTIONS, GI_UINTEGER }, { SQL_TABLE_TERM, GI_STRING }, { SQL_TIMEDATE_ADD_INTERVALS, GI_UINTEGER }, { SQL_TIMEDATE_DIFF_INTERVALS, GI_UINTEGER }, { SQL_TIMEDATE_FUNCTIONS, GI_UINTEGER }, { SQL_TXN_CAPABLE, GI_USMALLINT }, { SQL_TXN_ISOLATION_OPTION, GI_UINTEGER }, { SQL_UNION, GI_UINTEGER }, { SQL_USER_NAME, GI_STRING }, { SQL_XOPEN_CLI_YEAR, GI_STRING }, }; static PyObject* Connection_getinfo(PyObject* self, PyObject* args) { Connection* cnxn = Connection_Validate(self); if (!cnxn) return 0; unsigned long infotype; if (!PyArg_ParseTuple(args, "k", &infotype)) return 0; unsigned int i = 0; for (; i < _countof(aInfoTypes); i++) { if 
(aInfoTypes[i].infotype == infotype) break; } if (i == _countof(aInfoTypes)) return RaiseErrorV(0, ProgrammingError, "Unsupported getinfo value: %d", infotype); char szBuffer[0x1000]; SQLSMALLINT cch = 0; SQLRETURN ret; Py_BEGIN_ALLOW_THREADS ret = SQLGetInfo(cnxn->hdbc, (SQLUSMALLINT)infotype, szBuffer, sizeof(szBuffer), &cch); Py_END_ALLOW_THREADS if (!SQL_SUCCEEDED(ret)) { RaiseErrorFromHandle(cnxn, "SQLGetInfo", cnxn->hdbc, SQL_NULL_HANDLE); return 0; } PyObject* result = 0; switch (aInfoTypes[i].datatype) { case GI_YESNO: result = (szBuffer[0] == 'Y') ? Py_True : Py_False; Py_INCREF(result); break; case GI_STRING: result = PyString_FromStringAndSize(szBuffer, (Py_ssize_t)cch); break; case GI_UINTEGER: { SQLUINTEGER n = *(SQLUINTEGER*)szBuffer; // Does this work on PPC or do we need a union? #if PY_MAJOR_VERSION >= 3 result = PyLong_FromLong((long)n); #else if (n <= (SQLUINTEGER)PyInt_GetMax()) result = PyInt_FromLong((long)n); else result = PyLong_FromUnsignedLong(n); #endif break; } case GI_USMALLINT: result = PyInt_FromLong(*(SQLUSMALLINT*)szBuffer); break; } return result; } PyObject* Connection_endtrans(Connection* cnxn, SQLSMALLINT type) { // If called from Cursor.commit, it is possible that `cnxn` is deleted by another thread when we release them // below. (The cursor has had its reference incremented by the method it is calling, but nothing has incremented // the connections count. We could, but we really only need the HDBC.) 
    // Copy the HDBC to a local before releasing the GIL: as the comment on this function
    // explains, `cnxn` may be destroyed by another thread while SQLEndTran is running, so only
    // the raw handle may be used inside the allow-threads section.
    HDBC hdbc = cnxn->hdbc;

    SQLRETURN ret;
    Py_BEGIN_ALLOW_THREADS
    ret = SQLEndTran(SQL_HANDLE_DBC, hdbc, type);
    Py_END_ALLOW_THREADS

    if (!SQL_SUCCEEDED(ret))
    {
        RaiseErrorFromHandle(cnxn, "SQLEndTran", hdbc, SQL_NULL_HANDLE);
        return 0;
    }

    Py_RETURN_NONE;
}

// Connection.commit(): commits the current transaction via SQLEndTran(SQL_COMMIT).
static PyObject* Connection_commit(PyObject* self, PyObject* args)
{
    UNUSED(args);

    Connection* cnxn = Connection_Validate(self);
    if (!cnxn)
        return 0;

    TRACE("commit: cnxn=%p hdbc=%d\n", cnxn, cnxn->hdbc);

    return Connection_endtrans(cnxn, SQL_COMMIT);
}

// Connection.rollback(): rolls back the current transaction via SQLEndTran(SQL_ROLLBACK).
static PyObject* Connection_rollback(PyObject* self, PyObject* args)
{
    UNUSED(args);

    Connection* cnxn = Connection_Validate(self);
    if (!cnxn)
        return 0;

    TRACE("rollback: cnxn=%p hdbc=%d\n", cnxn, cnxn->hdbc);

    return Connection_endtrans(cnxn, SQL_ROLLBACK);
}

static char cursor_doc[] =
    "Return a new Cursor object using the connection.";

static char execute_doc[] =
    "execute(sql, [params]) --> Cursor\n"
    "\n"
    "Create a new Cursor object, call its execute method, and return it. See\n"
    "Cursor.execute for more details.\n"
    "\n"
    "This is a convenience method that is not part of the DB API. Since a new\n"
    "Cursor is allocated by each call, this should not be used if more than one SQL\n"
    "statement needs to be executed.";

static char commit_doc[] =
    "Commit any pending transaction to the database.";

// NOTE(review): the "the the" typo below is part of the user-visible docstring; it is left
// unchanged here because this is a documentation-only edit and must not alter runtime strings.
static char rollback_doc[] =
    "Causes the the database to roll back to the start of any pending transaction.";

static char getinfo_doc[] =
    "getinfo(type) --> str | int | bool\n"
    "\n"
    "Calls SQLGetInfo, passing `type`, and returns the result formatted as a Python object.";

// Getter for Connection.autocommit: True when the cached SQL_ATTR_AUTOCOMMIT state is on.
PyObject* Connection_getautocommit(PyObject* self, void* closure)
{
    UNUSED(closure);

    Connection* cnxn = Connection_Validate(self);
    if (!cnxn)
        return 0;

    PyObject* result = (cnxn->nAutoCommit == SQL_AUTOCOMMIT_ON) ?
        Py_True : Py_False;
    Py_INCREF(result);
    return result;
}

// Setter for Connection.autocommit: applies SQL_ATTR_AUTOCOMMIT to the driver immediately and,
// only on success, updates the cached nAutoCommit state.
static int Connection_setautocommit(PyObject* self, PyObject* value, void* closure)
{
    UNUSED(closure);

    Connection* cnxn = Connection_Validate(self);
    if (!cnxn)
        return -1;
    if (value == 0)
    {
        PyErr_SetString(PyExc_TypeError, "Cannot delete the autocommit attribute.");
        return -1;
    }

    uintptr_t nAutoCommit = PyObject_IsTrue(value) ? SQL_AUTOCOMMIT_ON : SQL_AUTOCOMMIT_OFF;

    SQLRETURN ret;
    Py_BEGIN_ALLOW_THREADS
    ret = SQLSetConnectAttr(cnxn->hdbc, SQL_ATTR_AUTOCOMMIT, (SQLPOINTER)nAutoCommit, SQL_IS_UINTEGER);
    Py_END_ALLOW_THREADS
    if (!SQL_SUCCEEDED(ret))
    {
        RaiseErrorFromHandle(cnxn, "SQLSetConnectAttr", cnxn->hdbc, SQL_NULL_HANDLE);
        return -1;
    }

    cnxn->nAutoCommit = nAutoCommit;
    return 0;
}

// Getter for Connection.closed: True once the HDBC has been released.  Does not use
// Connection_Validate because a closed connection is the expected case here, not an error.
static PyObject* Connection_getclosed(PyObject* self, void* closure)
{
    UNUSED(closure);

    Connection* cnxn;
    if (self == 0 || !Connection_Check(self))
    {
        PyErr_SetString(PyExc_TypeError, "Connection object required");
        return 0;
    }
    cnxn = (Connection*)self;

    if (cnxn->hdbc == SQL_NULL_HANDLE)
    {
        Py_RETURN_TRUE;
    }
    Py_RETURN_FALSE;
}

// Getter for Connection.searchescape: lazily fetches SQLGetInfo(SQL_SEARCH_PATTERN_ESCAPE)
// and caches the resulting string object on the connection for subsequent reads.
static PyObject* Connection_getsearchescape(PyObject* self, void* closure)
{
    UNUSED(closure);

    Connection* cnxn = (Connection*)self;

    if (!cnxn->searchescape)
    {
        char sz[8] = { 0 };
        SQLSMALLINT cch = 0;

        SQLRETURN ret;
        Py_BEGIN_ALLOW_THREADS
        ret = SQLGetInfo(cnxn->hdbc, SQL_SEARCH_PATTERN_ESCAPE, &sz, _countof(sz), &cch);
        Py_END_ALLOW_THREADS
        if (!SQL_SUCCEEDED(ret))
            return RaiseErrorFromHandle(cnxn, "SQLGetInfo", cnxn->hdbc, SQL_NULL_HANDLE);

        cnxn->searchescape = PyString_FromStringAndSize(sz, (Py_ssize_t)cch);
    }

    Py_INCREF(cnxn->searchescape);
    return cnxn->searchescape;
}

// Getter for Connection.maxwrite.
static PyObject* Connection_getmaxwrite(PyObject* self, void* closure)
{
    UNUSED(closure);

    Connection* cnxn = Connection_Validate(self);
    if (!cnxn)
        return 0;

    return PyLong_FromSsize_t(cnxn->maxwrite);
}

// Setter for Connection.maxwrite: 0 disables the limit; any other value must be >= 255.
static int Connection_setmaxwrite(PyObject* self, PyObject* value, void* closure)
{
    UNUSED(closure);

    Connection* cnxn = Connection_Validate(self);
    if (!cnxn)
        return -1;
    if (value == 0)
    {
        PyErr_SetString(PyExc_TypeError, "Cannot delete the maxwrite attribute.");
        return -1;
    }
    long maxwrite = PyLong_AsLong(value);
    if (PyErr_Occurred())
        return -1;
    Py_ssize_t minval = 255;
    if (maxwrite != 0 && maxwrite < minval)
    {
        PyErr_Format(PyExc_ValueError, "Cannot set maxwrite less than %d unless setting to 0.", (int)minval);
        return -1;
    }
    cnxn->maxwrite = maxwrite;
    return 0;
}

// Getter for Connection.timeout (the cached SQL_ATTR_CONNECTION_TIMEOUT value).
static PyObject* Connection_gettimeout(PyObject* self, void* closure)
{
    UNUSED(closure);

    Connection* cnxn = Connection_Validate(self);
    if (!cnxn)
        return 0;

    return PyInt_FromLong(cnxn->timeout);
}

// Setter for Connection.timeout: applies SQL_ATTR_CONNECTION_TIMEOUT to the driver immediately
// and, only on success, updates the cached value.  Negative values are rejected.
static int Connection_settimeout(PyObject* self, PyObject* value, void* closure)
{
    UNUSED(closure);

    Connection* cnxn = Connection_Validate(self);
    if (!cnxn)
        return -1;
    if (value == 0)
    {
        PyErr_SetString(PyExc_TypeError, "Cannot delete the timeout attribute.");
        return -1;
    }
    long timeout = PyInt_AsLong(value);
    if (timeout == -1 && PyErr_Occurred())
        return -1;
    if (timeout < 0)
    {
        PyErr_SetString(PyExc_ValueError, "Cannot set a negative timeout.");
        return -1;
    }

    SQLRETURN ret;
    Py_BEGIN_ALLOW_THREADS
    ret = SQLSetConnectAttr(cnxn->hdbc, SQL_ATTR_CONNECTION_TIMEOUT, (SQLPOINTER)(uintptr_t)timeout, SQL_IS_UINTEGER);
    Py_END_ALLOW_THREADS
    if (!SQL_SUCCEEDED(ret))
    {
        RaiseErrorFromHandle(cnxn, "SQLSetConnectAttr", cnxn->hdbc, SQL_NULL_HANDLE);
        return -1;
    }

    cnxn->timeout = timeout;

    return 0;
}

// Removes the output converter registered for `sqltype`, if any, from the parallel
// conv_types/conv_funcs arrays.  Returns true even when nothing was registered.
static bool _remove_converter(PyObject* self, SQLSMALLINT sqltype)
{
    Connection* cnxn = (Connection*)self;
    if (!cnxn->conv_count)
    {
        // There are no converters, so nothing to remove.
        return true;
    }

    int count = cnxn->conv_count;
    SQLSMALLINT* types = cnxn->conv_types;
    PyObject** funcs = cnxn->conv_funcs;

    int i = 0;
    for (; i < count; i++)
        if (types[i] == sqltype)
            break;

    if (i == count)
    {
        // There is no converter for this type, so nothing to remove.
        return true;
    }

    Py_DECREF(funcs[i]);

    int move = count - i - 1;   // How many are we moving?
if (move > 0) { memcpy(&types[i], &types[i+1], move * sizeof(SQLSMALLINT)); memcpy(&funcs[i], &funcs[i+1], move * sizeof(PyObject*)); } count--; // Note: If the realloc fails, the old array is still around and is 1 element too long but // everything will still work, so we ignore. pyodbc_realloc((BYTE**)&types, count * sizeof(SQLSMALLINT)); pyodbc_realloc((BYTE**)&funcs, count * sizeof(PyObject*)); cnxn->conv_count = count; cnxn->conv_types = types; cnxn->conv_funcs = funcs; return true; } static bool _add_converter(PyObject* self, SQLSMALLINT sqltype, PyObject* func) { Connection* cnxn = (Connection*)self; if (cnxn->conv_count) { // If the sqltype is already registered, replace the old conversion function with the new. for (int i = 0; i < cnxn->conv_count; i++) { if (cnxn->conv_types[i] == sqltype) { Py_XDECREF(cnxn->conv_funcs[i]); cnxn->conv_funcs[i] = func; Py_INCREF(func); return true; } } } int oldcount = cnxn->conv_count; SQLSMALLINT* oldtypes = cnxn->conv_types; PyObject** oldfuncs = cnxn->conv_funcs; int newcount = oldcount + 1; SQLSMALLINT* newtypes = (SQLSMALLINT*)pyodbc_malloc(sizeof(SQLSMALLINT) * newcount); PyObject** newfuncs = (PyObject**)pyodbc_malloc(sizeof(PyObject*) * newcount); if (newtypes == 0 || newfuncs == 0) { if (newtypes) pyodbc_free(newtypes); if (newfuncs) pyodbc_free(newfuncs); PyErr_NoMemory(); return false; } newtypes[0] = sqltype; newfuncs[0] = func; Py_INCREF(func); cnxn->conv_count = newcount; cnxn->conv_types = newtypes; cnxn->conv_funcs = newfuncs; if (oldcount != 0) { // copy old items memcpy(&newtypes[1], oldtypes, sizeof(SQLSMALLINT) * oldcount); memcpy(&newfuncs[1], oldfuncs, sizeof(PyObject*) * oldcount); pyodbc_free(oldtypes); pyodbc_free(oldfuncs); } return true; } static char conv_add_doc[] = "add_output_converter(sqltype, func) --> None\n" "\n" "Register an output converter function that will be called whenever a value with\n" "the given SQL type is read from the database.\n" "\n" "sqltype\n" " The integer SQL type 
value to convert, which can be one of the defined\n" " standard constants (e.g. pyodbc.SQL_VARCHAR) or a database-specific value\n" " (e.g. -151 for the SQL Server 2008 geometry data type).\n" "\n" "func\n" " The converter function which will be called with a single parameter, the\n" " value, and should return the converted value. If the value is NULL, the\n" " parameter will be None. Otherwise it will be a " #if PY_MAJOR_VERSION >= 3 "bytes object.\n" #else "str object with the raw bytes.\n" #endif "\n" "If func is None, any existing converter is removed." ; static PyObject* Connection_conv_add(PyObject* self, PyObject* args) { int sqltype; PyObject* func; if (!PyArg_ParseTuple(args, "iO", &sqltype, &func)) return 0; if (func != Py_None) { if (!_add_converter(self, (SQLSMALLINT)sqltype, func)) return 0; } else { if (!_remove_converter(self, (SQLSMALLINT)sqltype)) return 0; } Py_RETURN_NONE; } static char conv_remove_doc[] = "remove_output_converter(sqltype) --> None\n" "\n" "Remove an output converter function that was registered with\n" "add_output_converter. It is safe to call if no converter is\n" "registered for the type.\n" "\n" "sqltype\n" " The integer SQL type value being converted, which can be one of the defined\n" " standard constants (e.g. pyodbc.SQL_VARCHAR) or a database-specific value\n" " (e.g. -151 for the SQL Server 2008 geometry data type).\n" ; static PyObject* Connection_conv_remove(PyObject* self, PyObject* args) { int sqltype; if (!PyArg_ParseTuple(args, "i", &sqltype)) return 0; if (!_remove_converter(self, (SQLSMALLINT)sqltype)) return 0; Py_RETURN_NONE; } static char conv_get_doc[] = "get_output_converter(sqltype) --> \n" "\n" "Get the output converter function that was registered with\n" "add_output_converter. It is safe to call if no converter is\n" "registered for the type (returns None).\n" "\n" "sqltype\n" " The integer SQL type value being converted, which can be one of the defined\n" " standard constants (e.g. 
pyodbc.SQL_VARCHAR) or a database-specific value\n" " (e.g. -151 for the SQL Server 2008 geometry data type).\n" ; static PyObject* _get_converter(PyObject* self, SQLSMALLINT sqltype) { Connection* cnxn = (Connection*)self; if (cnxn->conv_count) { for (int i = 0; i < cnxn->conv_count; i++) { if (cnxn->conv_types[i] == sqltype) { return cnxn->conv_funcs[i]; } } } Py_RETURN_NONE; } static PyObject* Connection_conv_get(PyObject* self, PyObject* args) { int sqltype; if (!PyArg_ParseTuple(args, "i", &sqltype)) return 0; return _get_converter(self, (SQLSMALLINT)sqltype); } static void NormalizeCodecName(const char* src, char* dest, size_t cbDest) { // Copies the codec name to dest, lowercasing it and replacing underscores with dashes. // (Same as _Py_normalize_encoding which is not public.) It also wraps the value with // pipes so we can search with it. // // UTF_8 --> |utf-8| // // This is an internal function - it will truncate so you should use a buffer bigger than // anything you expect to search for. char* pch = &dest[0]; char* pchLast = pch + cbDest - 2; // -2 -> leave room for pipe and null *pch++ = '|'; while (*src && pch < pchLast) { if (isupper(*src)) { *pch++ = (char)tolower(*src++); } else if (*src == '_') { *pch++ = '-'; src++; } else { *pch++ = *src++; } } *pch++ = '|'; *pch = '\0'; } static bool SetTextEncCommon(TextEnc& enc, const char* encoding, int ctype, bool allow_raw) { // Code common to setencoding and setdecoding. if (!encoding) { PyErr_Format(PyExc_ValueError, "encoding is required"); return false; } // Normalize the names so we don't have to worry about case or dashes vs underscores. // We'll lowercase everything and convert underscores to dashes. The results are then // surrounded with pipes so we can search strings. (See the `strstr` calls below.) 
    char lower[30];
    NormalizeCodecName(encoding, lower, sizeof(lower));

#if PY_MAJOR_VERSION < 3
    if (strcmp(lower, "|raw|") == 0)
    {
        if (!allow_raw)
        {
            // Give a better error message for 'raw' than "not a registered codec". It is never
            // registered.
            PyErr_Format(PyExc_ValueError, "Raw codec is only allowed for str / SQL_CHAR");
            return false;
        }
    }
    else if (!PyCodec_KnownEncoding(encoding))
    {
        PyErr_Format(PyExc_ValueError, "not a registered codec: '%s'", encoding);
        return false;
    }
#else
    if (!PyCodec_KnownEncoding(encoding))
    {
        PyErr_Format(PyExc_ValueError, "not a registered codec: '%s'", encoding);
        return false;
    }
#endif

    if (ctype != 0 && ctype != SQL_WCHAR && ctype != SQL_CHAR)
    {
        PyErr_Format(PyExc_ValueError, "Invalid ctype %d. Must be SQL_CHAR or SQL_WCHAR", ctype);
        return false;
    }

    // Keep a private copy of the name; release the previously stored name first.
    char* cpy = _strdup(encoding);
    if (!cpy)
    {
        PyErr_NoMemory();
        return false;
    }
    free((void*)enc.name);
    enc.name = cpy;

    // Note the reversed strstr arguments below: the normalized, pipe-wrapped name
    // (`lower`) is the needle and the literal list of known spellings is the haystack.
    if (strstr("|utf-8|utf8|", lower))
    {
        enc.optenc = OPTENC_UTF8;
        enc.ctype = (SQLSMALLINT)(ctype ? ctype : SQL_C_CHAR);
    }
    else if (strstr("|utf-16|utf16|", lower))
    {
        enc.optenc = OPTENC_UTF16;
        enc.ctype = (SQLSMALLINT)(ctype ? ctype : SQL_C_WCHAR);
    }
    else if (strstr("|utf-16-be|utf-16be|utf16be|", lower))
    {
        enc.optenc = OPTENC_UTF16BE;
        enc.ctype = (SQLSMALLINT)(ctype ? ctype : SQL_C_WCHAR);
    }
    else if (strstr("|utf-16-le|utf-16le|utf16le|", lower))
    {
        enc.optenc = OPTENC_UTF16LE;
        enc.ctype = (SQLSMALLINT)(ctype ? ctype : SQL_C_WCHAR);
    }
    else if (strstr("|utf-32|utf32|", lower))
    {
        enc.optenc = OPTENC_UTF32;
        enc.ctype = (SQLSMALLINT)(ctype ? ctype : SQL_C_WCHAR);
    }
    else if (strstr("|utf-32-be|utf-32be|utf32be|", lower))
    {
        enc.optenc = OPTENC_UTF32BE;
        enc.ctype = (SQLSMALLINT)(ctype ? ctype : SQL_C_WCHAR);
    }
    else if (strstr("|utf-32-le|utf-32le|utf32le|", lower))
    {
        enc.optenc = OPTENC_UTF32LE;
        enc.ctype = (SQLSMALLINT)(ctype ? ctype : SQL_C_WCHAR);
    }
    else if (strstr("|latin-1|latin1|iso-8859-1|iso8859-1|", lower))
    {
        enc.optenc = OPTENC_LATIN1;
        enc.ctype = (SQLSMALLINT)(ctype ? ctype : SQL_C_CHAR);
    }
#if PY_MAJOR_VERSION < 3
    else if (strstr("|raw|", lower))
    {
        enc.optenc = OPTENC_RAW;
        enc.ctype = SQL_C_CHAR;
    }
#endif
    else
    {
        // Unrecognized spelling: fall back to a generic codec-based single-byte path.
        enc.optenc = OPTENC_NONE;
        enc.ctype = SQL_C_CHAR;
    }

    return true;
}

// Python entry point for Connection.setencoding: configures how text is encoded when
// written to the database.
static PyObject* Connection_setencoding(PyObject* self, PyObject* args, PyObject* kwargs)
{
    Connection* cnxn = (Connection*)self;

#if PY_MAJOR_VERSION >= 3
    // In Python 3 we only support encodings for Unicode text.
    char* encoding = 0;
    int ctype = 0;
    static char *kwlist[] = { "encoding", "ctype", 0 };
    if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|si", kwlist, &encoding, &ctype))
        return 0;
    TextEnc& enc = cnxn->unicode_enc;
    bool allow_raw = false;
#else
    // In Python 2, we support encodings for Unicode and strings.
    PyObject* from_type;
    char* encoding = 0;
    int ctype = 0;
    static char *kwlist[] = { "fromtype", "encoding", "ctype", 0 };
    if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O|si", kwlist, &from_type, &encoding, &ctype))
        return 0;
    if (!IsUnicodeType(from_type) && ! IsStringType(from_type))
        return PyErr_Format(PyExc_TypeError, "fromtype must be str or unicode");
    TextEnc& enc = IsStringType(from_type) ? cnxn->str_enc : cnxn->unicode_enc;
    bool allow_raw = IsStringType(from_type);
#endif

    if (!SetTextEncCommon(enc, encoding, ctype, allow_raw))
        return 0;

    Py_RETURN_NONE;
}

static char setdecoding_doc[] =
#if PY_MAJOR_VERSION >= 3
    "setdecoding(sqltype, encoding=None, ctype=None) --> None\n"
#else
    "setdecoding(sqltype, encoding=None, ctype=None, to=None) --> None\n"
#endif
    "\n"
    "Configures how text of type `ctype` (SQL_CHAR or SQL_WCHAR) is decoded\n"
    "when read from the database.\n"
    "\n"
    "When reading, the database will assign one of the sqltypes to text columns.\n"
    "pyodbc uses this lookup the decoding information set by this function.\n"
    "sqltype: pyodbc.SQL_CHAR or pyodbc.SQL_WCHAR\n\n"
    "encoding: A registered Python encoding such as \"utf-8\".\n\n"
#if PY_MAJOR_VERSION < 3
    "to: the desired Python object type - str or unicode"
#endif
    "ctype: The C data type should be requested. Set this to SQL_CHAR for\n"
    " single-byte encodings like UTF-8 and to SQL_WCHAR for two-byte encodings\n"
    " like UTF-16.";

// Python entry point for Connection.setdecoding: configures how SQL_CHAR / SQL_WCHAR /
// SQL_WMETADATA text is decoded when read from the database.
static PyObject* Connection_setdecoding(PyObject* self, PyObject* args, PyObject* kwargs)
{
    Connection* cnxn = (Connection*)self;

    int sqltype;
    char* encoding = 0;
    int ctype = 0;
    bool allow_raw = false;

#if PY_MAJOR_VERSION >= 3
    static char *kwlist[] = {"sqltype", "encoding", "ctype", NULL};
    if (!PyArg_ParseTupleAndKeywords(args, kwargs, "i|si", kwlist, &sqltype, &encoding, &ctype))
        return 0;
#else
    int to = 0;
    PyObject* toObj = 0;
    static char *kwlist[] = {"sqltype", "encoding", "ctype", "to", NULL};
    if (!PyArg_ParseTupleAndKeywords(args, kwargs, "i|siO", kwlist, &sqltype, &encoding, &ctype, &toObj))
        return 0;
    if (toObj)
    {
        if (IsUnicodeType(toObj))
            to = TO_UNICODE;
        else if (IsStringType(toObj))
            to = TO_STR;
        else
            return PyErr_Format(PyExc_ValueError, "`to` can only be unicode or str");
    }
    allow_raw = (sqltype == SQL_CHAR && to != TO_UNICODE);
#endif

    if (sqltype != SQL_WCHAR && sqltype != SQL_CHAR && sqltype != SQL_WMETADATA)
        return PyErr_Format(PyExc_ValueError, "Invalid sqltype %d. Must be SQL_CHAR or SQL_WCHAR or SQL_WMETADATA", sqltype);

    // Select which of the three per-connection decoders is being configured.
    TextEnc& enc = (sqltype == SQL_CHAR) ? cnxn->sqlchar_enc : ((sqltype == SQL_WMETADATA) ? cnxn->metadata_enc : cnxn->sqlwchar_enc);

    if (!SetTextEncCommon(enc, encoding, ctype, allow_raw))
        return 0;

#if PY_MAJOR_VERSION < 3
    if (!to && enc.optenc == OPTENC_RAW)
        enc.to = TO_STR;
    else
        enc.to = to ? to : TO_UNICODE;
#endif

    Py_RETURN_NONE;
}

static char enter_doc[] = "__enter__() -> self.";

// Context-manager entry: returns the connection itself.
static PyObject* Connection_enter(PyObject* self, PyObject* args)
{
    UNUSED(args);
    Py_INCREF(self);
    return self;
}

static char exit_doc[] = "__exit__(*excinfo) -> None. Commits the connection if necessary.";

// Context-manager exit: in manual-commit mode, commits on a clean exit and rolls back
// when the block raised.  In autocommit mode there is no open transaction to end.
static PyObject* Connection_exit(PyObject* self, PyObject* args)
{
    Connection* cnxn = (Connection*)self;

    // If an error has occurred, `args` will be a tuple of 3 values. Otherwise it will be a tuple of 3 `None`s.
    I(PyTuple_Check(args));

    if (cnxn->nAutoCommit == SQL_AUTOCOMMIT_OFF)
    {
        SQLSMALLINT CompletionType = (PyTuple_GetItem(args, 0) == Py_None) ? SQL_COMMIT : SQL_ROLLBACK;
        SQLRETURN ret;
        Py_BEGIN_ALLOW_THREADS
        ret = SQLEndTran(SQL_HANDLE_DBC, cnxn->hdbc, CompletionType);
        Py_END_ALLOW_THREADS
        if (!SQL_SUCCEEDED(ret))
        {
            const char* szFunc = (CompletionType == SQL_COMMIT) ? "SQLEndTran(SQL_COMMIT)" : "SQLEndTran(SQL_ROLLBACK)";
            return RaiseErrorFromHandle(cnxn, szFunc, cnxn->hdbc, SQL_NULL_HANDLE);
        }
    }
    Py_RETURN_NONE;
}

static struct PyMethodDef Connection_methods[] =
{
    { "cursor", Connection_cursor, METH_NOARGS, cursor_doc },
    { "close", Connection_close, METH_NOARGS, close_doc },
    { "execute", Connection_execute, METH_VARARGS, execute_doc },
    { "commit", Connection_commit, METH_NOARGS, commit_doc },
    { "rollback", Connection_rollback, METH_NOARGS, rollback_doc },
    { "getinfo", Connection_getinfo, METH_VARARGS, getinfo_doc },
    { "add_output_converter", Connection_conv_add, METH_VARARGS, conv_add_doc },
    { "remove_output_converter", Connection_conv_remove, METH_VARARGS, conv_remove_doc },
    { "get_output_converter", Connection_conv_get, METH_VARARGS, conv_get_doc },
    { "clear_output_converters", Connection_conv_clear, METH_NOARGS, conv_clear_doc },
    { "setdecoding", (PyCFunction)Connection_setdecoding, METH_VARARGS|METH_KEYWORDS, setdecoding_doc },
    { "setencoding", (PyCFunction)Connection_setencoding, METH_VARARGS|METH_KEYWORDS, 0 },
    { "set_attr", Connection_set_attr, METH_VARARGS, set_attr_doc },
    { "__enter__", Connection_enter, METH_NOARGS, enter_doc },
    { "__exit__", Connection_exit, METH_VARARGS, exit_doc },
    { 0, 0, 0, 0 }
};

static PyGetSetDef Connection_getseters[] =
{
    { "closed", (getter)Connection_getclosed, 0, "Returns True if the connection is closed; False otherwise.", 0},
    { "searchescape", (getter)Connection_getsearchescape, 0, "The ODBC search pattern escape character, as returned by\n" "SQLGetInfo(SQL_SEARCH_PATTERN_ESCAPE). These are driver specific.", 0 },
    { "autocommit", Connection_getautocommit, Connection_setautocommit, "Returns True if the connection is in autocommit mode; False otherwise.", 0 },
    { "timeout", Connection_gettimeout, Connection_settimeout, "The timeout in seconds, zero means no timeout.", 0 },
    { "maxwrite", Connection_getmaxwrite, Connection_setmaxwrite, "The maximum bytes to write before using SQLPutData.", 0 },
    { 0 }
};

PyTypeObject ConnectionType =
{
    PyVarObject_HEAD_INIT(0, 0)
    "pyodbc.Connection",        // tp_name
    sizeof(Connection),         // tp_basicsize
    0,                          // tp_itemsize
    Connection_dealloc,         // destructor tp_dealloc
    0,                          // tp_print
    0,                          // tp_getattr
    0,                          // tp_setattr
    0,                          // tp_compare
    0,                          // tp_repr
    0,                          // tp_as_number
    0,                          // tp_as_sequence
    0,                          // tp_as_mapping
    0,                          // tp_hash
    0,                          // tp_call
    0,                          // tp_str
    0,                          // tp_getattro
    0,                          // tp_setattro
    0,                          // tp_as_buffer
    Py_TPFLAGS_DEFAULT,         // tp_flags
    connection_doc,             // tp_doc
    0,                          // tp_traverse
    0,                          // tp_clear
    0,                          // tp_richcompare
    0,                          // tp_weaklistoffset
    0,                          // tp_iter
    0,                          // tp_iternext
    Connection_methods,         // tp_methods
    0,                          // tp_members
    Connection_getseters,       // tp_getset
    0,                          // tp_base
    0,                          // tp_dict
    0,                          // tp_descr_get
    0,                          // tp_descr_set
    0,                          // tp_dictoffset
    0,                          // tp_init
    0,                          // tp_alloc
    0,                          // tp_new
    0,                          // tp_free
    0,                          // tp_is_gc
    0,                          // tp_bases
    0,                          // tp_mro
    0,                          // tp_cache
    0,                          // tp_subclasses
    0,                          // tp_weaklist
};
././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1619054768.0 pyodbc-4.0.32/src/connection.h0000664000175000017500000001175200000000000017530 0ustar00mkleehammermkleehammer
// Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
// documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
// permit persons to whom the Software is furnished to do so.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
// WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
// OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

#ifndef CONNECTION_H
#define CONNECTION_H

struct Cursor;

extern PyTypeObject ConnectionType;

struct TextEnc;

// The Python object behind pyodbc.Connection: wraps a single ODBC HDBC together with
// the per-connection text-encoding configuration and output-converter registry.
struct Connection
{
    PyObject_HEAD

    // Set to SQL_NULL_HANDLE when the connection is closed.
    HDBC hdbc;

    // Will be SQL_AUTOCOMMIT_ON or SQL_AUTOCOMMIT_OFF.
    uintptr_t nAutoCommit;

    // The ODBC version the driver supports, from SQLGetInfo(DRIVER_ODBC_VER). This is set after connecting.
    char odbc_major;
    char odbc_minor;

    // The escape character from SQLGetInfo. This is not initialized until requested, so this may be zero!
    PyObject* searchescape;

    // Will be true if SQLDescribeParam is supported. If false, we'll have to guess but the user will not be able
    // to insert NULLs into binary columns.
    bool supports_describeparam;

    // The column size of datetime columns, obtained from SQLGetInfo(), used to determine the datetime precision.
    int datetime_precision;

    // The connection timeout in seconds.
    long timeout;

    // Pointer connection attributes may require that the pointed-to object be kept
    // valid until some unspecified time in the future, so keep them here for now.
    PyObject* attrs_before;

    TextEnc sqlchar_enc;    // encoding used when reading SQL_CHAR data
    TextEnc sqlwchar_enc;   // encoding used when reading SQL_WCHAR data
    TextEnc unicode_enc;    // encoding used when writing unicode strings

#if PY_MAJOR_VERSION < 3
    TextEnc str_enc;        // encoding used when writing non-unicode strings
#endif

    // Used when reading column names for Cursor.description. I originally thought I could use
    // the TextEncs above based on whether I called SQLDescribeCol vs SQLDescribeColW.
    // Unfortunately it looks like PostgreSQL and MySQL (and probably others) ignore the ODBC
    // specification regarding encoding everywhere *except* in these functions - SQLDescribeCol
    // seems to always return UTF-16LE by them regardless of the connection settings.
    TextEnc metadata_enc;

    // Used to override varchar_maxlength, etc. Those are initialized from
    // SQLGetTypeInfo but some drivers (e.g. psqlodbc) return almost arbitrary
    // values (like 255 chars) leading to very slow insert performance (lots of
    // small calls to SQLPutData). If this is zero the values from
    // SQLGetTypeInfo are used. Otherwise this value is used.
    long maxwrite;

    // These are copied from cnxn info for performance and convenience.

    int varchar_maxlength;
    int wvarchar_maxlength;
    int binary_maxlength;

    // Returns the maximum write length for the given C type, honoring the user's
    // `maxwrite` override when it is non-zero.
    SQLLEN GetMaxLength(SQLSMALLINT ctype) const
    {
        I(ctype == SQL_C_BINARY || ctype == SQL_C_WCHAR || ctype == SQL_C_CHAR);
        if (maxwrite != 0)
            return maxwrite;
        if (ctype == SQL_C_BINARY)
            return binary_maxlength;
        if (ctype == SQL_C_WCHAR)
            return wvarchar_maxlength;
        return varchar_maxlength;
    }

    bool need_long_data_len;

    // Output conversions. Maps from SQL type in conv_types to the converter function in conv_funcs.
    //
    // If conv_count is zero, conv_types and conv_funcs will also be zero.
    //
    // pyodbc uses this manual mapping for speed and portability. The STL collection classes use the new operator and
    // throw exceptions when out of memory. pyodbc does not use any exceptions.

    int conv_count;             // how many items are in conv_types and conv_funcs.
    SQLSMALLINT* conv_types;    // array of SQL_TYPEs to convert
    PyObject** conv_funcs;      // array of Python functions
};

#define Connection_Check(op) PyObject_TypeCheck(op, &ConnectionType)
#define Connection_CheckExact(op) (Py_TYPE(op) == &ConnectionType)

/*
 * Used by the module's connect function to create new connection objects. If unable to connect to the database, an
 * exception is set and zero is returned.
 */
PyObject* Connection_New(PyObject* pConnectString, bool fAutoCommit, bool fAnsi, long timeout, bool fReadOnly, PyObject* attrs_before, Object& encoding);

/*
 * Used by the Cursor to implement commit and rollback.
 */
PyObject* Connection_endtrans(Connection* cnxn, SQLSMALLINT type);

#endif
././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1629144457.0 pyodbc-4.0.32/src/cursor.cpp0000664000175000017500000024776400000000000017247 0ustar00mkleehammermkleehammer
// Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
// documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
// permit persons to whom the Software is furnished to do so.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
// WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
// OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

// Note: This project has gone from C++ (when it was ported from pypgdb) to C, back to C++ (where it will stay). If
// you are making modifications, feel free to move variable declarations from the top of functions to where they are
// actually used.
#include "pyodbc.h"
#include "wrapper.h"
#include "textenc.h"
#include "cursor.h"
#include "pyodbcmodule.h"
#include "connection.h"
#include "row.h"
#include "buffer.h"
#include "params.h"
#include "errors.h"
#include "getdata.h"
#include "dbspecific.h"
// BUG FIX: the include target was lost (a bare `#include` with no header name, which
// does not compile).  cursor.cpp uses the CPython date/time C API, which requires
// <datetime.h>.
#include <datetime.h>

// Flags for Cursor_Validate.  Each "require" level is a superset of the previous one
// (the bit patterns overlap deliberately so IsSet() can test them cumulatively).
enum
{
    CURSOR_REQUIRE_CNXN    = 0x00000001,
    CURSOR_REQUIRE_OPEN    = 0x00000003, // includes _CNXN
    CURSOR_REQUIRE_RESULTS = 0x00000007, // includes _OPEN
    CURSOR_RAISE_ERROR     = 0x00000010,
};

// True when the cursor still has a live statement handle on a live connection.
inline bool StatementIsValid(Cursor* cursor)
{
    return cursor->cnxn != 0 && ((Connection*)cursor->cnxn)->hdbc != SQL_NULL_HANDLE && cursor->hstmt != SQL_NULL_HANDLE;
}

extern PyTypeObject CursorType;

inline bool Cursor_Check(PyObject* o)
{
    return o != 0 && Py_TYPE(o) == &CursorType;
}

static Cursor* Cursor_Validate(PyObject* obj, DWORD flags)
{
    // Validates that a PyObject is a Cursor (like Cursor_Check) and optionally some other requirements controlled by
    // `flags`.  If valid and all requirements (from the flags) are met, the cursor is returned, cast to Cursor*.
    // Otherwise zero is returned.
    //
    // Designed to be used at the top of methods to convert the PyObject pointer and perform necessary checks.
    //
    // Valid flags are from the CURSOR_ enum above.  Note that unless CURSOR_RAISE_ERROR is supplied, an exception
    // will not be set.  (When deallocating, we really don't want an exception.)
    Connection* cnxn = 0;
    Cursor* cursor = 0;

    if (!Cursor_Check(obj))
    {
        if (flags & CURSOR_RAISE_ERROR)
            PyErr_SetString(ProgrammingError, "Invalid cursor object.");
        return 0;
    }

    cursor = (Cursor*)obj;
    cnxn = (Connection*)cursor->cnxn;

    if (cnxn == 0)
    {
        if (flags & CURSOR_RAISE_ERROR)
            PyErr_SetString(ProgrammingError, "Attempt to use a closed cursor.");
        return 0;
    }

    if (IsSet(flags, CURSOR_REQUIRE_OPEN))
    {
        if (cursor->hstmt == SQL_NULL_HANDLE)
        {
            if (flags & CURSOR_RAISE_ERROR)
                PyErr_SetString(ProgrammingError, "Attempt to use a closed cursor.");
            return 0;
        }
        if (cnxn->hdbc == SQL_NULL_HANDLE)
        {
            if (flags & CURSOR_RAISE_ERROR)
                PyErr_SetString(ProgrammingError, "The cursor's connection has been closed.");
            return 0;
        }
    }

    if (IsSet(flags, CURSOR_REQUIRE_RESULTS) && cursor->colinfos == 0)
    {
        if (flags & CURSOR_RAISE_ERROR)
            PyErr_SetString(ProgrammingError, "No results. Previous SQL was not a query.");
        return 0;
    }

    return cursor;
}

// True for SQL types whose values are numeric (used below when working around drivers
// that report a zero column size for numeric columns).
inline bool IsNumericType(SQLSMALLINT sqltype)
{
    switch (sqltype)
    {
    case SQL_DECIMAL:
    case SQL_NUMERIC:
    case SQL_REAL:
    case SQL_FLOAT:
    case SQL_DOUBLE:
    case SQL_SMALLINT:
    case SQL_INTEGER:
    case SQL_TINYINT:
    case SQL_BIGINT:
        return true;
    }
    return false;
}

// Builds Cursor.description and the shared column-name -> index map after an execute.
// Returns false with a Python exception set on failure; on failure no cursor members
// are modified.
static bool create_name_map(Cursor* cur, SQLSMALLINT field_count, bool lower)
{
    // Called after an execute to construct the map shared by rows.

    bool success = false;
    PyObject *desc = 0, *colmap = 0, *colinfo = 0, *type = 0, *index = 0, *nullable_obj=0;
    SQLSMALLINT nameLen = 300;
    ODBCCHAR *szName = NULL;
    SQLRETURN ret;

    I(cur->hstmt != SQL_NULL_HANDLE && cur->colinfos != 0);

    // These are the values we expect after free_results.  If this function fails, we do not modify any members, so
    // they should be set to something Cursor_close can deal with.
    I(cur->description == Py_None);
    I(cur->map_name_to_index == 0);

    if (cur->cnxn->hdbc == SQL_NULL_HANDLE)
    {
        RaiseErrorV(0, ProgrammingError, "The cursor's connection was closed.");
        return false;
    }

    desc = PyTuple_New((Py_ssize_t)field_count);
    colmap = PyDict_New();
    szName = (ODBCCHAR*) pyodbc_malloc((nameLen + 1) * sizeof(ODBCCHAR));
    if (!desc || !colmap || !szName)
        goto done;

    for (int i = 0; i < field_count; i++)
    {
        SQLSMALLINT cchName;
        SQLSMALLINT nDataType;
        SQLULEN nColSize;           // precision
        SQLSMALLINT cDecimalDigits; // scale
        SQLSMALLINT nullable;

    retry:
        Py_BEGIN_ALLOW_THREADS
        ret = SQLDescribeColW(cur->hstmt, (SQLUSMALLINT)(i + 1), (SQLWCHAR*)szName, nameLen, &cchName, &nDataType, &nColSize, &cDecimalDigits, &nullable);
        Py_END_ALLOW_THREADS

        if (cur->cnxn->hdbc == SQL_NULL_HANDLE)
        {
            // The connection was closed by another thread in the ALLOW_THREADS block above.
            RaiseErrorV(0, ProgrammingError, "The cursor's connection was closed.");
            goto done;
        }

        if (!SQL_SUCCEEDED(ret))
        {
            RaiseErrorFromHandle(cur->cnxn, "SQLDescribeCol", cur->cnxn->hdbc, cur->hstmt);
            goto done;
        }

        // If needed, allocate a bigger column name message buffer and retry.
        if (cchName > nameLen - 1)
        {
            nameLen = cchName + 1;
            if (!pyodbc_realloc((BYTE**) &szName, (nameLen + 1) * sizeof(ODBCCHAR)))
            {
                PyErr_NoMemory();
                goto done;
            }
            goto retry;
        }

        const TextEnc& enc = cur->cnxn->metadata_enc;

        // HACK: I don't know the exact issue, but iODBC + Teradata results in either UCS4 data
        // or 4-byte SQLWCHAR.  I'm going to use UTF-32 as an indication that's what we have.
        // Convert the character count to a byte count based on the metadata encoding.
        Py_ssize_t cbName = cchName;
        switch (enc.optenc)
        {
        case OPTENC_UTF32:
        case OPTENC_UTF32LE:
        case OPTENC_UTF32BE:
            cbName *= 4;
            break;
        default:
            if (enc.ctype == SQL_C_WCHAR)
                cbName *= 2;
            break;
        }

        TRACE("Col %d: type=%s (%d) colsize=%d\n", (i+1), SqlTypeName(nDataType), (int)nDataType, (int)nColSize);

        Object name(TextBufferToObject(enc, szName, cbName));
        if (!name)
            goto done;

        if (lower)
        {
            PyObject* l = PyObject_CallMethod(name, "lower", 0);
            if (!l)
                goto done;
            name.Attach(l);
        }

        type = PythonTypeFromSqlType(cur, nDataType);
        if (!type)
            goto done;

        switch (nullable)
        {
        case SQL_NO_NULLS:
            nullable_obj = Py_False;
            break;
        case SQL_NULLABLE:
            nullable_obj = Py_True;
            break;
        case SQL_NULLABLE_UNKNOWN:
        default:
            nullable_obj = Py_None;
            break;
        }

        // The Oracle ODBC driver has a bug (I call it) that it returns a data size of 0 when a numeric value is
        // retrieved from a UNION: http://support.microsoft.com/?scid=kb%3Ben-us%3B236786&x=13&y=6
        //
        // Unfortunately, I don't have a test system for this yet, so I'm *trying* something.  (Not a good sign.)  If
        // the size is zero and it appears to be a numeric type, we'll try to come up with our own length using any
        // other data we can get.
        if (nColSize == 0 && IsNumericType(nDataType))
        {
            // I'm not sure how
            if (cDecimalDigits != 0)
            {
                nColSize = (SQLUINTEGER)(cDecimalDigits + 3);
            }
            else
            {
                // I'm not sure if this is a good idea, but ...
                nColSize = 42;
            }
        }

        colinfo = Py_BuildValue("(OOOiiiO)",
                                name.Get(),
                                type,                   // type_code
                                Py_None,                // display size
                                (int)nColSize,          // internal_size
                                (int)nColSize,          // precision
                                (int)cDecimalDigits,    // scale
                                nullable_obj);          // null_ok
        if (!colinfo)
            goto done;

        nullable_obj = 0;

        index = PyInt_FromLong(i);
        if (!index)
            goto done;

        PyDict_SetItem(colmap, name.Get(), index);
        Py_DECREF(index);       // PyDict_SetItem increments
        index = 0;

        PyTuple_SET_ITEM(desc, i, colinfo);
        colinfo = 0;            // reference stolen by SET_ITEM
    }

    Py_XDECREF(cur->description);
    cur->description = desc;
    desc = 0;
    cur->map_name_to_index = colmap;
    colmap = 0;

    success = true;

done:
    // Single cleanup point; every pointer is either still owned here or already zeroed.
    Py_XDECREF(nullable_obj);
    Py_XDECREF(desc);
    Py_XDECREF(colmap);
    Py_XDECREF(index);
    Py_XDECREF(colinfo);
    pyodbc_free(szName);
    return success;
}

// Flags for free_results.  Exactly one of each FREE_/KEEP_ pair must be supplied.
enum free_results_flags
{
    FREE_STATEMENT = 0x01,
    KEEP_STATEMENT = 0x02,
    FREE_PREPARED  = 0x04,
    KEEP_PREPARED  = 0x08,
    KEEP_MESSAGES  = 0x10,

    STATEMENT_MASK = 0x03,
    PREPARED_MASK  = 0x0C
};

static bool free_results(Cursor* self, int flags)
{
    // Internal function called any time we need to free the memory associated with query results.  It is safe to call
    // this even when a query has not been executed.

    // If we ran out of memory, it is possible that we have a cursor but colinfos is zero.  However, we should be
    // deleting this object, so the cursor will be freed when the HSTMT is destroyed.
*/ I((flags & STATEMENT_MASK) != 0); I((flags & PREPARED_MASK) != 0); if ((flags & PREPARED_MASK) == FREE_PREPARED) { Py_XDECREF(self->pPreparedSQL); self->pPreparedSQL = 0; } if (self->colinfos) { pyodbc_free(self->colinfos); self->colinfos = 0; } if (StatementIsValid(self)) { if ((flags & STATEMENT_MASK) == FREE_STATEMENT) { Py_BEGIN_ALLOW_THREADS SQLFreeStmt(self->hstmt, SQL_CLOSE); Py_END_ALLOW_THREADS; } else { Py_BEGIN_ALLOW_THREADS SQLFreeStmt(self->hstmt, SQL_UNBIND); SQLFreeStmt(self->hstmt, SQL_RESET_PARAMS); Py_END_ALLOW_THREADS; } if (self->cnxn->hdbc == SQL_NULL_HANDLE) { // The connection was closed by another thread in the ALLOW_THREADS block above. RaiseErrorV(0, ProgrammingError, "The cursor's connection was closed."); return false; } } if (self->description != Py_None) { Py_DECREF(self->description); self->description = Py_None; Py_INCREF(Py_None); } if (self->map_name_to_index) { Py_DECREF(self->map_name_to_index); self->map_name_to_index = 0; } if ((flags & KEEP_MESSAGES) == 0) { Py_XDECREF(self->messages); self->messages = PyList_New(0); } self->rowcount = -1; return true; } static void closeimpl(Cursor* cur) { // An internal function for the shared 'closing' code used by Cursor_close and Cursor_dealloc. // // This method releases the GIL lock while closing, so verify the HDBC still exists if you use it. free_results(cur, FREE_STATEMENT | FREE_PREPARED); FreeParameterInfo(cur); FreeParameterData(cur); if (StatementIsValid(cur)) { HSTMT hstmt = cur->hstmt; cur->hstmt = SQL_NULL_HANDLE; SQLRETURN ret; Py_BEGIN_ALLOW_THREADS ret = SQLFreeHandle(SQL_HANDLE_STMT, hstmt); Py_END_ALLOW_THREADS // If there is already an exception, don't overwrite it. 
if (!SQL_SUCCEEDED(ret) && !PyErr_Occurred()) RaiseErrorFromHandle(cur->cnxn, "SQLFreeHandle", cur->cnxn->hdbc, SQL_NULL_HANDLE); } Py_XDECREF(cur->pPreparedSQL); Py_XDECREF(cur->description); Py_XDECREF(cur->map_name_to_index); Py_XDECREF(cur->cnxn); Py_XDECREF(cur->messages); cur->pPreparedSQL = 0; cur->description = 0; cur->map_name_to_index = 0; cur->cnxn = 0; cur->messages = 0; } static char close_doc[] = "Close the cursor now (rather than whenever __del__ is called). The cursor will\n" "be unusable from this point forward; a ProgrammingError exception will be\n" "raised if any operation is attempted with the cursor."; static PyObject* Cursor_close(PyObject* self, PyObject* args) { UNUSED(args); Cursor* cursor = Cursor_Validate(self, CURSOR_REQUIRE_OPEN | CURSOR_RAISE_ERROR); if (!cursor) return 0; closeimpl(cursor); if (PyErr_Occurred()) return 0; Py_INCREF(Py_None); return Py_None; } static void Cursor_dealloc(Cursor* cursor) { if (Cursor_Validate((PyObject*)cursor, CURSOR_REQUIRE_CNXN)) { closeimpl(cursor); } Py_XDECREF(cursor->inputsizes); PyObject_Del(cursor); } bool InitColumnInfo(Cursor* cursor, SQLUSMALLINT iCol, ColumnInfo* pinfo) { // Initializes ColumnInfo from result set metadata. SQLRETURN ret; // REVIEW: This line fails on OS/X with the FileMaker driver : http://www.filemaker.com/support/updaters/xdbc_odbc_mac.html // // I suspect the problem is that it doesn't allow NULLs in some of the parameters, so I'm going to supply them all // to see what happens. 
SQLCHAR ColumnName[200]; SQLSMALLINT BufferLength = _countof(ColumnName); SQLSMALLINT NameLength = 0; SQLSMALLINT DataType = 0; SQLULEN ColumnSize = 0; SQLSMALLINT DecimalDigits = 0; SQLSMALLINT Nullable = 0; Py_BEGIN_ALLOW_THREADS ret = SQLDescribeCol(cursor->hstmt, iCol, ColumnName, BufferLength, &NameLength, &DataType, &ColumnSize, &DecimalDigits, &Nullable); Py_END_ALLOW_THREADS pinfo->sql_type = DataType; pinfo->column_size = ColumnSize; if (cursor->cnxn->hdbc == SQL_NULL_HANDLE) { // The connection was closed by another thread in the ALLOW_THREADS block above. RaiseErrorV(0, ProgrammingError, "The cursor's connection was closed."); return false; } if (!SQL_SUCCEEDED(ret)) { RaiseErrorFromHandle(cursor->cnxn, "SQLDescribeCol", cursor->cnxn->hdbc, cursor->hstmt); return false; } // If it is an integer type, determine if it is signed or unsigned. The buffer size is the same but we'll need to // know when we convert to a Python integer. switch (pinfo->sql_type) { case SQL_TINYINT: case SQL_SMALLINT: case SQL_INTEGER: case SQL_BIGINT: { SQLLEN f; Py_BEGIN_ALLOW_THREADS ret = SQLColAttribute(cursor->hstmt, iCol, SQL_DESC_UNSIGNED, 0, 0, 0, &f); Py_END_ALLOW_THREADS if (cursor->cnxn->hdbc == SQL_NULL_HANDLE) { // The connection was closed by another thread in the ALLOW_THREADS block above. RaiseErrorV(0, ProgrammingError, "The cursor's connection was closed."); return false; } if (!SQL_SUCCEEDED(ret)) { RaiseErrorFromHandle(cursor->cnxn, "SQLColAttribute", cursor->cnxn->hdbc, cursor->hstmt); return false; } pinfo->is_unsigned = (f == SQL_TRUE); break; } default: pinfo->is_unsigned = false; } return true; } static bool PrepareResults(Cursor* cur, int cCols) { // Called after a SELECT has been executed to perform pre-fetch work. // // Allocates the ColumnInfo structures describing the returned data. 
    int i;
    I(cur->colinfos == 0);  // must not already have column info (free_results clears it)

    cur->colinfos = (ColumnInfo*)pyodbc_malloc(sizeof(ColumnInfo) * cCols);
    if (cur->colinfos == 0)
    {
        PyErr_NoMemory();
        return false;
    }

    for (i = 0; i < cCols; i++)
    {
        // ODBC column indexes are 1-based.
        if (!InitColumnInfo(cur, (SQLUSMALLINT)(i + 1), &cur->colinfos[i]))
        {
            pyodbc_free(cur->colinfos);
            cur->colinfos = 0;
            return false;
        }
    }

    return true;
}

// Drains all diagnostic records from the statement handle via SQLGetDiagRecW
// and stores them (as a list of (class, message) tuples) in cur->messages.
// Always returns 0; allocation failures set MemoryError.
static int GetDiagRecs(Cursor* cur)
{
    // Retrieves all diagnostic records from the cursor and assigns them to the "messages" attribute.

    PyObject* msg_list;             // the "messages" as a Python list of diagnostic records

    SQLSMALLINT iRecNumber = 1;     // the index of the diagnostic records (1-based)
    ODBCCHAR cSQLState[6];          // five-character SQLSTATE code (plus terminating NULL)
    SQLINTEGER iNativeError;
    SQLSMALLINT iMessageLen = 1023; // initial guess; grown below if a record is longer
    ODBCCHAR *cMessageText = (ODBCCHAR*) pyodbc_malloc((iMessageLen + 1) * sizeof(ODBCCHAR));
    SQLSMALLINT iTextLength;

    SQLRETURN ret;
    char sqlstate_ascii[6] = "";    // ASCII version of the SQLState

    if (!cMessageText)
    {
        PyErr_NoMemory();
        return 0;
    }

    msg_list = PyList_New(0);
    if (!msg_list)
        return 0;

    for (;;)
    {
        cSQLState[0] = 0;
        iNativeError = 0;
        cMessageText[0] = 0;
        iTextLength = 0;

        Py_BEGIN_ALLOW_THREADS
        ret = SQLGetDiagRecW(
            SQL_HANDLE_STMT,
            cur->hstmt,
            iRecNumber,
            (SQLWCHAR*)cSQLState,
            &iNativeError,
            (SQLWCHAR*)cMessageText,
            iMessageLen,
            &iTextLength
        );
        Py_END_ALLOW_THREADS
        if (!SQL_SUCCEEDED(ret))
            break;  // SQL_NO_DATA: no more diagnostic records

        // If needed, allocate a bigger error message buffer and retry.
        if (iTextLength > iMessageLen - 1)
        {
            iMessageLen = iTextLength + 1;
            // NOTE(review): this early-return path leaks msg_list — left as-is here.
            if (!pyodbc_realloc((BYTE**) &cMessageText, (iMessageLen + 1) * sizeof(ODBCCHAR)))
            {
                pyodbc_free(cMessageText);
                PyErr_NoMemory();
                return 0;
            }
            Py_BEGIN_ALLOW_THREADS
            ret = SQLGetDiagRecW(
                SQL_HANDLE_STMT,
                cur->hstmt,
                iRecNumber,
                (SQLWCHAR*)cSQLState,
                &iNativeError,
                (SQLWCHAR*)cMessageText,
                iMessageLen,
                &iTextLength
            );
            Py_END_ALLOW_THREADS
            if (!SQL_SUCCEEDED(ret))
                break;
        }

        cSQLState[5] = 0;  // Not always NULL terminated (MS Access)
        CopySqlState(cSQLState, sqlstate_ascii);

        // Format the record as pyodbc's conventional "[SQLSTATE] (native-error)" class string.
        PyObject* msg_class = PyUnicode_FromFormat("[%s] (%ld)", sqlstate_ascii, (long)iNativeError);

        // Default to UTF-16, which may not work if the driver/manager is using some other encoding
        const char *unicode_enc = cur->cnxn ? cur->cnxn->metadata_enc.name : ENCSTR_UTF16NE;
        PyObject* msg_value = PyUnicode_Decode(
            (char*)cMessageText, iTextLength * sizeof(ODBCCHAR), unicode_enc, "strict"
        );
        if (!msg_value)
        {
            // If the char cannot be decoded, return something rather than nothing.
            Py_XDECREF(msg_value);
            msg_value = PyBytes_FromStringAndSize((char*)cMessageText, iTextLength * sizeof(ODBCCHAR));
        }

        PyObject* msg_tuple = PyTuple_New(2);  // the message as a Python tuple of class and value

        if (msg_class && msg_value && msg_tuple)
        {
            PyTuple_SetItem(msg_tuple, 0, msg_class);  // msg_tuple now owns the msg_class reference
            PyTuple_SetItem(msg_tuple, 1, msg_value);  // msg_tuple now owns the msg_value reference

            PyList_Append(msg_list, msg_tuple);
            Py_XDECREF(msg_tuple);  // whether PyList_Append succeeds or not
        }
        else
        {
            Py_XDECREF(msg_class);
            Py_XDECREF(msg_value);
            Py_XDECREF(msg_tuple);
        }

        iRecNumber++;
    }
    pyodbc_free(cMessageText);

    Py_XDECREF(cur->messages);
    cur->messages = msg_list;  // cur->messages now owns the msg_list reference

    return 0;
}

// Core statement-execution engine shared by Cursor.execute and
// Cursor.executemany.  Returns the cursor (new reference) on success, 0 with
// an exception set on failure.
static PyObject* execute(Cursor* cur, PyObject* pSql, PyObject* params, bool skip_first)
{
    // Internal function to execute SQL, called by .execute and .executemany.
    //
    // pSql
    //   A PyString, PyUnicode, or derived object containing the SQL.
    //
    // params
    //   Pointer to an optional sequence of parameters, and possibly the SQL statement (see skip_first):
    //   (SQL, param1, param2) or (param1, param2).
    //
    // skip_first
    //   If true, the first element in `params` is ignored.  (It will be the SQL statement and `params` will be the
    //   entire tuple passed to Cursor.execute.)  Otherwise all of the params are used.  (This case occurs when called
    //   from Cursor.executemany, in which case the sequences do not contain the SQL statement.)  Ignored if params is
    //   zero.

    if (params)
    {
        if (!PyTuple_Check(params) && !PyList_Check(params) && !Row_Check(params))
            return RaiseErrorV(0, PyExc_TypeError, "Params must be in a list, tuple, or Row");
    }

    // Normalize the parameter variables.

    int params_offset = skip_first ? 1 : 0;
    Py_ssize_t cParams = params == 0 ? 0 : PySequence_Length(params) - params_offset;

    SQLRETURN ret = 0;

    // Discard any previous result set but keep a prepared statement so
    // repeated executes of the same SQL can reuse it.
    free_results(cur, FREE_STATEMENT | KEEP_PREPARED);

    const char* szLastFunction = "";  // name of the last ODBC call, for error reporting

    if (cParams > 0)
    {
        // There are parameters, so we'll need to prepare the SQL statement and bind the parameters.  (We need to
        // prepare the statement because we can't bind a NULL (None) object without knowing the target datatype.  There
        // is no one data type that always maps to the others (no, not even varchar)).

        if (!PrepareAndBind(cur, pSql, params, skip_first))
            return 0;

        szLastFunction = "SQLExecute";
        Py_BEGIN_ALLOW_THREADS
        ret = SQLExecute(cur->hstmt);
        Py_END_ALLOW_THREADS
    }
    else
    {
        // REVIEW: Why don't we always prepare?  It is highly unlikely that a user would need to execute the same SQL
        // repeatedly if it did not have parameters, so we are not losing performance, but it would simplify the code.

        // No parameters: execute directly, discarding any previously prepared statement.
        Py_XDECREF(cur->pPreparedSQL);
        cur->pPreparedSQL = 0;

        szLastFunction = "SQLExecDirect";

        // Pick the connection encoding matching the SQL object's type
        // (Python 2 byte strings vs. unicode).
        const TextEnc* penc = 0;

#if PY_MAJOR_VERSION < 3
        if (PyString_Check(pSql))
        {
            penc = &cur->cnxn->str_enc;
        }
        else
#endif
        {
            penc = &cur->cnxn->unicode_enc;
        }

        Object query(penc->Encode(pSql));
        if (!query)
            return 0;

        bool isWide = (penc->ctype == SQL_C_WCHAR);

        const char* pch = PyBytes_AS_STRING(query.Get());
        // Length is in characters: divide byte length by the wide-char size when applicable.
        SQLINTEGER cch = (SQLINTEGER)(PyBytes_GET_SIZE(query.Get()) / (isWide ? sizeof(ODBCCHAR) : 1));

        Py_BEGIN_ALLOW_THREADS
        if (isWide)
            ret = SQLExecDirectW(cur->hstmt, (SQLWCHAR*)pch, cch);
        else
            ret = SQLExecDirect(cur->hstmt, (SQLCHAR*)pch, cch);
        Py_END_ALLOW_THREADS
    }

    if (cur->cnxn->hdbc == SQL_NULL_HANDLE)
    {
        // The connection was closed by another thread in the ALLOW_THREADS block above.
        FreeParameterData(cur);
        return RaiseErrorV(0, ProgrammingError, "The cursor's connection was closed.");
    }

    if (!SQL_SUCCEEDED(ret) && ret != SQL_NEED_DATA && ret != SQL_NO_DATA)
    {
        // We could try dropping through the while and if below, but if there is an error, we need to raise it before
        // FreeParameterData calls more ODBC functions.
        RaiseErrorFromHandle(cur->cnxn, "SQLExecDirectW", cur->cnxn->hdbc, cur->hstmt);
        FreeParameterData(cur);
        return 0;
    }

    // Capture any informational diagnostics (e.g. warnings) into cur->messages.
    if (ret == SQL_SUCCESS_WITH_INFO)
    {
        GetDiagRecs(cur);
    }

    while (ret == SQL_NEED_DATA)
    {
        // One or more parameters were too long to bind normally so we set the
        // length to SQL_LEN_DATA_AT_EXEC.  ODBC will return SQL_NEED_DATA for
        // each of the parameters we did this for.
        //
        // For each one we set a pointer to the ParamInfo as the "parameter
        // data" we can access with SQLParamData.  We've stashed everything we
        // need in there.
        szLastFunction = "SQLParamData";
        ParamInfo* pInfo;
        Py_BEGIN_ALLOW_THREADS
        ret = SQLParamData(cur->hstmt, (SQLPOINTER*)&pInfo);
        Py_END_ALLOW_THREADS

        if (ret != SQL_NEED_DATA && ret != SQL_NO_DATA && !SQL_SUCCEEDED(ret))
            return RaiseErrorFromHandle(cur->cnxn, "SQLParamData", cur->cnxn->hdbc, cur->hstmt);

        TRACE("SQLParamData() --> %d\n", ret);

        if (ret == SQL_NEED_DATA)
        {
            szLastFunction = "SQLPutData";

            // Branch 1: bytes / bytearray parameter — stream it in chunks of
            // at most pInfo->maxlength bytes.
            if (pInfo->pObject && (PyBytes_Check(pInfo->pObject)
#if PY_VERSION_HEX >= 0x02060000
                || PyByteArray_Check(pInfo->pObject)
#endif
            ))
            {
                char *(*pGetPtr)(PyObject*);
                Py_ssize_t (*pGetLen)(PyObject*);
#if PY_VERSION_HEX >= 0x02060000
                if (PyByteArray_Check(pInfo->pObject))
                {
                    pGetPtr = PyByteArray_AsString;
                    pGetLen = PyByteArray_Size;
                }
                else
#endif
                {
                    pGetPtr = PyBytes_AsString;
                    pGetLen = PyBytes_Size;
                }

                const char* p = pGetPtr(pInfo->pObject);
                SQLLEN cb = (SQLLEN)pGetLen(pInfo->pObject);
                SQLLEN offset = 0;

                do
                {
                    // maxlength == 0 means no chunking: send everything at once.
                    SQLLEN remaining = pInfo->maxlength ? min(pInfo->maxlength, cb - offset) : cb;
                    TRACE("SQLPutData [%d] (%d) %.10s\n", offset, remaining, &p[offset]);
                    Py_BEGIN_ALLOW_THREADS
                    ret = SQLPutData(cur->hstmt, (SQLPOINTER)&p[offset], remaining);
                    Py_END_ALLOW_THREADS
                    if (!SQL_SUCCEEDED(ret))
                        return RaiseErrorFromHandle(cur->cnxn, "SQLPutData", cur->cnxn->hdbc, cur->hstmt);
                    offset += remaining;
                }
                while (offset < cb);
            }
#if PY_MAJOR_VERSION < 3
            // Branch 2 (Python 2 only): old-style buffer objects.
            else if (pInfo->pObject && PyBuffer_Check(pInfo->pObject))
            {
                // Buffers can have multiple segments, so we might need multiple writes.  Looping through buffers isn't
                // difficult, but we've wrapped it up in an iterator object to keep this loop simple.
                BufferSegmentIterator it(pInfo->pObject);
                byte* pb;
                SQLLEN cb;
                while (it.Next(pb, cb))
                {
                    Py_BEGIN_ALLOW_THREADS
                    ret = SQLPutData(cur->hstmt, pb, cb);
                    Py_END_ALLOW_THREADS
                    if (!SQL_SUCCEEDED(ret))
                        return RaiseErrorFromHandle(cur->cnxn, "SQLPutData", cur->cnxn->hdbc, cur->hstmt);
                }
            }
#endif
            // Branch 3: table-valued parameter (SQL Server) — feed one row of
            // the TVP at a time by rebinding its column ParamInfos.
            else if (pInfo->ParameterType == SQL_SS_TABLE)
            {
                // TVP
                // Need to convert its columns into the bound row buffers
                int hasTvpRows = 0;
                if (pInfo->curTvpRow < PySequence_Length(pInfo->pObject))
                {
                    // NOTE(review): tvpRow/cell are decref'd immediately after
                    // PySequence_GetItem and then used — relies on the owning
                    // sequence keeping them alive; confirm intent.
                    PyObject *tvpRow = PySequence_GetItem(pInfo->pObject, pInfo->curTvpRow);
                    Py_XDECREF(tvpRow);
                    for (Py_ssize_t i = 0; i < PySequence_Size(tvpRow); i++)
                    {
                        struct ParamInfo newParam;
                        struct ParamInfo *prevParam = pInfo->nested + i;
                        PyObject *cell = PySequence_GetItem(tvpRow, i);
                        Py_XDECREF(cell);
                        memset(&newParam, 0, sizeof(newParam));
                        if (!GetParameterInfo(cur, i, cell, newParam, true))
                        {
                            // Error converting object
                            FreeParameterData(cur);
                            return NULL;
                        }
                        // Every row of the TVP must bind each column with the same types.
                        if((newParam.ValueType != SQL_C_DEFAULT && prevParam->ValueType != SQL_C_DEFAULT) &&
                           (newParam.ValueType != prevParam->ValueType || newParam.ParameterType != prevParam->ParameterType))
                        {
                            FreeParameterData(cur);
                            return RaiseErrorV(0, ProgrammingError, "Type mismatch between TVP row values");
                        }
                        if (prevParam->allocated)
                            pyodbc_free(prevParam->ParameterValuePtr);
                        Py_XDECREF(prevParam->pObject);
                        newParam.BufferLength = newParam.StrLen_or_Ind;
                        newParam.StrLen_or_Ind = SQL_DATA_AT_EXEC;
                        Py_INCREF(cell);
                        newParam.pObject = cell;
                        *prevParam = newParam;
                        // If the value points into the stack-local struct, repoint it at the stored copy.
                        if(prevParam->ParameterValuePtr == &newParam.Data)
                        {
                            prevParam->ParameterValuePtr = &prevParam->Data;
                        }
                    }
                    pInfo->curTvpRow++;
                    hasTvpRows = 1;
                }
                Py_BEGIN_ALLOW_THREADS
                ret = SQLPutData(cur->hstmt, hasTvpRows ?
                    (SQLPOINTER)1 : 0, hasTvpRows);  // non-null/1 signals "row available", 0/0 signals "end of rows"
                Py_END_ALLOW_THREADS
                if (!SQL_SUCCEEDED(ret))
                    return RaiseErrorFromHandle(cur->cnxn, "SQLPutData", cur->cnxn->hdbc, cur->hstmt);
            }
            else
            {
                // TVP column sent as DAE
                Py_BEGIN_ALLOW_THREADS
                ret = SQLPutData(cur->hstmt, pInfo->ParameterValuePtr, pInfo->BufferLength);
                Py_END_ALLOW_THREADS
                if (!SQL_SUCCEEDED(ret))
                    return RaiseErrorFromHandle(cur->cnxn, "SQLPutData", cur->cnxn->hdbc, cur->hstmt);
            }
            // Force another SQLParamData call to see if more parameters need data.
            ret = SQL_NEED_DATA;
        }
    }

    FreeParameterData(cur);

    if (ret == SQL_NO_DATA)
    {
        // Example: A delete statement that did not delete anything.
        cur->rowcount = 0;
        Py_INCREF(cur);
        return (PyObject*)cur;
    }

    if (!SQL_SUCCEEDED(ret))
        return RaiseErrorFromHandle(cur->cnxn, szLastFunction, cur->cnxn->hdbc, cur->hstmt);

    SQLLEN cRows = -1;
    Py_BEGIN_ALLOW_THREADS
    ret = SQLRowCount(cur->hstmt, &cRows);
    Py_END_ALLOW_THREADS
    if (!SQL_SUCCEEDED(ret))
        return RaiseErrorFromHandle(cur->cnxn, "SQLRowCount", cur->cnxn->hdbc, cur->hstmt);

    cur->rowcount = (int)cRows;

    TRACE("SQLRowCount: %d\n", cRows);

    SQLSMALLINT cCols = 0;
    Py_BEGIN_ALLOW_THREADS
    ret = SQLNumResultCols(cur->hstmt, &cCols);
    Py_END_ALLOW_THREADS
    if (!SQL_SUCCEEDED(ret))
    {
        // Note: The SQL Server driver sometimes returns HY007 here if multiple statements (separated by ;) were
        // submitted.  This is not documented, but I've seen it with multiple successful inserts.
        return RaiseErrorFromHandle(cur->cnxn, "SQLNumResultCols", cur->cnxn->hdbc, cur->hstmt);
    }

    TRACE("SQLNumResultCols: %d\n", cCols);

    if (cur->cnxn->hdbc == SQL_NULL_HANDLE)
    {
        // The connection was closed by another thread in the ALLOW_THREADS block above.
        return RaiseErrorV(0, ProgrammingError, "The cursor's connection was closed.");
    }

    if (!SQL_SUCCEEDED(ret))
        return RaiseErrorFromHandle(cur->cnxn, "SQLRowCount", cur->cnxn->hdbc, cur->hstmt);

    if (cCols != 0)
    {
        // A result set was created.
        if (!PrepareResults(cur, cCols))
            return 0;

        if (!create_name_map(cur, cCols, lowercase()))
            return 0;
    }

    Py_INCREF(cur);
    return (PyObject*)cur;
}

// Distinguishes "a collection of SQL parameters" from "a single SQL parameter"
// in Cursor.execute's optional-argument handling.
inline bool IsSequence(PyObject* p)
{
    // Used to determine if the first parameter of execute is a collection of SQL parameters or is a SQL parameter
    // itself.  If the first parameter is a list, tuple, or Row object, then we consider it a collection.  Anything
    // else, including other sequences (e.g. bytearray), are considered SQL parameters.

    return PyList_Check(p) || PyTuple_Check(p) || Row_Check(p);
}

static char execute_doc[] =
    "C.execute(sql, [params]) --> Cursor\n"
    "\n"
    "Prepare and execute a database query or command.\n"
    "\n"
    "Parameters may be provided as a sequence (as specified by the DB API) or\n"
    "simply passed in one after another (non-standard):\n"
    "\n"
    "  cursor.execute(sql, (param1, param2))\n"
    "\n"
    "    or\n"
    "\n"
    "  cursor.execute(sql, param1, param2)\n";

// Python method Cursor.execute(): parses the SQL and optional parameters
// (sequence form or varargs form) and delegates to execute().
PyObject* Cursor_execute(PyObject* self, PyObject* args)
{
    Py_ssize_t cParams = PyTuple_Size(args) - 1;

    Cursor* cursor = Cursor_Validate(self, CURSOR_REQUIRE_OPEN | CURSOR_RAISE_ERROR);
    if (!cursor)
        return 0;

    if (cParams < 0)
    {
        PyErr_SetString(PyExc_TypeError, "execute() takes at least 1 argument (0 given)");
        return 0;
    }

    PyObject* pSql = PyTuple_GET_ITEM(args, 0);

    if (!PyString_Check(pSql) && !PyUnicode_Check(pSql))
    {
        PyErr_SetString(PyExc_TypeError, "The first argument to execute must be a string or unicode query.");
        return 0;
    }

    // Figure out if there were parameters and how they were passed.  Our optional parameter passing complicates this slightly.

    bool skip_first = false;
    PyObject *params = 0;
    if (cParams == 1 && IsSequence(PyTuple_GET_ITEM(args, 1)))
    {
        // There is a single argument and it is a sequence, so we must treat it as a sequence of parameters.  (This is
        // the normal Cursor.execute behavior.)

        params = PyTuple_GET_ITEM(args, 1);
        skip_first = false;
    }
    else if (cParams > 0)
    {
        // Varargs form: `args` itself is the parameter sequence, with the SQL at index 0.
        params = args;
        skip_first = true;
    }

    // Execute.
    return execute(cursor, pSql, params, skip_first);
}

// Python method Cursor.executemany(): executes the SQL once per parameter row.
// Accepts a sequence, iterator, or generator of rows; rowcount is left at -1.
static PyObject* Cursor_executemany(PyObject* self, PyObject* args)
{
    Cursor* cursor = Cursor_Validate(self, CURSOR_REQUIRE_OPEN | CURSOR_RAISE_ERROR);
    if (!cursor)
        return 0;

    cursor->rowcount = -1;

    PyObject *pSql, *param_seq;
    if (!PyArg_ParseTuple(args, "OO", &pSql, &param_seq))
        return 0;

    if (!PyString_Check(pSql) && !PyUnicode_Check(pSql))
    {
        PyErr_SetString(PyExc_TypeError, "The first argument to execute must be a string or unicode query.");
        return 0;
    }

    if (IsSequence(param_seq))
    {
        Py_ssize_t c = PySequence_Size(param_seq);

        if (c == 0)
        {
            PyErr_SetString(ProgrammingError, "The second parameter to executemany must not be empty.");
            return 0;
        }

        if (cursor->fastexecmany)
        {
            // Array-binding fast path (cursor.fast_executemany = True).
            free_results(cursor, FREE_STATEMENT | KEEP_PREPARED);
            if (!ExecuteMulti(cursor, pSql, param_seq))
                return 0;
        }
        else
        {
            // Default path: one execute() call per row.
            for (Py_ssize_t i = 0; i < c; i++)
            {
                PyObject* params = PySequence_GetItem(param_seq, i);
                PyObject* result = execute(cursor, pSql, params, false);
                bool success = result != 0;
                Py_XDECREF(result);
                Py_DECREF(params);
                if (!success)
                {
                    cursor->rowcount = -1;
                    return 0;
                }
            }
        }
    }
    else if (PyGen_Check(param_seq) || PyIter_Check(param_seq))
    {
        Object iter;

        if (PyGen_Check(param_seq))
        {
            iter = PyObject_GetIter(param_seq);
        }
        else
        {
            iter = param_seq;
            Py_INCREF(param_seq);
        }

        Object params;

        while (params.Attach(PyIter_Next(iter)))
        {
            PyObject* result = execute(cursor, pSql, params, false);
            bool success = result != 0;
            Py_XDECREF(result);

            if (!success)
            {
                cursor->rowcount = -1;
                return 0;
            }
        }

        // PyIter_Next returns 0 both on exhaustion and on error; distinguish here.
        if (PyErr_Occurred())
            return 0;
    }
    else
    {
        PyErr_SetString(ProgrammingError, "The second parameter to executemany must be a sequence, iterator, or generator.");
        return 0;
    }

    cursor->rowcount = -1;
    Py_RETURN_NONE;
}

// Python method Cursor.setinputsizes(): stores (or clears, when None) the
// user-supplied parameter-size hints used by later binds.
static PyObject* Cursor_setinputsizes(PyObject* self, PyObject* sizes)
{
    if (!Cursor_Check(self))
    {
        PyErr_SetString(ProgrammingError, "Invalid cursor object.");
        return 0;
    }

    Cursor *cur = (Cursor*)self;
    if (Py_None == sizes)
    {
        Py_XDECREF(cur->inputsizes);
        cur->inputsizes = 0;
    }
    else
    {
        if (!IsSequence(sizes))
        {
            PyErr_SetString(ProgrammingError, "A non-None parameter to setinputsizes must be a sequence, iterator, or generator.");
            return 0;
        }
        Py_XDECREF(cur->inputsizes);
        Py_INCREF(sizes);
        cur->inputsizes = sizes;
    }

    Py_RETURN_NONE;
}

// Fetches one row with SQLFetch and converts each column via GetData into a
// Row object.  Returns 0 without an exception at end of data; 0 with an
// exception on error.
static PyObject* Cursor_fetch(Cursor* cur)
{
    // Internal function to fetch a single row and construct a Row object from it.  Used by all of the fetching
    // functions.
    //
    // Returns a Row object if successful.  If there are no more rows, zero is returned.  If an error occurs, an
    // exception is set and zero is returned.  (To differentiate between the last two, use PyErr_Occurred.)

    SQLRETURN ret = 0;
    Py_ssize_t field_count, i;
    PyObject** apValues;

    Py_BEGIN_ALLOW_THREADS
    ret = SQLFetch(cur->hstmt);
    Py_END_ALLOW_THREADS

    if (cur->cnxn->hdbc == SQL_NULL_HANDLE)
    {
        // The connection was closed by another thread in the ALLOW_THREADS block above.
        return RaiseErrorV(0, ProgrammingError, "The cursor's connection was closed.");
    }

    if (ret == SQL_NO_DATA)
        return 0;

    if (!SQL_SUCCEEDED(ret))
        return RaiseErrorFromHandle(cur->cnxn, "SQLFetch", cur->cnxn->hdbc, cur->hstmt);

    field_count = PyTuple_GET_SIZE(cur->description);

    apValues = (PyObject**)pyodbc_malloc(sizeof(PyObject*) * field_count);

    if (apValues == 0)
        return PyErr_NoMemory();

    for (i = 0; i < field_count; i++)
    {
        PyObject* value = GetData(cur, i);

        if (!value)
        {
            // Free only the values converted so far; Row never took ownership.
            FreeRowValues(i, apValues);
            return 0;
        }

        apValues[i] = value;
    }

    return (PyObject*)Row_InternalNew(cur->description, cur->map_name_to_index, field_count, apValues);
}

// Fetches up to `max` rows (-1 = all) as a Python list of Rows.
static PyObject* Cursor_fetchlist(Cursor* cur, Py_ssize_t max)
{
    // max
    //   The maximum number of rows to fetch.  If -1, fetch all rows.
    //
    // Returns a list of Rows.  If there are no rows, an empty list is returned.
    PyObject* results;
    PyObject* row;

    results = PyList_New(0);
    if (!results)
        return 0;

    while (max == -1 || max > 0)
    {
        row = Cursor_fetch(cur);

        if (!row)
        {
            // Cursor_fetch returns 0 both at end-of-data and on error; only
            // the error case sets an exception.
            if (PyErr_Occurred())
            {
                Py_DECREF(results);
                return 0;
            }
            break;
        }

        PyList_Append(results, row);
        Py_DECREF(row);

        if (max != -1)
            max--;
    }

    return results;
}

// Iterator protocol: a cursor is its own iterator.
static PyObject* Cursor_iter(PyObject* self)
{
    Py_INCREF(self);
    return self;
}

static PyObject* Cursor_iternext(PyObject* self)
{
    // Implements the iterator protocol for cursors.  Fetches the next row.  Returns zero without setting an exception
    // when there are no rows.

    PyObject* result;

    Cursor* cursor = Cursor_Validate(self, CURSOR_REQUIRE_RESULTS | CURSOR_RAISE_ERROR);

    if (!cursor)
        return 0;

    result = Cursor_fetch(cursor);

    return result;
}

// Python method Cursor.fetchval(): first column of the next row, or None when
// there are no more rows.
static PyObject* Cursor_fetchval(PyObject* self, PyObject* args)
{
    UNUSED(args);

    Cursor* cursor = Cursor_Validate(self, CURSOR_REQUIRE_RESULTS | CURSOR_RAISE_ERROR);
    if (!cursor)
        return 0;

    Object row(Cursor_fetch(cursor));

    if (!row)
    {
        if (PyErr_Occurred())
            return 0;
        Py_RETURN_NONE;
    }

    return Row_item(row, 0);
}

// Python method Cursor.fetchone(): next Row, or None when exhausted.
static PyObject* Cursor_fetchone(PyObject* self, PyObject* args)
{
    UNUSED(args);

    PyObject* row;
    Cursor* cursor = Cursor_Validate(self, CURSOR_REQUIRE_RESULTS | CURSOR_RAISE_ERROR);
    if (!cursor)
        return 0;

    row = Cursor_fetch(cursor);

    if (!row)
    {
        if (PyErr_Occurred())
            return 0;
        Py_RETURN_NONE;
    }

    return row;
}

// Python method Cursor.fetchall(): all remaining rows as a list.
static PyObject* Cursor_fetchall(PyObject* self, PyObject* args)
{
    UNUSED(args);

    PyObject* result;
    Cursor* cursor = Cursor_Validate(self, CURSOR_REQUIRE_RESULTS | CURSOR_RAISE_ERROR);
    if (!cursor)
        return 0;

    result = Cursor_fetchlist(cursor, -1);

    return result;
}

// Python method Cursor.fetchmany([size]): up to `size` rows (defaults to
// cursor.arraysize) as a list.
static PyObject* Cursor_fetchmany(PyObject* self, PyObject* args)
{
    long rows;
    PyObject* result;

    Cursor* cursor = Cursor_Validate(self, CURSOR_REQUIRE_RESULTS | CURSOR_RAISE_ERROR);
    if (!cursor)
        return 0;

    rows = cursor->arraysize;
    if (!PyArg_ParseTuple(args, "|l", &rows))
        return 0;

    result = Cursor_fetchlist(cursor, rows);

    return result;
}

static char tables_doc[] =
    "C.tables(table=None, catalog=None, schema=None, tableType=None) --> self\n"
    "\n"
    "Executes SQLTables and creates a results set of tables defined in the data\n"
    "source.\n"
    "\n"
    "The table, catalog, and schema interpret the '_' and '%' characters as\n"
    "wildcards.  The escape character is driver specific, so use\n"
    "`Connection.searchescape`.\n"
    "\n"
    "Each row fetched has the following columns:\n"
    " 0) table_cat: The catalog name.\n"
    " 1) table_schem: The schema name.\n"
    " 2) table_name: The table name.\n"
    " 3) table_type: One of 'TABLE', 'VIEW', SYSTEM TABLE', 'GLOBAL TEMPORARY'\n"
    "    'LOCAL TEMPORARY', 'ALIAS', 'SYNONYM', or a data source-specific type name.";

char* Cursor_tables_kwnames[] = { "table", "catalog", "schema", "tableType", 0 };

// Python method Cursor.tables(): runs the SQLTables catalog function and
// leaves its result set on this cursor, which is returned for iteration.
static PyObject* Cursor_tables(PyObject* self, PyObject* args, PyObject* kwargs)
{
    const char* szCatalog = 0;
    const char* szSchema = 0;
    const char* szTableName = 0;
    const char* szTableType = 0;

    if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|zzzz", Cursor_tables_kwnames, &szTableName, &szCatalog, &szSchema, &szTableType))
        return 0;

    Cursor* cur = Cursor_Validate(self, CURSOR_REQUIRE_OPEN);

    if (!free_results(cur, FREE_STATEMENT | FREE_PREPARED))
        return 0;

    SQLRETURN ret = 0;

    Py_BEGIN_ALLOW_THREADS
    ret = SQLTables(cur->hstmt, (SQLCHAR*)szCatalog, SQL_NTS, (SQLCHAR*)szSchema, SQL_NTS,
                    (SQLCHAR*)szTableName, SQL_NTS, (SQLCHAR*)szTableType, SQL_NTS);
    Py_END_ALLOW_THREADS

    if (!SQL_SUCCEEDED(ret))
        return RaiseErrorFromHandle(cur->cnxn, "SQLTables", cur->cnxn->hdbc, cur->hstmt);

    SQLSMALLINT cCols;
    Py_BEGIN_ALLOW_THREADS
    ret = SQLNumResultCols(cur->hstmt, &cCols);
    Py_END_ALLOW_THREADS
    if (!SQL_SUCCEEDED(ret))
        return RaiseErrorFromHandle(cur->cnxn, "SQLNumResultCols", cur->cnxn->hdbc, cur->hstmt);

    if (!PrepareResults(cur, cCols))
        return 0;

    if (!create_name_map(cur, cCols, true))
        return 0;

    // Return the cursor so the results can be iterated over directly.
    Py_INCREF(cur);
    return (PyObject*)cur;
}

static char columns_doc[] =
    "C.columns(table=None, catalog=None, schema=None, column=None)\n\n"
    "Creates a results set of column names in specified tables by executing the ODBC SQLColumns function.\n"
    "Each row fetched has the following columns:\n"
    "  0) table_cat\n"
    "  1) table_schem\n"
    "  2) table_name\n"
    "  3) column_name\n"
    "  4) data_type\n"
    "  5) type_name\n"
    "  6) column_size\n"
    "  7) buffer_length\n"
    "  8) decimal_digits\n"
    "  9) num_prec_radix\n"
    " 10) nullable\n"
    " 11) remarks\n"
    " 12) column_def\n"
    " 13) sql_data_type\n"
    " 14) sql_datetime_sub\n"
    " 15) char_octet_length\n"
    " 16) ordinal_position\n"
    " 17) is_nullable";

char* Cursor_column_kwnames[] = { "table", "catalog", "schema", "column", 0 };

// Python method Cursor.columns(): runs SQLColumnsW (wide API, using the
// connection's metadata encoding) and returns this cursor holding the results.
static PyObject* Cursor_columns(PyObject* self, PyObject* args, PyObject* kwargs)
{
    PyObject* pCatalog = 0;
    PyObject* pSchema = 0;
    PyObject* pTable = 0;
    PyObject* pColumn = 0;

    if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|OOOO", Cursor_column_kwnames, &pTable, &pCatalog, &pSchema, &pColumn))
        return 0;

    Cursor* cur = Cursor_Validate(self, CURSOR_REQUIRE_OPEN);

    if (!free_results(cur, FREE_STATEMENT | FREE_PREPARED))
        return 0;

    SQLRETURN ret = 0;

    const TextEnc& enc = cur->cnxn->metadata_enc;
    SQLWChar catalog(pCatalog, enc);
    SQLWChar schema(pSchema, enc);
    SQLWChar table(pTable, enc);
    SQLWChar column(pColumn, enc);

    if (!catalog.isValidOrNone() || !schema.isValidOrNone() || !table.isValidOrNone() || !column.isValidOrNone())
        return 0;

    Py_BEGIN_ALLOW_THREADS
    ret = SQLColumnsW(cur->hstmt,
                      catalog.psz, SQL_NTS,
                      schema.psz, SQL_NTS,
                      table.psz, SQL_NTS,
                      column.psz, SQL_NTS);
    Py_END_ALLOW_THREADS

    if (!SQL_SUCCEEDED(ret))
        return RaiseErrorFromHandle(cur->cnxn, "SQLColumns", cur->cnxn->hdbc, cur->hstmt);

    SQLSMALLINT cCols;
    Py_BEGIN_ALLOW_THREADS
    ret = SQLNumResultCols(cur->hstmt, &cCols);
    Py_END_ALLOW_THREADS
    if (!SQL_SUCCEEDED(ret))
        return RaiseErrorFromHandle(cur->cnxn, "SQLNumResultCols", cur->cnxn->hdbc, cur->hstmt);

    if (!PrepareResults(cur, cCols))
        return 0;

    if (!create_name_map(cur, cCols, true))
        return 0;

    // Return the cursor so the results can be iterated over directly.
    Py_INCREF(cur);
    return (PyObject*)cur;
}

static char statistics_doc[] =
    "C.statistics(catalog=None, schema=None, unique=False, quick=True) --> self\n\n"
    "Creates a results set of statistics about a single table and the indexes associated with \n"
    "the table by executing SQLStatistics.\n"
    "unique\n"
    "  If True, only unique indexes are returned.  Otherwise all indexes are returned.\n"
    "quick\n"
    "  If True, CARDINALITY and PAGES are returned only if they are readily available\n"
    "  from the server\n"
    "\n"
    "Each row fetched has the following columns:\n\n"
    "  0) table_cat\n"
    "  1) table_schem\n"
    "  2) table_name\n"
    "  3) non_unique\n"
    "  4) index_qualifier\n"
    "  5) index_name\n"
    "  6) type\n"
    "  7) ordinal_position\n"
    "  8) column_name\n"
    "  9) asc_or_desc\n"
    " 10) cardinality\n"
    " 11) pages\n"
    " 12) filter_condition";

char* Cursor_statistics_kwnames[] = { "table", "catalog", "schema", "unique", "quick", 0 };

// Python method Cursor.statistics(): runs SQLStatistics for one table and
// returns this cursor holding the results.  `table` is required.
static PyObject* Cursor_statistics(PyObject* self, PyObject* args, PyObject* kwargs)
{
    const char* szCatalog = 0;
    const char* szSchema = 0;
    const char* szTable = 0;
    PyObject* pUnique = Py_False;
    PyObject* pQuick = Py_True;

    if (!PyArg_ParseTupleAndKeywords(args, kwargs, "s|zzOO", Cursor_statistics_kwnames, &szTable, &szCatalog, &szSchema,
                                     &pUnique, &pQuick))
        return 0;

    Cursor* cur = Cursor_Validate(self, CURSOR_REQUIRE_OPEN);

    if (!free_results(cur, FREE_STATEMENT | FREE_PREPARED))
        return 0;

    // Map the Python booleans onto the ODBC flag values.
    SQLUSMALLINT nUnique = (SQLUSMALLINT)(PyObject_IsTrue(pUnique) ? SQL_INDEX_UNIQUE : SQL_INDEX_ALL);
    SQLUSMALLINT nReserved = (SQLUSMALLINT)(PyObject_IsTrue(pQuick) ?
        SQL_QUICK : SQL_ENSURE);

    SQLRETURN ret = 0;

    Py_BEGIN_ALLOW_THREADS
    ret = SQLStatistics(cur->hstmt, (SQLCHAR*)szCatalog, SQL_NTS, (SQLCHAR*)szSchema, SQL_NTS, (SQLCHAR*)szTable, SQL_NTS,
                        nUnique, nReserved);
    Py_END_ALLOW_THREADS

    if (!SQL_SUCCEEDED(ret))
        return RaiseErrorFromHandle(cur->cnxn, "SQLStatistics", cur->cnxn->hdbc, cur->hstmt);

    SQLSMALLINT cCols;
    Py_BEGIN_ALLOW_THREADS
    ret = SQLNumResultCols(cur->hstmt, &cCols);
    Py_END_ALLOW_THREADS
    if (!SQL_SUCCEEDED(ret))
        return RaiseErrorFromHandle(cur->cnxn, "SQLNumResultCols", cur->cnxn->hdbc, cur->hstmt);

    if (!PrepareResults(cur, cCols))
        return 0;

    if (!create_name_map(cur, cCols, true))
        return 0;

    // Return the cursor so the results can be iterated over directly.
    Py_INCREF(cur);
    return (PyObject*)cur;
}

static char rowIdColumns_doc[] =
    "C.rowIdColumns(table, catalog=None, schema=None, nullable=True) -->\n\n"
    "Executes SQLSpecialColumns with SQL_BEST_ROWID which creates a result set of columns that\n"
    "uniquely identify a row\n\n"
    "Each row fetched has the following columns:\n"
    " 0) scope\n"
    " 1) column_name\n"
    " 2) data_type\n"
    " 3) type_name\n"
    " 4) column_size\n"
    " 5) buffer_length\n"
    " 6) decimal_digits\n"
    " 7) pseudo_column";

static char rowVerColumns_doc[] =
    "C.rowIdColumns(table, catalog=None, schema=None, nullable=True) --> self\n\n"
    "Executes SQLSpecialColumns with SQL_ROWVER which creates a result set of columns that\n"
    "are automatically updated when any value in the row is updated.\n\n"
    "Each row fetched has the following columns:\n"
    " 0) scope\n"
    " 1) column_name\n"
    " 2) data_type\n"
    " 3) type_name\n"
    " 4) column_size\n"
    " 5) buffer_length\n"
    " 6) decimal_digits\n"
    " 7) pseudo_column";

char* Cursor_specialColumn_kwnames[] = { "table", "catalog", "schema", "nullable", 0 };

// Shared implementation for rowIdColumns/rowVerColumns: runs SQLSpecialColumns
// with the given identifier type (SQL_BEST_ROWID or SQL_ROWVER) and returns
// this cursor holding the results.
static PyObject* _specialColumns(PyObject* self, PyObject* args, PyObject* kwargs, SQLUSMALLINT nIdType)
{
    const char* szTable;
    const char* szCatalog = 0;
    const char* szSchema = 0;
    PyObject* pNullable = Py_True;

    if (!PyArg_ParseTupleAndKeywords(args, kwargs, "s|zzO", Cursor_specialColumn_kwnames, &szTable, &szCatalog, &szSchema, &pNullable))
        return 0;

    Cursor* cur = Cursor_Validate(self, CURSOR_REQUIRE_OPEN);

    if (!free_results(cur, FREE_STATEMENT | FREE_PREPARED))
        return 0;

    SQLRETURN ret = 0;

    SQLUSMALLINT nNullable = (SQLUSMALLINT)(PyObject_IsTrue(pNullable) ? SQL_NULLABLE : SQL_NO_NULLS);

    Py_BEGIN_ALLOW_THREADS
    ret = SQLSpecialColumns(cur->hstmt, nIdType, (SQLCHAR*)szCatalog, SQL_NTS, (SQLCHAR*)szSchema, SQL_NTS, (SQLCHAR*)szTable, SQL_NTS,
                            SQL_SCOPE_TRANSACTION, nNullable);
    Py_END_ALLOW_THREADS

    if (!SQL_SUCCEEDED(ret))
        return RaiseErrorFromHandle(cur->cnxn, "SQLSpecialColumns", cur->cnxn->hdbc, cur->hstmt);

    SQLSMALLINT cCols;
    Py_BEGIN_ALLOW_THREADS
    ret = SQLNumResultCols(cur->hstmt, &cCols);
    Py_END_ALLOW_THREADS
    if (!SQL_SUCCEEDED(ret))
        return RaiseErrorFromHandle(cur->cnxn, "SQLNumResultCols", cur->cnxn->hdbc, cur->hstmt);

    if (!PrepareResults(cur, cCols))
        return 0;

    if (!create_name_map(cur, cCols, true))
        return 0;

    // Return the cursor so the results can be iterated over directly.
    Py_INCREF(cur);
    return (PyObject*)cur;
}

// Python method Cursor.rowIdColumns(): columns that uniquely identify a row.
static PyObject* Cursor_rowIdColumns(PyObject* self, PyObject* args, PyObject* kwargs)
{
    return _specialColumns(self, args, kwargs, SQL_BEST_ROWID);
}

// Python method Cursor.rowVerColumns(): columns auto-updated on row change.
static PyObject* Cursor_rowVerColumns(PyObject* self, PyObject* args, PyObject* kwargs)
{
    return _specialColumns(self, args, kwargs, SQL_ROWVER);
}

static char primaryKeys_doc[] =
    "C.primaryKeys(table, catalog=None, schema=None) --> self\n\n"
    "Creates a results set of column names that make up the primary key for a table\n"
    "by executing the SQLPrimaryKeys function.\n"
    "Each row fetched has the following columns:\n"
    " 0) table_cat\n"
    " 1) table_schem\n"
    " 2) table_name\n"
    " 3) column_name\n"
    " 4) key_seq\n"
    " 5) pk_name";

char* Cursor_primaryKeys_kwnames[] = { "table", "catalog", "schema", 0 };

// Python method Cursor.primaryKeys(): runs SQLPrimaryKeys for one table and
// returns this cursor holding the results.  `table` is required.
static PyObject* Cursor_primaryKeys(PyObject* self, PyObject* args, PyObject* kwargs)
{
    const char* szTable;
    const char* szCatalog = 0;
    const char* szSchema = 0;

    if (!PyArg_ParseTupleAndKeywords(args, kwargs, "s|zz", Cursor_primaryKeys_kwnames, &szTable, &szCatalog, &szSchema))
        return 0;

    Cursor* cur = Cursor_Validate(self, CURSOR_REQUIRE_OPEN);

    if (!free_results(cur, FREE_STATEMENT | FREE_PREPARED))
        return 0;

    SQLRETURN ret = 0;

    Py_BEGIN_ALLOW_THREADS
    ret = SQLPrimaryKeys(cur->hstmt, (SQLCHAR*)szCatalog, SQL_NTS, (SQLCHAR*)szSchema, SQL_NTS, (SQLCHAR*)szTable, SQL_NTS);
    Py_END_ALLOW_THREADS

    if (!SQL_SUCCEEDED(ret))
        return RaiseErrorFromHandle(cur->cnxn, "SQLPrimaryKeys", cur->cnxn->hdbc, cur->hstmt);

    SQLSMALLINT cCols;
    Py_BEGIN_ALLOW_THREADS
    ret = SQLNumResultCols(cur->hstmt, &cCols);
    Py_END_ALLOW_THREADS
    if (!SQL_SUCCEEDED(ret))
        return RaiseErrorFromHandle(cur->cnxn, "SQLNumResultCols", cur->cnxn->hdbc, cur->hstmt);

    if (!PrepareResults(cur, cCols))
        return 0;

    if (!create_name_map(cur, cCols, true))
        return 0;

    // Return the cursor so the results can be iterated over directly.
    Py_INCREF(cur);
    return (PyObject*)cur;
}

static char foreignKeys_doc[] =
    "C.foreignKeys(table=None, catalog=None, schema=None,\n"
    "            foreignTable=None, foreignCatalog=None, foreignSchema=None) --> self\n\n"
    "Executes the SQLForeignKeys function and creates a results set of column names\n"
    "that are foreign keys in the specified table (columns in the specified table\n"
    "that refer to primary keys in other tables) or foreign keys in other tables\n"
    "that refer to the primary key in the specified table.\n\n"
    "Each row fetched has the following columns:\n"
    "  0) pktable_cat\n"
    "  1) pktable_schem\n"
    "  2) pktable_name\n"
    "  3) pkcolumn_name\n"
    "  4) fktable_cat\n"
    "  5) fktable_schem\n"
    "  6) fktable_name\n"
    "  7) fkcolumn_name\n"
    "  8) key_seq\n"
    "  9) update_rule\n"
    " 10) delete_rule\n"
    " 11) fk_name\n"
    " 12) pk_name\n"
    " 13) deferrability";

char* Cursor_foreignKeys_kwnames[] = { "table", "catalog", "schema", "foreignTable", "foreignCatalog", "foreignSchema", 0 };

// Python method Cursor.foreignKeys(): runs SQLForeignKeys.  The `table`
// arguments are passed as the primary-key side and the `foreign*` arguments
// as the foreign-key side of the ODBC call.
static PyObject* Cursor_foreignKeys(PyObject* self, PyObject* args, PyObject* kwargs)
{
    const char* szTable = 0;
    const char* szCatalog = 0;
    const char* szSchema = 0;
    const char* szForeignTable = 0;
    const char* szForeignCatalog = 0;
    const char* szForeignSchema = 0;

    if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|zzzzzz", Cursor_foreignKeys_kwnames, &szTable, &szCatalog, &szSchema,
                                     &szForeignTable, &szForeignCatalog, &szForeignSchema))
        return 0;

    Cursor* cur = Cursor_Validate(self, CURSOR_REQUIRE_OPEN);

    if (!free_results(cur, FREE_STATEMENT | FREE_PREPARED))
        return 0;

    SQLRETURN ret = 0;

    Py_BEGIN_ALLOW_THREADS
    ret = SQLForeignKeys(cur->hstmt, (SQLCHAR*)szCatalog, SQL_NTS, (SQLCHAR*)szSchema, SQL_NTS, (SQLCHAR*)szTable, SQL_NTS,
                         (SQLCHAR*)szForeignCatalog, SQL_NTS, (SQLCHAR*)szForeignSchema, SQL_NTS, (SQLCHAR*)szForeignTable, SQL_NTS);
    Py_END_ALLOW_THREADS

    if (!SQL_SUCCEEDED(ret))
        return RaiseErrorFromHandle(cur->cnxn, "SQLForeignKeys", cur->cnxn->hdbc, cur->hstmt);

    SQLSMALLINT cCols;
    Py_BEGIN_ALLOW_THREADS
    ret =
SQLNumResultCols(cur->hstmt, &cCols); Py_END_ALLOW_THREADS if (!SQL_SUCCEEDED(ret)) return RaiseErrorFromHandle(cur->cnxn, "SQLNumResultCols", cur->cnxn->hdbc, cur->hstmt); if (!PrepareResults(cur, cCols)) return 0; if (!create_name_map(cur, cCols, true)) return 0; // Return the cursor so the results can be iterated over directly. Py_INCREF(cur); return (PyObject*)cur; } static char getTypeInfo_doc[] = "C.getTypeInfo(sqlType=None) --> self\n\n" "Executes SQLGetTypeInfo a creates a result set with information about the\n" "specified data type or all data types supported by the ODBC driver if not\n" "specified.\n\n" "Each row fetched has the following columns:\n" " 0) type_name\n" " 1) data_type\n" " 2) column_size\n" " 3) literal_prefix\n" " 4) literal_suffix\n" " 5) create_params\n" " 6) nullable\n" " 7) case_sensitive\n" " 8) searchable\n" " 9) unsigned_attribute\n" "10) fixed_prec_scale\n" "11) auto_unique_value\n" "12) local_type_name\n" "13) minimum_scale\n" "14) maximum_scale\n" "15) sql_data_type\n" "16) sql_datetime_sub\n" "17) num_prec_radix\n" "18) interval_precision"; static PyObject* Cursor_getTypeInfo(PyObject* self, PyObject* args, PyObject* kwargs) { UNUSED(kwargs); int nDataType = SQL_ALL_TYPES; if (!PyArg_ParseTuple(args, "|i", &nDataType)) return 0; Cursor* cur = Cursor_Validate(self, CURSOR_REQUIRE_OPEN); if (!free_results(cur, FREE_STATEMENT | FREE_PREPARED)) return 0; SQLRETURN ret = 0; Py_BEGIN_ALLOW_THREADS ret = SQLGetTypeInfo(cur->hstmt, (SQLSMALLINT)nDataType); Py_END_ALLOW_THREADS if (!SQL_SUCCEEDED(ret)) return RaiseErrorFromHandle(cur->cnxn, "SQLGetTypeInfo", cur->cnxn->hdbc, cur->hstmt); SQLSMALLINT cCols; Py_BEGIN_ALLOW_THREADS ret = SQLNumResultCols(cur->hstmt, &cCols); Py_END_ALLOW_THREADS if (!SQL_SUCCEEDED(ret)) return RaiseErrorFromHandle(cur->cnxn, "SQLNumResultCols", cur->cnxn->hdbc, cur->hstmt); if (!PrepareResults(cur, cCols)) return 0; if (!create_name_map(cur, cCols, true)) return 0; // Return the cursor so the results can 
be iterated over directly. Py_INCREF(cur); return (PyObject*)cur; } static PyObject* Cursor_nextset(PyObject* self, PyObject* args) { UNUSED(args); Cursor* cur = Cursor_Validate(self, 0); if (!cur) return 0; SQLRETURN ret = 0; Py_BEGIN_ALLOW_THREADS ret = SQLMoreResults(cur->hstmt); Py_END_ALLOW_THREADS if (ret == SQL_NO_DATA) { free_results(cur, FREE_STATEMENT | KEEP_PREPARED); Py_RETURN_FALSE; } if (!SQL_SUCCEEDED(ret)) { TRACE("nextset: %d not SQL_SUCCEEDED\n", ret); // Note: The SQL Server driver sometimes returns HY007 here if multiple statements (separated by ;) were // submitted. This is not documented, but I've seen it with multiple successful inserts. PyObject* pError = GetErrorFromHandle(cur->cnxn, "SQLMoreResults", cur->cnxn->hdbc, cur->hstmt); // // free_results must be run after the error has been collected // from the cursor as it's lost otherwise. // If free_results raises an error (eg a lost connection) report that instead. // if (!free_results(cur, FREE_STATEMENT | KEEP_PREPARED)) { return 0; } // // Return any error from the GetErrorFromHandle call above. // if (pError) { RaiseErrorFromException(pError); Py_DECREF(pError); return 0; } // // Not clear how we'd get here, but if we're in an error state // without an error, behave as if we had no nextset // Py_RETURN_FALSE; } // Must retrieve DiagRecs immediately after SQLMoreResults if (ret == SQL_SUCCESS_WITH_INFO) { GetDiagRecs(cur); } else { Py_XDECREF(cur->messages); cur->messages = PyList_New(0); } SQLSMALLINT cCols; Py_BEGIN_ALLOW_THREADS ret = SQLNumResultCols(cur->hstmt, &cCols); Py_END_ALLOW_THREADS if (!SQL_SUCCEEDED(ret)) { // Note: The SQL Server driver sometimes returns HY007 here if multiple statements (separated by ;) were // submitted. This is not documented, but I've seen it with multiple successful inserts. 
PyObject* pError = GetErrorFromHandle(cur->cnxn, "SQLNumResultCols", cur->cnxn->hdbc, cur->hstmt); free_results(cur, FREE_STATEMENT | KEEP_PREPARED | KEEP_MESSAGES); return pError; } free_results(cur, KEEP_STATEMENT | KEEP_PREPARED | KEEP_MESSAGES); if (cCols != 0) { // A result set was created. if (!PrepareResults(cur, cCols)) return 0; if (!create_name_map(cur, cCols, lowercase())) return 0; } SQLLEN cRows; Py_BEGIN_ALLOW_THREADS ret = SQLRowCount(cur->hstmt, &cRows); Py_END_ALLOW_THREADS cur->rowcount = (int)cRows; if (!SQL_SUCCEEDED(ret)) return RaiseErrorFromHandle(cur->cnxn, "SQLRowCount", cur->cnxn->hdbc, cur->hstmt); Py_RETURN_TRUE; } static char procedureColumns_doc[] = "C.procedureColumns(procedure=None, catalog=None, schema=None) --> self\n\n" "Executes SQLProcedureColumns and creates a result set of information\n" "about stored procedure columns and results.\n" " 0) procedure_cat\n" " 1) procedure_schem\n" " 2) procedure_name\n" " 3) column_name\n" " 4) column_type\n" " 5) data_type\n" " 6) type_name\n" " 7) column_size\n" " 8) buffer_length\n" " 9) decimal_digits\n" " 10) num_prec_radix\n" " 11) nullable\n" " 12) remarks\n" " 13) column_def\n" " 14) sql_data_type\n" " 15) sql_datetime_sub\n" " 16) char_octet_length\n" " 17) ordinal_position\n" " 18) is_nullable"; char* Cursor_procedureColumns_kwnames[] = { "procedure", "catalog", "schema", 0 }; static PyObject* Cursor_procedureColumns(PyObject* self, PyObject* args, PyObject* kwargs) { const char* szProcedure = 0; const char* szCatalog = 0; const char* szSchema = 0; if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|zzz", Cursor_procedureColumns_kwnames, &szProcedure, &szCatalog, &szSchema)) return 0; Cursor* cur = Cursor_Validate(self, CURSOR_REQUIRE_OPEN); if (!free_results(cur, FREE_STATEMENT | FREE_PREPARED)) return 0; SQLRETURN ret = 0; Py_BEGIN_ALLOW_THREADS ret = SQLProcedureColumns(cur->hstmt, (SQLCHAR*)szCatalog, SQL_NTS, (SQLCHAR*)szSchema, SQL_NTS, (SQLCHAR*)szProcedure, SQL_NTS, 0, 0); 
Py_END_ALLOW_THREADS if (!SQL_SUCCEEDED(ret)) return RaiseErrorFromHandle(cur->cnxn, "SQLProcedureColumns", cur->cnxn->hdbc, cur->hstmt); SQLSMALLINT cCols; Py_BEGIN_ALLOW_THREADS ret = SQLNumResultCols(cur->hstmt, &cCols); Py_END_ALLOW_THREADS if (!SQL_SUCCEEDED(ret)) return RaiseErrorFromHandle(cur->cnxn, "SQLNumResultCols", cur->cnxn->hdbc, cur->hstmt); if (!PrepareResults(cur, cCols)) return 0; if (!create_name_map(cur, cCols, true)) return 0; // Return the cursor so the results can be iterated over directly. Py_INCREF(cur); return (PyObject*)cur; } static char procedures_doc[] = "C.procedures(procedure=None, catalog=None, schema=None) --> self\n\n" "Executes SQLProcedures and creates a result set of information about the\n" "procedures in the data source.\n" "Each row fetched has the following columns:\n" " 0) procedure_cat\n" " 1) procedure_schem\n" " 2) procedure_name\n" " 3) num_input_params\n" " 4) num_output_params\n" " 5) num_result_sets\n" " 6) remarks\n" " 7) procedure_type"; char* Cursor_procedures_kwnames[] = { "procedure", "catalog", "schema", 0 }; static PyObject* Cursor_procedures(PyObject* self, PyObject* args, PyObject* kwargs) { const char* szProcedure = 0; const char* szCatalog = 0; const char* szSchema = 0; if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|zzz", Cursor_procedures_kwnames, &szProcedure, &szCatalog, &szSchema)) return 0; Cursor* cur = Cursor_Validate(self, CURSOR_REQUIRE_OPEN); if (!free_results(cur, FREE_STATEMENT | FREE_PREPARED)) return 0; SQLRETURN ret = 0; Py_BEGIN_ALLOW_THREADS ret = SQLProcedures(cur->hstmt, (SQLCHAR*)szCatalog, SQL_NTS, (SQLCHAR*)szSchema, SQL_NTS, (SQLCHAR*)szProcedure, SQL_NTS); Py_END_ALLOW_THREADS if (!SQL_SUCCEEDED(ret)) return RaiseErrorFromHandle(cur->cnxn, "SQLProcedures", cur->cnxn->hdbc, cur->hstmt); SQLSMALLINT cCols; Py_BEGIN_ALLOW_THREADS ret = SQLNumResultCols(cur->hstmt, &cCols); Py_END_ALLOW_THREADS if (!SQL_SUCCEEDED(ret)) return RaiseErrorFromHandle(cur->cnxn, "SQLNumResultCols", 
cur->cnxn->hdbc, cur->hstmt); if (!PrepareResults(cur, cCols)) return 0; if (!create_name_map(cur, cCols, true)) return 0; // Return the cursor so the results can be iterated over directly. Py_INCREF(cur); return (PyObject*)cur; } static char skip_doc[] = "skip(count) --> None\n" \ "\n" \ "Skips the next `count` records by calling SQLFetchScroll with SQL_FETCH_NEXT.\n" "For convenience, skip(0) is accepted and will do nothing."; static PyObject* Cursor_skip(PyObject* self, PyObject* args) { Cursor* cursor = Cursor_Validate(self, CURSOR_REQUIRE_RESULTS | CURSOR_RAISE_ERROR); if (!cursor) return 0; int count; if (!PyArg_ParseTuple(args, "i", &count)) return 0; if (count == 0) Py_RETURN_NONE; // Note: I'm not sure about the performance implications of looping here -- I certainly would rather use // SQLFetchScroll(SQL_FETCH_RELATIVE, count), but it requires scrollable cursors which are often slower. I would // not expect skip to be used in performance intensive code since different SQL would probably be the "right" // answer instead of skip anyway. 
SQLRETURN ret = SQL_SUCCESS; Py_BEGIN_ALLOW_THREADS for (int i = 0; i < count && SQL_SUCCEEDED(ret); i++) ret = SQLFetchScroll(cursor->hstmt, SQL_FETCH_NEXT, 0); Py_END_ALLOW_THREADS if (!SQL_SUCCEEDED(ret) && ret != SQL_NO_DATA) return RaiseErrorFromHandle(cursor->cnxn, "SQLFetchScroll", cursor->cnxn->hdbc, cursor->hstmt); Py_RETURN_NONE; } static const char* commit_doc = "Commits any pending transaction to the database on the current connection,\n" "including those from other cursors.\n"; static PyObject* Cursor_commit(PyObject* self, PyObject* args) { Cursor* cur = Cursor_Validate(self, CURSOR_REQUIRE_OPEN | CURSOR_RAISE_ERROR); if (!cur) return 0; return Connection_endtrans(cur->cnxn, SQL_COMMIT); } static char rollback_doc[] = "Rolls back any pending transaction to the database on the current connection,\n" "including those from other cursors.\n"; static PyObject* Cursor_rollback(PyObject* self, PyObject* args) { Cursor* cur = Cursor_Validate(self, CURSOR_REQUIRE_OPEN | CURSOR_RAISE_ERROR); if (!cur) return 0; return Connection_endtrans(cur->cnxn, SQL_ROLLBACK); } static char cancel_doc[] = "Cursor.cancel() -> None\n" "Cancels the processing of the current statement.\n" "\n" "Cancels the processing of the current statement.\n" "\n" "This calls SQLCancel and is designed to be called from another thread to" "stop processing of an ongoing query."; static PyObject* Cursor_cancel(PyObject* self, PyObject* args) { UNUSED(args); Cursor* cur = Cursor_Validate(self, CURSOR_REQUIRE_OPEN | CURSOR_RAISE_ERROR); if (!cur) return 0; SQLRETURN ret; Py_BEGIN_ALLOW_THREADS ret = SQLCancel(cur->hstmt); Py_END_ALLOW_THREADS if (!SQL_SUCCEEDED(ret)) return RaiseErrorFromHandle(cur->cnxn, "SQLCancel", cur->cnxn->hdbc, cur->hstmt); Py_RETURN_NONE; } static PyObject* Cursor_ignored(PyObject* self, PyObject* args) { UNUSED(self, args); Py_RETURN_NONE; } static char rowcount_doc[] = "This read-only attribute specifies the number of rows the last DML statement\n" " (INSERT, UPDATE, 
DELETE) affected. This is set to -1 for SELECT statements."; static char description_doc[] = "This read-only attribute is a sequence of 7-item sequences. Each of these\n" \ "sequences contains information describing one result column: (name, type_code,\n" \ "display_size, internal_size, precision, scale, null_ok). All values except\n" \ "name, type_code, and internal_size are None. The type_code entry will be the\n" \ "type object used to create values for that column (e.g. `str` or\n" \ "`datetime.datetime`).\n" \ "\n" \ "This attribute will be None for operations that do not return rows or if the\n" \ "cursor has not had an operation invoked via the execute() method yet.\n" \ "\n" \ "The type_code can be interpreted by comparing it to the Type Objects defined in\n" \ "the DB API and defined the pyodbc module: Date, Time, Timestamp, Binary,\n" \ "STRING, BINARY, NUMBER, and DATETIME."; static char arraysize_doc[] = "This read/write attribute specifies the number of rows to fetch at a time with\n" \ "fetchmany(). It defaults to 1 meaning to fetch a single row at a time."; static char connection_doc[] = "This read-only attribute return a reference to the Connection object on which\n" \ "the cursor was created.\n" \ "\n" \ "The attribute simplifies writing polymorph code in multi-connection\n" \ "environments."; static char fastexecmany_doc[] = "This read/write attribute specifies whether to use a faster executemany() which\n" \ "uses parameter arrays. 
Not all drivers may work with this implementation."; static char messages_doc[] = "This read-only attribute is a list of all the diagnostic messages in the\n" \ "current result set."; static PyMemberDef Cursor_members[] = { {"rowcount", T_INT, offsetof(Cursor, rowcount), READONLY, rowcount_doc }, {"description", T_OBJECT_EX, offsetof(Cursor, description), READONLY, description_doc }, {"arraysize", T_INT, offsetof(Cursor, arraysize), 0, arraysize_doc }, {"connection", T_OBJECT_EX, offsetof(Cursor, cnxn), READONLY, connection_doc }, {"fast_executemany",T_BOOL, offsetof(Cursor, fastexecmany), 0, fastexecmany_doc }, {"messages", T_OBJECT_EX, offsetof(Cursor, messages), READONLY, messages_doc }, { 0 } }; static PyObject* Cursor_getnoscan(PyObject* self, void *closure) { UNUSED(closure); Cursor* cursor = Cursor_Validate(self, CURSOR_REQUIRE_OPEN | CURSOR_RAISE_ERROR); if (!cursor) return 0; SQLULEN noscan = SQL_NOSCAN_OFF; SQLRETURN ret; Py_BEGIN_ALLOW_THREADS ret = SQLGetStmtAttr(cursor->hstmt, SQL_ATTR_NOSCAN, (SQLPOINTER)&noscan, sizeof(SQLULEN), 0); Py_END_ALLOW_THREADS if (!SQL_SUCCEEDED(ret)) { // Not supported? We're going to assume 'no'. Py_RETURN_FALSE; } if (noscan == SQL_NOSCAN_OFF) Py_RETURN_FALSE; Py_RETURN_TRUE; } static int Cursor_setnoscan(PyObject* self, PyObject* value, void *closure) { UNUSED(closure); Cursor* cursor = Cursor_Validate(self, CURSOR_REQUIRE_OPEN | CURSOR_RAISE_ERROR); if (!cursor) return -1; if (value == 0) { PyErr_SetString(PyExc_TypeError, "Cannot delete the noscan attribute"); return -1; } uintptr_t noscan = PyObject_IsTrue(value) ? 
SQL_NOSCAN_ON : SQL_NOSCAN_OFF; SQLRETURN ret; Py_BEGIN_ALLOW_THREADS ret = SQLSetStmtAttr(cursor->hstmt, SQL_ATTR_NOSCAN, (SQLPOINTER)noscan, 0); Py_END_ALLOW_THREADS if (!SQL_SUCCEEDED(ret)) { RaiseErrorFromHandle(cursor->cnxn, "SQLSetStmtAttr(SQL_ATTR_NOSCAN)", cursor->cnxn->hdbc, cursor->hstmt); return -1; } return 0; } static PyGetSetDef Cursor_getsetters[] = { {"noscan", Cursor_getnoscan, Cursor_setnoscan, "NOSCAN statement attr", 0}, { 0 } }; static char executemany_doc[] = "executemany(sql, seq_of_params) --> Cursor | count | None\n" \ "\n" \ "Prepare a database query or command and then execute it against all parameter\n" \ "sequences found in the sequence seq_of_params.\n" \ "\n" \ "Only the result of the final execution is returned. See `execute` for a\n" \ "description of parameter passing the return value."; static char nextset_doc[] = "nextset() --> True | None\n" \ "\n" \ "Jumps to the next resultset if the last sql has multiple resultset." \ "Returns True if there is a next resultset otherwise None."; static char ignored_doc[] = "Ignored."; static char fetchval_doc[] = "fetchval() --> value | None\n" \ "\n" "Returns the first column of the next row in the result set or None\n" \ "if there are no more rows."; static char fetchone_doc[] = "fetchone() --> Row | None\n" \ "\n" \ "Fetch the next row of a query result set, returning a single Row instance, or\n" \ "None when no more data is available.\n" \ "\n" \ "A ProgrammingError exception is raised if the previous call to execute() did\n" \ "not produce any result set or no call was issued yet."; static char fetchmany_doc[] = "fetchmany(size=cursor.arraysize) --> list of Rows\n" \ "\n" \ "Fetch the next set of rows of a query result, returning a list of Row\n" \ "instances. An empty list is returned when no more rows are available.\n" \ "\n" \ "The number of rows to fetch per call is specified by the parameter. 
If it is\n" \ "not given, the cursor's arraysize determines the number of rows to be\n" \ "fetched. The method should try to fetch as many rows as indicated by the size\n" \ "parameter. If this is not possible due to the specified number of rows not\n" \ "being available, fewer rows may be returned.\n" \ "\n" \ "A ProgrammingError exception is raised if the previous call to execute() did\n" \ "not produce any result set or no call was issued yet."; static char fetchall_doc[] = "fetchall() --> list of Rows\n" \ "\n" \ "Fetch all remaining rows of a query result, returning them as a list of Rows.\n" \ "An empty list is returned if there are no more rows.\n" \ "\n" \ "A ProgrammingError exception is raised if the previous call to execute() did\n" \ "not produce any result set or no call was issued yet."; static char setinputsizes_doc[] = "setinputsizes(sizes) -> None\n" \ "\n" \ "Sets the type information to be used when binding parameters.\n" \ "sizes must be a sequence of values, one for each input parameter.\n" \ "Each value may be an integer to override the column size when binding character\n" \ "data, a Type Object to override the SQL type, or a sequence of integers to specify\n" \ "(SQL type, column size, decimal digits) where any may be none to use the default.\n" \ "\n" \ "Parameters beyond the length of the sequence will be bound with the defaults.\n" \ "Setting sizes to None reverts all parameters to the defaults."; static char enter_doc[] = "__enter__() -> self."; static PyObject* Cursor_enter(PyObject* self, PyObject* args) { UNUSED(args); Py_INCREF(self); return self; } static char exit_doc[] = "__exit__(*excinfo) -> None. Commits the connection if necessary.."; static PyObject* Cursor_exit(PyObject* self, PyObject* args) { Cursor* cursor = Cursor_Validate(self, CURSOR_REQUIRE_OPEN | CURSOR_RAISE_ERROR); if (!cursor) return 0; // If an error has occurred, `args` will be a tuple of 3 values. Otherwise it will be a tuple of 3 `None`s. 
// (continuation of Cursor_exit)
    I(PyTuple_Check(args));

    // Only commit when autocommit is off and the `with` block exited without
    // an exception (first excinfo entry is None).
    if (cursor->cnxn->nAutoCommit == SQL_AUTOCOMMIT_OFF && PyTuple_GetItem(args, 0) == Py_None)
    {
        SQLRETURN ret;
        Py_BEGIN_ALLOW_THREADS
        ret = SQLEndTran(SQL_HANDLE_DBC, cursor->cnxn->hdbc, SQL_COMMIT);
        Py_END_ALLOW_THREADS

        if (!SQL_SUCCEEDED(ret))
            return RaiseErrorFromHandle(cursor->cnxn, "SQLEndTran(SQL_COMMIT)", cursor->cnxn->hdbc, cursor->hstmt);
    }

    Py_RETURN_NONE;
}

// Method table for the Cursor type.  Each entry pairs a Python-visible name
// with its C implementation, calling convention, and docstring.
static PyMethodDef Cursor_methods[] =
{
    { "close",            (PyCFunction)Cursor_close,            METH_NOARGS,                close_doc            },
    { "execute",          (PyCFunction)Cursor_execute,          METH_VARARGS,               execute_doc          },
    { "executemany",      (PyCFunction)Cursor_executemany,      METH_VARARGS,               executemany_doc      },
    { "setinputsizes",    (PyCFunction)Cursor_setinputsizes,    METH_O,                     setinputsizes_doc    },
    { "setoutputsize",    (PyCFunction)Cursor_ignored,          METH_VARARGS,               ignored_doc          },
    { "fetchval",         (PyCFunction)Cursor_fetchval,         METH_NOARGS,                fetchval_doc         },
    { "fetchone",         (PyCFunction)Cursor_fetchone,         METH_NOARGS,                fetchone_doc         },
    { "fetchall",         (PyCFunction)Cursor_fetchall,         METH_NOARGS,                fetchall_doc         },
    { "fetchmany",        (PyCFunction)Cursor_fetchmany,        METH_VARARGS,               fetchmany_doc        },
    { "nextset",          (PyCFunction)Cursor_nextset,          METH_NOARGS,                nextset_doc          },
    { "tables",           (PyCFunction)Cursor_tables,           METH_VARARGS|METH_KEYWORDS, tables_doc           },
    { "columns",          (PyCFunction)Cursor_columns,          METH_VARARGS|METH_KEYWORDS, columns_doc          },
    { "statistics",       (PyCFunction)Cursor_statistics,       METH_VARARGS|METH_KEYWORDS, statistics_doc       },
    { "rowIdColumns",     (PyCFunction)Cursor_rowIdColumns,     METH_VARARGS|METH_KEYWORDS, rowIdColumns_doc     },
    { "rowVerColumns",    (PyCFunction)Cursor_rowVerColumns,    METH_VARARGS|METH_KEYWORDS, rowVerColumns_doc    },
    { "primaryKeys",      (PyCFunction)Cursor_primaryKeys,      METH_VARARGS|METH_KEYWORDS, primaryKeys_doc      },
    { "foreignKeys",      (PyCFunction)Cursor_foreignKeys,      METH_VARARGS|METH_KEYWORDS, foreignKeys_doc      },
    { "getTypeInfo",      (PyCFunction)Cursor_getTypeInfo,      METH_VARARGS|METH_KEYWORDS, getTypeInfo_doc      },
    { "procedures",       (PyCFunction)Cursor_procedures,       METH_VARARGS|METH_KEYWORDS, procedures_doc       },
    { "procedureColumns", (PyCFunction)Cursor_procedureColumns, METH_VARARGS|METH_KEYWORDS, procedureColumns_doc },
    { "skip",             (PyCFunction)Cursor_skip,             METH_VARARGS,               skip_doc             },
    { "commit",           (PyCFunction)Cursor_commit,           METH_NOARGS,                commit_doc           },
    { "rollback",         (PyCFunction)Cursor_rollback,         METH_NOARGS,                rollback_doc         },
    {"cancel",            (PyCFunction)Cursor_cancel,           METH_NOARGS,                cancel_doc},
    {"__enter__",         Cursor_enter,                         METH_NOARGS,                enter_doc },
    {"__exit__",          Cursor_exit,                          METH_VARARGS,               exit_doc },
    {0, 0, 0, 0}
};

static char cursor_doc[] =
    "Cursor objects represent a database cursor, which is used to manage the context\n"
    "of a fetch operation.  Cursors created from the same connection are not\n"
    "isolated, i.e., any changes done to the database by a cursor are immediately\n"
    "visible by the other cursors.  Cursors created from different connections are\n"
    "isolated.\n"
    "\n"
    "Cursors implement the iterator protocol, so results can be iterated:\n"
    "\n"
    "  cursor.execute(sql)\n"
    "  for row in cursor:\n"
    "     print row[0]";

// The Python type object for pyodbc.Cursor.
PyTypeObject CursorType =
{
    PyVarObject_HEAD_INIT(0, 0)
    "pyodbc.Cursor",                                        // tp_name
    sizeof(Cursor),                                         // tp_basicsize
    0,                                                      // tp_itemsize
    (destructor)Cursor_dealloc,                             // destructor tp_dealloc
    0,                                                      // tp_print
    0,                                                      // tp_getattr
    0,                                                      // tp_setattr
    0,                                                      // tp_compare
    0,                                                      // tp_repr
    0,                                                      // tp_as_number
    0,                                                      // tp_as_sequence
    0,                                                      // tp_as_mapping
    0,                                                      // tp_hash
    0,                                                      // tp_call
    0,                                                      // tp_str
    0,                                                      // tp_getattro
    0,                                                      // tp_setattro
    0,                                                      // tp_as_buffer
#if defined(Py_TPFLAGS_HAVE_ITER)
    Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_ITER,
#else
    Py_TPFLAGS_DEFAULT,
#endif
    cursor_doc,                                             // tp_doc
    0,                                                      // tp_traverse
    0,                                                      // tp_clear
    0,                                                      // tp_richcompare
    0,                                                      // tp_weaklistoffset
    Cursor_iter,                                            // tp_iter
    Cursor_iternext,                                        // tp_iternext
    Cursor_methods,                                         // tp_methods
    Cursor_members,                                         // tp_members
    Cursor_getsetters,                                      // tp_getset
    0,                                                      // tp_base
    0,                                                      // tp_dict
    0,                                                      // tp_descr_get
    0,                                                      // tp_descr_set
    0,                                                      // tp_dictoffset
    0,                                                      // tp_init
    0,                                                      // tp_alloc
    0,                                                      // tp_new
    0,                                                      // tp_free
    0,                                                      // tp_is_gc
    0,                                                      // tp_bases
    0,                                                      // tp_mro
    0,                                                      // tp_cache
    0,                                                      // tp_subclasses
    0,                                                      // tp_weaklist
};

// Creates a new Cursor for the given connection, allocating the ODBC
// statement handle and propagating the connection's query timeout.  Exported
// to allow the connection class to create cursors.  Returns 0 with a Python
// exception set on failure.
Cursor* Cursor_New(Connection* cnxn)
{
    // Exported to allow the connection class to create cursors.

#ifdef _MSC_VER
#pragma warning(disable : 4365)
#endif
    Cursor* cur = PyObject_NEW(Cursor, &CursorType);
#ifdef _MSC_VER
#pragma warning(default : 4365)
#endif

    if (cur)
    {
        // Initialize every member so Cursor_dealloc is safe even if
        // construction fails partway through below.
        cur->cnxn              = cnxn;
        cur->hstmt             = SQL_NULL_HANDLE;
        cur->description       = Py_None;
        cur->pPreparedSQL      = 0;
        cur->paramcount        = 0;
        cur->paramtypes        = 0;
        cur->paramInfos        = 0;
        cur->inputsizes        = 0;
        cur->colinfos          = 0;
        cur->arraysize         = 1;
        cur->rowcount          = -1;
        cur->map_name_to_index = 0;
        cur->fastexecmany      = 0;
        cur->messages          = Py_None;

        Py_INCREF(cnxn);
        Py_INCREF(cur->description);
        Py_INCREF(cur->messages);

        SQLRETURN ret;
        Py_BEGIN_ALLOW_THREADS
        ret = SQLAllocHandle(SQL_HANDLE_STMT, cnxn->hdbc, &cur->hstmt);
        Py_END_ALLOW_THREADS

        if (!SQL_SUCCEEDED(ret))
        {
            RaiseErrorFromHandle(cnxn, "SQLAllocHandle", cnxn->hdbc, SQL_NULL_HANDLE);
            Py_DECREF(cur);
            return 0;
        }

        if (cnxn->timeout)
        {
            Py_BEGIN_ALLOW_THREADS
            ret = SQLSetStmtAttr(cur->hstmt, SQL_ATTR_QUERY_TIMEOUT, (SQLPOINTER)(uintptr_t)cnxn->timeout, 0);
            Py_END_ALLOW_THREADS

            if (!SQL_SUCCEEDED(ret))
            {
                RaiseErrorFromHandle(cnxn, "SQLSetStmtAttr(SQL_ATTR_QUERY_TIMEOUT)", cnxn->hdbc, cur->hstmt);
                Py_DECREF(cur);
                return 0;
            }
        }

        TRACE("cursor.new cnxn=%p hdbc=%d cursor=%p hstmt=%d\n", (Connection*)cur->cnxn, ((Connection*)cur->cnxn)->hdbc, cur, cur->hstmt);
    }

    return cur;
}

// Module-level one-time initialization: import the datetime C API.
void Cursor_init()
{
    PyDateTime_IMPORT;
}
././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1619054768.0 pyodbc-4.0.32/src/cursor.h0000664000175000017500000001450100000000000016701 0ustar00mkleehammermkleehammer/*
 * Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
 * documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge,
publish, distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
 * WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE AUTHORS
 * OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#ifndef CURSOR_H
#define CURSOR_H

struct Connection;

// Per-result-column metadata captured after a query (see PrepareResults in cursor.cpp).
struct ColumnInfo
{
    SQLSMALLINT sql_type;

    // The column size from SQLDescribeCol.  For character types, this is the maximum length, not including the NULL
    // terminator.  For binary values, this is the maximum length.  For numeric and decimal values, it is the defined
    // number of digits.  For example, the precision of a column defined as NUMERIC(10,3) is 10.
    //
    // This value can be SQL_NO_TOTAL in which case the driver doesn't know the maximum length, such as for LONGVARCHAR
    // fields.
    SQLULEN column_size;

    // Tells us if an integer type is signed or unsigned.  This is determined after a query using SQLColAttribute.  All
    // of the integer types are the same size whether signed and unsigned, so we can allocate memory ahead of time
    // without knowing this.  We use this during the fetch when converting to a Python integer or long.
    bool is_unsigned;
};

// Describes one bound input parameter; mirrors the SQLBindParameter arguments.
struct ParamInfo
{
    // The following correspond to the SQLBindParameter parameters.
    SQLSMALLINT ValueType;
    SQLSMALLINT ParameterType;
    SQLULEN ColumnSize;
    SQLSMALLINT DecimalDigits;

    // The value pointer that will be bound.  If `alloc` is true, this was allocated with malloc and must be freed.
    // Otherwise it is zero or points into memory owned by the original Python parameter.
    SQLPOINTER ParameterValuePtr;

    SQLLEN BufferLength;
    SQLLEN StrLen_or_Ind;

    // If true, the memory in ParameterValuePtr was allocated via malloc and must be freed.
    bool allocated;

    PyObject* pObject; // An optional object that will be decremented at the end of the execute.
                       // This is useful when the ParameterValuePtr data is in a Python object -
                       // the object can be put here (and INCREFed if necessary!) instead of
                       // copying the data out.
                       //
                       // If SQLPutData is used, this must be set to a bytes or bytearray object!

    SQLLEN maxlength;  // If SQLPutData is being used, this must be set to the amount that can be
                       // written to each SQLPutData call. (It is not clear if they are limited
                       // like SQLBindParameter or not.)

    // For TVPs, the nested descriptors and current row.
    struct ParamInfo *nested;
    SQLLEN curTvpRow;

    // Optional data.  If used, ParameterValuePtr will point into this.
    union
    {
        unsigned char ch;
        int i32;
        INT64 i64;
        double dbl;
        TIMESTAMP_STRUCT timestamp;
        DATE_STRUCT date;
        TIME_STRUCT time;
    } Data;
};

// The pyodbc.Cursor object layout (a CPython extension type instance).
struct Cursor
{
    PyObject_HEAD

    // The Connection object (which is a PyObject) that created this cursor.
    Connection* cnxn;

    // Set to SQL_NULL_HANDLE when the cursor is closed.
    HSTMT hstmt;

    //
    // SQL Parameters
    //

    // If non-zero, a pointer to the previously prepared SQL string, allowing us to skip the prepare and gathering of
    // parameter data.
    PyObject* pPreparedSQL;

    // The number of parameter markers in pPreparedSQL.  This will be zero when pPreparedSQL is zero but is set
    // immediately after preparing the SQL.
    int paramcount;

    // If non-zero, a pointer to an array of SQL type values allocated via malloc.  This is zero until we actually ask
    // for the type of parameter, which is only when a parameter is None (NULL).  At that point, the entire array is
    // allocated (length == paramcount) but all entries are set to SQL_UNKNOWN_TYPE.
    SQLSMALLINT* paramtypes;

    // If non-zero, a pointer to a buffer containing the actual parameters bound.  If pPreparedSQL is zero, this should
    // be freed using free and set to zero.
    //
    // Even if the same SQL statement is executed twice, the parameter bindings are redone from scratch since we try to
    // bind into the Python objects directly.
    ParamInfo* paramInfos;

    // Parameter set array (used with executemany)
    unsigned char *paramArray;

    // Whether to use fast executemany with parameter arrays and other optimisations
    char fastexecmany;

    // The list of information for setinputsizes().
    PyObject *inputsizes;

    //
    // Result Information
    //

    // An array of ColumnInfos, allocated via malloc.  This will be zero when closed or when there are no query
    // results.
    ColumnInfo* colinfos;

    // The description tuple described in the DB API 2.0 specification.  Set to None when there are no results.
    PyObject* description;

    int arraysize;

    // The Cursor.rowcount attribute from the DB API specification.
    int rowcount;

    // A dictionary that maps from column name (PyString) to index into the result columns (PyInteger).  This is
    // constructued during an execute and shared with each row (reference counted) to implement accessing results by
    // column name.
    //
    // This duplicates some ODBC functionality, but allows us to use Row objects after the statement is closed and
    // should use less memory than putting each column into the Row's __dict__.
    //
    // Since this is shared by Row objects, it cannot be reused.  New dictionaries are created for every execute.  This
    // will be zero whenever there are no results.
    PyObject* map_name_to_index;

    // The messages attribute described in the DB API 2.0 specification.
    // Contains a list of all non-data messages provided by the driver, retrieved using SQLGetDiagRec.
    PyObject* messages;
};

void Cursor_init();

Cursor* Cursor_New(Connection* cnxn);

PyObject* Cursor_execute(PyObject* self, PyObject* args);

#endif
././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1619054768.0 pyodbc-4.0.32/src/dbspecific.h0000664000175000017500000000252500000000000017462 0ustar00mkleehammermkleehammer#ifndef DBSPECIFIC_H
#define DBSPECIFIC_H

// Items specific to databases.
//
// Obviously we'd like to minimize this, but if they are needed this file isolates them.  I'd like for there to be a
// single build of pyodbc on each platform and not have a bunch of defines for supporting different databases.

// ---------------------------------------------------------------------------------------------------------------------
// SQL Server

#define SQL_SS_XML -152             // SQL Server 2005 XML type
#define SQL_DB2_DECFLOAT -360       // IBM DB/2 DECFLOAT type
#define SQL_DB2_XML -370            // IBM DB/2 XML type
#define SQL_SS_TIME2 -154           // SQL Server 2008 time type

// Layout of the SQL Server TIME2 value as transferred by the driver.
struct SQL_SS_TIME2_STRUCT
{
    SQLUSMALLINT hour;
    SQLUSMALLINT minute;
    SQLUSMALLINT second;
    SQLUINTEGER fraction;
};

// The SQLGUID type isn't always available when compiling, so we'll make our own with a
// different name.
struct PYSQLGUID
{
    // I was hoping to use uint32_t, etc., but they aren't included in a Python build.  I'm not
    // going to require that the compilers supply anything beyond that.  There is PY_UINT32_T,
    // but there is no 16-bit version.  We'll stick with Microsoft's WORD and DWORD which I
    // believe the ODBC headers will have to supply.
    DWORD Data1;
    WORD Data2;
    WORD Data3;
    byte Data4[8];
};

#endif // DBSPECIFIC_H
././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1622310926.0 pyodbc-4.0.32/src/errors.cpp0000664000175000017500000002504200000000000017235 0ustar00mkleehammermkleehammer
#include "pyodbc.h"
#include "wrapper.h"
#include "textenc.h"
#include "connection.h"
#include "errors.h"
#include "pyodbcmodule.h"

// Exceptions

// Maps a SQLSTATE prefix to the pyodbc exception class to raise for it.
struct SqlStateMapping
{
    char* prefix;
    size_t prefix_len;
    PyObject** pexc_class;      // Note: Double indirection (pexc_class) necessary because the pointer values are not
                                // initialized during startup
};

// Ordered prefix table: exact 5-character states first, then 2-character
// class prefixes (first match wins in ExceptionFromSqlState below).
static const struct SqlStateMapping sql_state_mapping[] =
{
    { "01002", 5, &OperationalError },
    { "08001", 5, &OperationalError },
    { "08003", 5, &OperationalError },
    { "08004", 5, &OperationalError },
    { "08007", 5, &OperationalError },
    { "08S01", 5, &OperationalError },
    { "0A000", 5, &NotSupportedError },
    { "28000", 5, &InterfaceError },
    { "40002", 5, &IntegrityError },
    { "22",    2, &DataError },
    { "23",    2, &IntegrityError },
    { "24",    2, &ProgrammingError },
    { "25",    2, &ProgrammingError },
    { "42",    2, &ProgrammingError },
    { "HY001", 5, &OperationalError },
    { "HY014", 5, &OperationalError },
    { "HYT00", 5, &OperationalError },
    { "HYT01", 5, &OperationalError },
    { "IM001", 5, &InterfaceError },
    { "IM002", 5, &InterfaceError },
    { "IM003", 5, &InterfaceError },
};

static PyObject* ExceptionFromSqlState(const char* sqlstate)
{
    // Returns the appropriate Python exception class given a SQLSTATE value.
    // Falls back to the generic Error class when no prefix matches or the
    // state is missing/empty.

    if (sqlstate && *sqlstate)
    {
        for (size_t i = 0; i < _countof(sql_state_mapping); i++)
            if (memcmp(sqlstate, sql_state_mapping[i].prefix, sql_state_mapping[i].prefix_len) == 0)
                return *sql_state_mapping[i].pexc_class;
    }

    return Error;
}

PyObject* RaiseErrorV(const char* sqlstate, PyObject* exc_class, const char* format, ...)
{ PyObject *pAttrs = 0, *pError = 0; if (!sqlstate || !*sqlstate) sqlstate = "HY000"; if (!exc_class) exc_class = ExceptionFromSqlState(sqlstate); // Note: Don't use any native strprintf routines. With Py_ssize_t, we need "%zd", but VC .NET doesn't support it. // PyString_FromFormatV already takes this into account. va_list marker; va_start(marker, format); PyObject* pMsg = PyString_FromFormatV(format, marker); va_end(marker); if (!pMsg) { PyErr_NoMemory(); return 0; } // Create an exception with a 'sqlstate' attribute (set to None if we don't have one) whose 'args' attribute is a // tuple containing the message and sqlstate value. The 'sqlstate' attribute ensures it is easy to access in // Python (and more understandable to the reader than ex.args[1]), but putting it in the args ensures it shows up // in logs because of the default repr/str. pAttrs = Py_BuildValue("(Os)", pMsg, sqlstate); if (pAttrs) { pError = PyEval_CallObject(exc_class, pAttrs); if (pError) RaiseErrorFromException(pError); } Py_DECREF(pMsg); Py_XDECREF(pAttrs); Py_XDECREF(pError); return 0; } #if PY_MAJOR_VERSION < 3 #define PyString_CompareWithASCIIString(lhs, rhs) _strcmpi(PyString_AS_STRING(lhs), rhs) #else #define PyString_CompareWithASCIIString PyUnicode_CompareWithASCIIString #endif bool HasSqlState(PyObject* ex, const char* szSqlState) { // Returns true if `ex` is an exception and has the given SQLSTATE. It is safe to pass 0 for ex. bool has = false; if (ex) { PyObject* args = PyObject_GetAttrString(ex, "args"); if (args != 0) { PyObject* s = PySequence_GetItem(args, 1); if (s != 0 && PyString_Check(s)) { // const char* sz = PyString_AsString(s); // if (sz && _strcmpi(sz, szSqlState) == 0) // has = true; has = (PyString_CompareWithASCIIString(s, szSqlState) == 0); } Py_XDECREF(s); Py_DECREF(args); } } return has; } static PyObject* GetError(const char* sqlstate, PyObject* exc_class, PyObject* pMsg) { // pMsg // The error message. 
This function takes ownership of this object, so we'll free it if we fail to create an // error. PyObject *pSqlState=0, *pAttrs=0, *pError=0; if (!sqlstate || !*sqlstate) sqlstate = "HY000"; if (!exc_class) exc_class = ExceptionFromSqlState(sqlstate); pAttrs = PyTuple_New(2); if (!pAttrs) { Py_DECREF(pMsg); return 0; } PyTuple_SetItem(pAttrs, 1, pMsg); // pAttrs now owns the pMsg reference; steals a reference, does not increment pSqlState = PyString_FromString(sqlstate); if (!pSqlState) { Py_DECREF(pAttrs); return 0; } PyTuple_SetItem(pAttrs, 0, pSqlState); // pAttrs now owns the pSqlState reference pError = PyEval_CallObject(exc_class, pAttrs); // pError will incref pAttrs Py_XDECREF(pAttrs); return pError; } static const char* DEFAULT_ERROR = "The driver did not supply an error!"; PyObject* RaiseErrorFromHandle(Connection *conn, const char* szFunction, HDBC hdbc, HSTMT hstmt) { // The exception is "set" in the interpreter. This function returns 0 so this can be used in a return statement. PyObject* pError = GetErrorFromHandle(conn, szFunction, hdbc, hstmt); if (pError) { RaiseErrorFromException(pError); Py_DECREF(pError); } return 0; } PyObject* GetErrorFromHandle(Connection *conn, const char* szFunction, HDBC hdbc, HSTMT hstmt) { TRACE("In RaiseError(%s)!\n", szFunction); // Creates and returns an exception from ODBC error information. // // ODBC can generate a chain of errors which we concatenate into one error message. We use the SQLSTATE from the // first message, which seems to be the most detailed, to determine the class of exception. // // If the function fails, for example, if it runs out of memory, zero is returned. // // szFunction // The name of the function that failed. Python generates a useful stack trace, but we often don't know where in // the C++ code we failed. 
SQLSMALLINT nHandleType; SQLHANDLE h; char sqlstate[6] = ""; SQLINTEGER nNativeError; SQLSMALLINT cchMsg; ODBCCHAR sqlstateT[6]; SQLSMALLINT msgLen = 1023; ODBCCHAR *szMsg = (ODBCCHAR*) pyodbc_malloc((msgLen + 1) * sizeof(ODBCCHAR)); if (!szMsg) { PyErr_NoMemory(); return 0; } if (hstmt != SQL_NULL_HANDLE) { nHandleType = SQL_HANDLE_STMT; h = hstmt; } else if (hdbc != SQL_NULL_HANDLE) { nHandleType = SQL_HANDLE_DBC; h = hdbc; } else { nHandleType = SQL_HANDLE_ENV; h = henv; } // unixODBC + PostgreSQL driver 07.01.0003 (Fedora 8 binaries from RPMs) crash if you call SQLGetDiagRec more // than once. I hate to do this, but I'm going to only call it once for non-Windows platforms for now... SQLSMALLINT iRecord = 1; Object msg; for (;;) { szMsg[0] = 0; sqlstateT[0] = 0; nNativeError = 0; cchMsg = 0; SQLRETURN ret; Py_BEGIN_ALLOW_THREADS ret = SQLGetDiagRecW(nHandleType, h, iRecord, (SQLWCHAR*)sqlstateT, &nNativeError, (SQLWCHAR*)szMsg, msgLen, &cchMsg); Py_END_ALLOW_THREADS if (!SQL_SUCCEEDED(ret)) break; // If needed, allocate a bigger error message buffer and retry. if (cchMsg > msgLen - 1) { msgLen = cchMsg + 1; if (!pyodbc_realloc((BYTE**) &szMsg, (msgLen + 1) * sizeof(ODBCCHAR))) { PyErr_NoMemory(); pyodbc_free(szMsg); return 0; } Py_BEGIN_ALLOW_THREADS ret = SQLGetDiagRecW(nHandleType, h, iRecord, (SQLWCHAR*)sqlstateT, &nNativeError, (SQLWCHAR*)szMsg, msgLen, &cchMsg); Py_END_ALLOW_THREADS if (!SQL_SUCCEEDED(ret)) break; } // Not always NULL terminated (MS Access) sqlstateT[5] = 0; // For now, default to UTF-16 if this is not in the context of a connection. // Note that this will not work if the DM is using a different wide encoding (e.g. UTF-32). const char *unicode_enc = conn ? 
conn->metadata_enc.name : ENCSTR_UTF16NE; Object msgStr(PyUnicode_Decode((char*)szMsg, cchMsg * sizeof(ODBCCHAR), unicode_enc, "strict")); if (cchMsg != 0 && msgStr.Get()) { if (iRecord == 1) { // This is the first error message, so save the SQLSTATE for determining the // exception class and append the calling function name. CopySqlState(sqlstateT, sqlstate); msg = PyUnicode_FromFormat("[%s] %V (%ld) (%s)", sqlstate, msgStr.Get(), "(null)", (long)nNativeError, szFunction); if (!msg) { PyErr_NoMemory(); pyodbc_free(szMsg); return 0; } } else { // This is not the first error message, so append to the existing one. Object more(PyUnicode_FromFormat("; [%s] %V (%ld)", sqlstate, msgStr.Get(), "(null)", (long)nNativeError)); if (!more) break; // Something went wrong, but we'll return the msg we have so far Object both(PyUnicode_Concat(msg, more)); if (!both) break; msg = both.Detach(); } } iRecord++; #ifndef _MSC_VER // See non-Windows comment above break; #endif } // Raw message buffer not needed anymore pyodbc_free(szMsg); if (!msg || PyUnicode_GetSize(msg.Get()) == 0) { // This only happens using unixODBC. (Haven't tried iODBC yet.) Either the driver or the driver manager is // buggy and has signaled a fault without recording error information. 
sqlstate[0] = '\0'; msg = PyString_FromString(DEFAULT_ERROR); if (!msg) { PyErr_NoMemory(); return 0; } } return GetError(sqlstate, 0, msg.Detach()); } static bool GetSqlState(HSTMT hstmt, char* szSqlState) { SQLSMALLINT cchMsg; SQLRETURN ret; Py_BEGIN_ALLOW_THREADS ret = SQLGetDiagField(SQL_HANDLE_STMT, hstmt, 1, SQL_DIAG_SQLSTATE, (SQLCHAR*)szSqlState, 5, &cchMsg); Py_END_ALLOW_THREADS return SQL_SUCCEEDED(ret); } bool HasSqlState(HSTMT hstmt, const char* szSqlState) { char szActual[6]; if (!GetSqlState(hstmt, szActual)) return false; return memcmp(szActual, szSqlState, 5) == 0; } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1619054768.0 pyodbc-4.0.32/src/errors.h0000664000175000017500000000756000000000000016707 0ustar00mkleehammermkleehammer #ifndef _ERRORS_H_ #define _ERRORS_H_ // Sets an exception based on the ODBC SQLSTATE and error message and returns zero. If either handle is not available, // pass SQL_NULL_HANDLE. // // conn // The connection object, from which it will use the Unicode encoding. May be null if not available. // // szFunction // The name of the function that failed. Python generates a useful stack trace, but we often don't know where in the // C++ code we failed. // PyObject* RaiseErrorFromHandle(Connection *conn, const char* szFunction, HDBC hdbc, HSTMT hstmt); // Sets an exception using a printf-like error message. // // szSqlState // The optional SQLSTATE reported by ODBC. If not provided (sqlstate is NULL or sqlstate[0] is NULL), "HY000" // (General Error) is used. Note that HY000 causes Error to be used if exc_class is not provided. // // exc_class // The optional exception class (DatabaseError, etc.) to construct. If NULL, the appropriate class will be // determined from the SQLSTATE. // PyObject* RaiseErrorV(const char* sqlstate, PyObject* exc_class, const char* format, ...); // Constructs an exception and returns it. 
// // This function is like RaiseErrorFromHandle, but gives you the ability to examine the error first (in particular, // used to examine the SQLSTATE using HasSqlState). If you want to use the error, call PyErr_SetObject(ex->ob_type, // ex). Otherwise, dispose of the error using Py_DECREF(ex). // // conn // The connection object, from which it will use the Unicode encoding. May be null if not available. // // szFunction // The name of the function that failed. Python generates a useful stack trace, but we often don't know where in the // C++ code we failed. // PyObject* GetErrorFromHandle(Connection *conn, const char* szFunction, HDBC hdbc, HSTMT hstmt); // Returns true if `ex` is a database exception with SQLSTATE `szSqlState`. Returns false otherwise. // // It is safe to call with ex set to zero. The SQLSTATE comparison is case-insensitive. // bool HasSqlState(PyObject* ex, const char* szSqlState); // Returns true if the HSTMT has a diagnostic record with the given SQLSTATE. This is used after SQLGetData call that // returned SQL_SUCCESS_WITH_INFO to see if it also has SQLSTATE 01004, indicating there is more data. // bool HasSqlState(HSTMT hstmt, const char* szSqlState); inline PyObject* RaiseErrorFromException(PyObject* pError) { // PyExceptionInstance_Class doesn't exist in 2.4 #if PY_MAJOR_VERSION >= 3 PyErr_SetObject((PyObject*)Py_TYPE(pError), pError); #else PyObject* cls = (PyObject*)((PyInstance_Check(pError) ? (PyObject*)((PyInstanceObject*)pError)->in_class : (PyObject*)(Py_TYPE(pError)))); PyErr_SetObject(cls, pError); #endif return 0; } inline void CopySqlState(const ODBCCHAR* src, char* dest) { // Copies a SQLSTATE read as SQLWCHAR into a character buffer. We know that SQLSTATEs are // composed of ASCII characters and we need one standard to compare when choosing // exceptions. // // Strangely, even when the error messages are UTF-8, PostgreSQL and MySQL encode the // sqlstate as UTF-16LE. 
We'll simply copy all non-zero bytes, with some checks for // running off the end of the buffers which will work for ASCII, UTF8, and UTF16 LE & BE. // It would work for UTF32 if I increase the size of the ODBCCHAR buffer to handle it. // // (In the worst case, if a driver does something totally weird, we'll have an incomplete // SQLSTATE.) // const char* pchSrc = (const char*)src; const char* pchSrcMax = pchSrc + sizeof(ODBCCHAR) * 5; char* pchDest = dest; // Where we are copying into dest char* pchDestMax = dest + 5; // We know a SQLSTATE is 5 characters long while (pchDest < pchDestMax && pchSrc < pchSrcMax) { if (*pchSrc) *pchDest++ = *pchSrc; pchSrc++; } *pchDest = 0; } #endif // _ERRORS_H_ ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1619054768.0 pyodbc-4.0.32/src/getdata.cpp0000664000175000017500000006236100000000000017337 0ustar00mkleehammermkleehammer // The functions for reading a single value from the database using SQLGetData. There is a different function for // every data type. #include "pyodbc.h" #include "wrapper.h" #include "textenc.h" #include "pyodbcmodule.h" #include "cursor.h" #include "connection.h" #include "errors.h" #include "dbspecific.h" #include #include // NULL terminator notes: // // * pinfo->column_size, from SQLDescribeCol, does not include a NULL terminator. For example, column_size for a // char(10) column would be 10. (Also, when dealing with SQLWCHAR, it is the number of *characters*, not bytes.) // // * When passing a length to PyString_FromStringAndSize and similar Unicode functions, do not add the NULL // terminator -- it will be added automatically. See objects/stringobject.c // // * SQLGetData does not return the NULL terminator in the length indicator. (Therefore, you can pass this value // directly to the Python string functions.) // // * SQLGetData will write a NULL terminator in the output buffer, so you must leave room for it. 
You must also // include the NULL terminator in the buffer length passed to SQLGetData. // // ODBC generalization: // 1) Include NULL terminators in input buffer lengths. // 2) NULL terminators are not used in data lengths. void GetData_init() { PyDateTime_IMPORT; } static byte* ReallocOrFreeBuffer(byte* pb, Py_ssize_t cbNeed); inline bool IsBinaryType(SQLSMALLINT sqltype) { // Is this SQL type (e.g. SQL_VARBINARY) a binary type or not? switch (sqltype) { case SQL_BINARY: case SQL_VARBINARY: case SQL_LONGVARBINARY: return true; } return false; } inline bool IsWideType(SQLSMALLINT sqltype) { switch (sqltype) { case SQL_WCHAR: case SQL_WVARCHAR: case SQL_WLONGVARCHAR: case SQL_SS_XML: case SQL_DB2_XML: return true; } return false; } // TODO: Wont pyodbc_free crash if we didn't use pyodbc_realloc. static bool ReadVarColumn(Cursor* cur, Py_ssize_t iCol, SQLSMALLINT ctype, bool& isNull, byte*& pbResult, Py_ssize_t& cbResult) { // Called to read a variable-length column and return its data in a newly-allocated heap // buffer. // // Returns true if the read was successful and false if the read failed. If the read // failed a Python exception will have been set. // // If a non-null and non-empty value was read, pbResult will be set to a buffer containing // the data and cbResult will be set to the byte length. This length does *not* include a // null terminator. In this case the data *must* be freed using pyodbc_free. // // If a null value was read, isNull is set to true and pbResult and cbResult will be set to // 0. // // If a zero-length value was read, isNull is set to false and pbResult and cbResult will // be set to 0. isNull = false; pbResult = 0; cbResult = 0; const Py_ssize_t cbElement = (Py_ssize_t)(IsWideType(ctype) ? sizeof(ODBCCHAR) : 1); const Py_ssize_t cbNullTerminator = IsBinaryType(ctype) ? 0 : cbElement; // TODO: Make the initial allocation size configurable? 
Py_ssize_t cbAllocated = 4096; Py_ssize_t cbUsed = 0; byte* pb = (byte*)malloc((size_t)cbAllocated); if (!pb) { PyErr_NoMemory(); return false; } SQLRETURN ret = SQL_SUCCESS_WITH_INFO; do { // Call SQLGetData in a loop as long as it keeps returning partial data (ret == // SQL_SUCCESS_WITH_INFO). Each time through, update the buffer pb, cbAllocated, and // cbUsed. Py_ssize_t cbAvailable = cbAllocated - cbUsed; SQLLEN cbData = 0; Py_BEGIN_ALLOW_THREADS ret = SQLGetData(cur->hstmt, (SQLUSMALLINT)(iCol+1), ctype, &pb[cbUsed], (SQLLEN)cbAvailable, &cbData); Py_END_ALLOW_THREADS; TRACE("ReadVarColumn: SQLGetData avail=%d --> ret=%d cbData=%d\n", (int)cbAvailable, (int)ret, (int)cbData); if (!SQL_SUCCEEDED(ret) && ret != SQL_NO_DATA) { RaiseErrorFromHandle(cur->cnxn, "SQLGetData", cur->cnxn->hdbc, cur->hstmt); return false; } if (ret == SQL_SUCCESS && (int)cbData < 0) { // HACK: FreeTDS 0.91 on OS/X returns -4 for NULL data instead of SQL_NULL_DATA // (-1). I've traced into the code and it appears to be the result of assigning -1 // to a SQLLEN. We are going to treat all negative values as NULL. ret = SQL_NULL_DATA; cbData = 0; } // SQLGetData behavior is incredibly quirky: It doesn't tell us the total, the total // we've read, or even the amount just read. It returns the amount just read, plus any // remaining. Unfortunately, the only way to pick them apart is to subtract out the // amount of buffer we supplied. if (ret == SQL_SUCCESS_WITH_INFO) { // This means we read some data, but there is more. SQLGetData is very weird - it // sets cbRead to the number of bytes we read *plus* the amount remaining. Py_ssize_t cbRemaining = 0; // How many more bytes do we need to allocate, not including null? Py_ssize_t cbRead = 0; // How much did we just read, not including null? if (cbData == SQL_NO_TOTAL) { // This special value indicates there is more data but the driver can't tell us // how much more, so we'll just add whatever we want and try again. 
It also // tells us, however, that the buffer is full, so the amount we read equals the // amount we offered. Remember that if the type requires a null terminator, it // will be added *every* time, not just at the end, so we need to subtract it. cbRead = (cbAvailable - cbNullTerminator); cbRemaining = 1024 * 1024; } else if ((Py_ssize_t)cbData >= cbAvailable) { // We offered cbAvailable space, but there was cbData data. The driver filled // the buffer with what it could. Remember that if the type requires a null // terminator, the driver is going to append one on *every* read, so we need to // subtract them out. At least we know the exact data amount now and we can // allocate a precise amount. cbRead = (cbAvailable - cbNullTerminator); cbRemaining = cbData - cbRead; } else { // I would not expect to get here - we apparently read all of the data but the // driver did not return SQL_SUCCESS? cbRead = (cbData - cbNullTerminator); cbRemaining = 0; } cbUsed += cbRead; if (cbRemaining > 0) { // This is a tiny bit complicated by the fact that the data is null terminated, // meaning we haven't actually used up the entire buffer (cbAllocated), only // cbUsed (which should be cbAllocated - cbNullTerminator). Py_ssize_t cbNeed = cbUsed + cbRemaining + cbNullTerminator; pb = ReallocOrFreeBuffer(pb, cbNeed); if (!pb) return false; cbAllocated = cbNeed; } } else if (ret == SQL_SUCCESS) { // We read some data and this is the last batch (so we'll drop out of the // loop). // // If I'm reading the documentation correctly, SQLGetData is not going to // include the null terminator in cbRead. cbUsed += cbData; } } while (ret == SQL_SUCCESS_WITH_INFO); isNull = (ret == SQL_NULL_DATA); if (!isNull && cbUsed > 0) { pbResult = pb; cbResult = cbUsed; } else { pyodbc_free(pb); } return true; } static byte* ReallocOrFreeBuffer(byte* pb, Py_ssize_t cbNeed) { // Attempts to reallocate `pb` to size `cbNeed`. 
If the realloc fails, the original memory // is freed, a memory exception is set, and 0 is returned. Otherwise the new pointer is // returned. byte* pbNew = (byte*)realloc(pb, (size_t)cbNeed); if (pbNew == 0) { pyodbc_free(pb); PyErr_NoMemory(); return 0; } return pbNew; } static PyObject* GetText(Cursor* cur, Py_ssize_t iCol) { // We are reading one of the SQL_WCHAR, SQL_WVARCHAR, etc., and will return // a string. // // If there is no configuration we would expect this to be UTF-16 encoded data. (If no // byte-order-mark, we would expect it to be big-endian.) // // Now, just because the driver is telling us it is wide data doesn't mean it is true. // psqlodbc with UTF-8 will tell us it is wide data but you must ask for single-byte. // (Otherwise it is just UTF-8 with each character stored as 2 bytes.) That's why we allow // the user to configure. ColumnInfo* pinfo = &cur->colinfos[iCol]; const TextEnc& enc = IsWideType(pinfo->sql_type) ? cur->cnxn->sqlwchar_enc : cur->cnxn->sqlchar_enc; bool isNull = false; byte* pbData = 0; Py_ssize_t cbData = 0; if (!ReadVarColumn(cur, iCol, enc.ctype, isNull, pbData, cbData)) return 0; if (isNull) { I(pbData == 0 && cbData == 0); Py_RETURN_NONE; } PyObject* result = TextBufferToObject(enc, pbData, cbData); pyodbc_free(pbData); return result; } static PyObject* GetBinary(Cursor* cur, Py_ssize_t iCol) { // Reads SQL_BINARY. bool isNull = false; byte* pbData = 0; Py_ssize_t cbData = 0; if (!ReadVarColumn(cur, iCol, SQL_C_BINARY, isNull, pbData, cbData)) return 0; if (isNull) { I(pbData == 0 && cbData == 0); Py_RETURN_NONE; } PyObject* obj; #if PY_MAJOR_VERSION >= 3 obj = PyBytes_FromStringAndSize((char*)pbData, cbData); #else obj = PyByteArray_FromStringAndSize((char*)pbData, cbData); #endif pyodbc_free(pbData); return obj; } static PyObject* GetDataUser(Cursor* cur, Py_ssize_t iCol, int conv) { // conv // The index into the connection's user-defined conversions `conv_types`. 
bool isNull = false; byte* pbData = 0; Py_ssize_t cbData = 0; if (!ReadVarColumn(cur, iCol, SQL_C_BINARY, isNull, pbData, cbData)) return 0; if (isNull) { I(pbData == 0 && cbData == 0); Py_RETURN_NONE; } PyObject* value = PyBytes_FromStringAndSize((char*)pbData, cbData); pyodbc_free(pbData); if (!value) return 0; PyObject* result = PyObject_CallFunction(cur->cnxn->conv_funcs[conv], "(O)", value); Py_DECREF(value); if (!result) return 0; return result; } #if PY_VERSION_HEX < 0x02060000 static PyObject* GetDataBuffer(Cursor* cur, Py_ssize_t iCol) { PyObject* str = GetDataString(cur, iCol); if (str == Py_None) return str; PyObject* buffer = 0; if (str) { buffer = PyBuffer_FromObject(str, 0, PyString_GET_SIZE(str)); Py_DECREF(str); // If no buffer, release it. If buffer, the buffer owns it. } return buffer; } #endif static PyObject* GetDataDecimal(Cursor* cur, Py_ssize_t iCol) { // The SQL_NUMERIC_STRUCT support is hopeless (SQL Server ignores scale on input parameters and output columns, // Oracle does something else weird, and many drivers don't support it at all), so we'll rely on the Decimal's // string parsing. Unfortunately, the Decimal author does not pay attention to the locale, so we have to modify // the string ourselves. // // Oracle inserts group separators (commas in US, periods in some countries), so leave room for that too. // // Some databases support a 'money' type which also inserts currency symbols. Since we don't want to keep track of // all these, we'll ignore all characters we don't recognize. We will look for digits, negative sign (which I hope // is universal), and a decimal point ('.' or ',' usually). We'll do everything as Unicode in case currencies, // etc. are too far out. const TextEnc& enc = cur->cnxn->sqlwchar_enc; // I'm going to request the data as Unicode in case there is a weird currency symbol. If // this is a performance problems we may want a flag on this. 
bool isNull = false; byte* pbData = 0; Py_ssize_t cbData = 0; if (!ReadVarColumn(cur, iCol, enc.ctype, isNull, pbData, cbData)) return 0; if (isNull) { I(pbData == 0 && cbData == 0); Py_RETURN_NONE; } Object result(TextBufferToObject(enc, pbData, cbData)); pyodbc_free(pbData); if (!result) return 0; // Remove non-digits and convert the databases decimal to a '.' (required by decimal ctor). // // We are assuming that the decimal point and digits fit within the size of ODBCCHAR. // If Unicode, convert to UTF-8 and copy the digits and punctuation out. Since these are // all ASCII characters, we can ignore any multiple-byte characters. Fortunately, if a // character is multi-byte all bytes will have the high bit set. char* pch; Py_ssize_t cch; #if PY_MAJOR_VERSION >= 3 if (PyUnicode_Check(result)) { pch = (char*)PyUnicode_AsUTF8AndSize(result, &cch); } else { int n = PyBytes_AsStringAndSize(result, &pch, &cch); if (n < 0) pch = 0; } #else Object encoded; if (PyUnicode_Check(result)) { encoded = PyUnicode_AsUTF8String(result); if (!encoded) return 0; result = encoded.Detach(); } int n = PyString_AsStringAndSize(result, &pch, &cch); if (n < 0) pch = 0; #endif if (!pch) return 0; // TODO: Why is this limited to 100? Also, can we perform a check on the original and use // it as-is? char ascii[100]; size_t asciilen = 0; const char* pchMax = pch + cch; while (pch < pchMax) { if ((*pch & 0x80) == 0) { if (*pch == chDecimal) { // Must force it to use '.' since the Decimal class doesn't pay attention to the locale. 
ascii[asciilen++] = '.'; } else if ((*pch >= '0' && *pch <= '9') || *pch == '-') { ascii[asciilen++] = (char)(*pch); } } pch++; } ascii[asciilen] = 0; Object str(PyString_FromStringAndSize(ascii, (Py_ssize_t)asciilen)); if (!str) return 0; PyObject* decimal_type = GetClassForThread("decimal", "Decimal"); if (!decimal_type) return 0; PyObject* decimal = PyObject_CallFunction(decimal_type, "O", str.Get()); Py_DECREF(decimal_type); return decimal; } static PyObject* GetDataBit(Cursor* cur, Py_ssize_t iCol) { SQLCHAR ch; SQLLEN cbFetched; SQLRETURN ret; Py_BEGIN_ALLOW_THREADS ret = SQLGetData(cur->hstmt, (SQLUSMALLINT)(iCol+1), SQL_C_BIT, &ch, sizeof(ch), &cbFetched); Py_END_ALLOW_THREADS if (!SQL_SUCCEEDED(ret)) return RaiseErrorFromHandle(cur->cnxn, "SQLGetData", cur->cnxn->hdbc, cur->hstmt); if (cbFetched == SQL_NULL_DATA) Py_RETURN_NONE; if (ch == SQL_TRUE) Py_RETURN_TRUE; Py_RETURN_FALSE; } static PyObject* GetDataLong(Cursor* cur, Py_ssize_t iCol) { ColumnInfo* pinfo = &cur->colinfos[iCol]; SQLINTEGER value; SQLLEN cbFetched; SQLRETURN ret; SQLSMALLINT nCType = pinfo->is_unsigned ? SQL_C_ULONG : SQL_C_LONG; Py_BEGIN_ALLOW_THREADS ret = SQLGetData(cur->hstmt, (SQLUSMALLINT)(iCol+1), nCType, &value, sizeof(value), &cbFetched); Py_END_ALLOW_THREADS if (!SQL_SUCCEEDED(ret)) return RaiseErrorFromHandle(cur->cnxn, "SQLGetData", cur->cnxn->hdbc, cur->hstmt); if (cbFetched == SQL_NULL_DATA) Py_RETURN_NONE; if (pinfo->is_unsigned) return PyInt_FromLong(*(SQLINTEGER*)&value); return PyInt_FromLong(value); } static PyObject* GetDataLongLong(Cursor* cur, Py_ssize_t iCol) { ColumnInfo* pinfo = &cur->colinfos[iCol]; SQLSMALLINT nCType = pinfo->is_unsigned ? 
SQL_C_UBIGINT : SQL_C_SBIGINT; SQLBIGINT value; SQLLEN cbFetched; SQLRETURN ret; Py_BEGIN_ALLOW_THREADS ret = SQLGetData(cur->hstmt, (SQLUSMALLINT)(iCol+1), nCType, &value, sizeof(value), &cbFetched); Py_END_ALLOW_THREADS if (!SQL_SUCCEEDED(ret)) return RaiseErrorFromHandle(cur->cnxn, "SQLGetData", cur->cnxn->hdbc, cur->hstmt); if (cbFetched == SQL_NULL_DATA) Py_RETURN_NONE; if (pinfo->is_unsigned) return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG)(SQLUBIGINT)value); return PyLong_FromLongLong((PY_LONG_LONG)value); } static PyObject* GetDataDouble(Cursor* cur, Py_ssize_t iCol) { double value; SQLLEN cbFetched = 0; SQLRETURN ret; Py_BEGIN_ALLOW_THREADS ret = SQLGetData(cur->hstmt, (SQLUSMALLINT)(iCol+1), SQL_C_DOUBLE, &value, sizeof(value), &cbFetched); Py_END_ALLOW_THREADS if (!SQL_SUCCEEDED(ret)) return RaiseErrorFromHandle(cur->cnxn, "SQLGetData", cur->cnxn->hdbc, cur->hstmt); if (cbFetched == SQL_NULL_DATA) Py_RETURN_NONE; return PyFloat_FromDouble(value); } static PyObject* GetSqlServerTime(Cursor* cur, Py_ssize_t iCol) { SQL_SS_TIME2_STRUCT value; SQLLEN cbFetched = 0; SQLRETURN ret; Py_BEGIN_ALLOW_THREADS ret = SQLGetData(cur->hstmt, (SQLUSMALLINT)(iCol+1), SQL_C_BINARY, &value, sizeof(value), &cbFetched); Py_END_ALLOW_THREADS if (!SQL_SUCCEEDED(ret)) return RaiseErrorFromHandle(cur->cnxn, "SQLGetData", cur->cnxn->hdbc, cur->hstmt); if (cbFetched == SQL_NULL_DATA) Py_RETURN_NONE; int micros = (int)(value.fraction / 1000); // nanos --> micros return PyTime_FromTime(value.hour, value.minute, value.second, micros); } static PyObject* GetUUID(Cursor* cur, Py_ssize_t iCol) { // REVIEW: Since GUID is a fixed size, do we need to pass the size or cbFetched? 
PYSQLGUID guid; SQLLEN cbFetched = 0; SQLRETURN ret; Py_BEGIN_ALLOW_THREADS ret = SQLGetData(cur->hstmt, (SQLUSMALLINT)(iCol+1), SQL_GUID, &guid, sizeof(guid), &cbFetched); Py_END_ALLOW_THREADS if (!SQL_SUCCEEDED(ret)) return RaiseErrorFromHandle(cur->cnxn, "SQLGetData", cur->cnxn->hdbc, cur->hstmt); if (cbFetched == SQL_NULL_DATA) Py_RETURN_NONE; #if PY_MAJOR_VERSION >= 3 const char* szFmt = "(yyy#)"; #else const char* szFmt = "(sss#)"; #endif Object args(Py_BuildValue(szFmt, NULL, NULL, &guid, (int)sizeof(guid))); if (!args) return 0; PyObject* uuid_type = GetClassForThread("uuid", "UUID"); if (!uuid_type) return 0; PyObject* uuid = PyObject_CallObject(uuid_type, args.Get()); Py_DECREF(uuid_type); return uuid; } static PyObject* GetDataTimestamp(Cursor* cur, Py_ssize_t iCol) { TIMESTAMP_STRUCT value; SQLLEN cbFetched = 0; SQLRETURN ret; struct tm t; Py_BEGIN_ALLOW_THREADS ret = SQLGetData(cur->hstmt, (SQLUSMALLINT)(iCol+1), SQL_C_TYPE_TIMESTAMP, &value, sizeof(value), &cbFetched); Py_END_ALLOW_THREADS if (!SQL_SUCCEEDED(ret)) return RaiseErrorFromHandle(cur->cnxn, "SQLGetData", cur->cnxn->hdbc, cur->hstmt); if (cbFetched == SQL_NULL_DATA) Py_RETURN_NONE; switch (cur->colinfos[iCol].sql_type) { case SQL_TYPE_TIME: { int micros = (int)(value.fraction / 1000); // nanos --> micros return PyTime_FromTime(value.hour, value.minute, value.second, micros); } case SQL_TYPE_DATE: return PyDate_FromDate(value.year, value.month, value.day); } int micros = (int)(value.fraction / 1000); // nanos --> micros if (value.hour == 24) { // some backends support 24:00 (hh:mm) as "end of a day" t.tm_year = value.year - 1900; // tm_year is 1900-based t.tm_mon = value.month - 1; // tm_mon is zero-based t.tm_mday = value.day; t.tm_hour = value.hour; t.tm_min = value.minute; t.tm_sec = value.second; t.tm_isdst = -1; // auto-adjust for dst mktime(&t); // normalize values in t return PyDateTime_FromDateAndTime( t.tm_year + 1900, t.tm_mon + 1, t.tm_mday, t.tm_hour, t.tm_min, t.tm_sec, micros 
); } return PyDateTime_FromDateAndTime(value.year, value.month, value.day, value.hour, value.minute, value.second, micros); } int GetUserConvIndex(Cursor* cur, SQLSMALLINT sql_type) { // If this sql type has a user-defined conversion, the index into the connection's `conv_funcs` array is returned. // Otherwise -1 is returned. for (int i = 0; i < cur->cnxn->conv_count; i++) if (cur->cnxn->conv_types[i] == sql_type) return i; return -1; } PyObject* PythonTypeFromSqlType(Cursor* cur, SQLSMALLINT type) { // Returns a type object ('int', 'str', etc.) for the given ODBC C type. This is used to populate // Cursor.description with the type of Python object that will be returned for each column. // // type // The ODBC C type (SQL_C_CHAR, etc.) of the column. // // The returned object does not have its reference count incremented (is a borrowed // reference). // // Keep this in sync with GetData below. int conv_index = GetUserConvIndex(cur, type); if (conv_index != -1) return (PyObject*)&PyString_Type; PyObject* pytype = 0; bool incref = true; switch (type) { case SQL_CHAR: case SQL_VARCHAR: case SQL_LONGVARCHAR: #if PY_MAJOR_VERSION < 3 if (cur->cnxn->str_enc.ctype == SQL_C_CHAR) pytype = (PyObject*)&PyString_Type; else pytype = (PyObject*)&PyUnicode_Type; #else pytype = (PyObject*)&PyUnicode_Type; #endif break; case SQL_GUID: if (UseNativeUUID()) { pytype = GetClassForThread("uuid", "UUID"); incref = false; } else { #if PY_MAJOR_VERSION < 3 if (cur->cnxn->str_enc.ctype == SQL_C_CHAR) pytype = (PyObject*)&PyString_Type; else pytype = (PyObject*)&PyUnicode_Type; #else pytype = (PyObject*)&PyUnicode_Type; #endif } break; case SQL_WCHAR: case SQL_WVARCHAR: case SQL_WLONGVARCHAR: case SQL_SS_XML: case SQL_DB2_XML: pytype = (PyObject*)&PyUnicode_Type; break; case SQL_DECIMAL: case SQL_NUMERIC: pytype = GetClassForThread("decimal", "Decimal"); incref = false; break; case SQL_REAL: case SQL_FLOAT: case SQL_DOUBLE: pytype = (PyObject*)&PyFloat_Type; break; case SQL_SMALLINT: case 
SQL_INTEGER: case SQL_TINYINT: pytype = (PyObject*)&PyInt_Type; break; case SQL_TYPE_DATE: pytype = (PyObject*)PyDateTimeAPI->DateType; break; case SQL_TYPE_TIME: case SQL_SS_TIME2: // SQL Server 2008+ pytype = (PyObject*)PyDateTimeAPI->TimeType; break; case SQL_TYPE_TIMESTAMP: pytype = (PyObject*)PyDateTimeAPI->DateTimeType; break; case SQL_BIGINT: pytype = (PyObject*)&PyLong_Type; break; case SQL_BIT: pytype = (PyObject*)&PyBool_Type; break; case SQL_BINARY: case SQL_VARBINARY: case SQL_LONGVARBINARY: default: #if PY_VERSION_HEX >= 0x02060000 pytype = (PyObject*)&PyByteArray_Type; #else pytype = (PyObject*)&PyBuffer_Type; #endif break; } if (pytype && incref) Py_INCREF(pytype); return pytype; } PyObject* GetData(Cursor* cur, Py_ssize_t iCol) { // Returns an object representing the value in the row/field. If 0 is returned, an exception has already been set. // // The data is assumed to be the default C type for the column's SQL type. ColumnInfo* pinfo = &cur->colinfos[iCol]; // First see if there is a user-defined conversion. 
int conv_index = GetUserConvIndex(cur, pinfo->sql_type); if (conv_index != -1) return GetDataUser(cur, iCol, conv_index); switch (pinfo->sql_type) { case SQL_WCHAR: case SQL_WVARCHAR: case SQL_WLONGVARCHAR: return GetText(cur, iCol); case SQL_CHAR: case SQL_VARCHAR: case SQL_LONGVARCHAR: case SQL_SS_XML: case SQL_DB2_XML: return GetText(cur, iCol); case SQL_GUID: if (UseNativeUUID()) return GetUUID(cur, iCol); return GetText(cur, iCol); break; case SQL_BINARY: case SQL_VARBINARY: case SQL_LONGVARBINARY: return GetBinary(cur, iCol); case SQL_DECIMAL: case SQL_NUMERIC: case SQL_DB2_DECFLOAT: return GetDataDecimal(cur, iCol); case SQL_BIT: return GetDataBit(cur, iCol); case SQL_TINYINT: case SQL_SMALLINT: case SQL_INTEGER: return GetDataLong(cur, iCol); case SQL_BIGINT: return GetDataLongLong(cur, iCol); case SQL_REAL: case SQL_FLOAT: case SQL_DOUBLE: return GetDataDouble(cur, iCol); case SQL_TYPE_DATE: case SQL_TYPE_TIME: case SQL_TYPE_TIMESTAMP: return GetDataTimestamp(cur, iCol); case SQL_SS_TIME2: return GetSqlServerTime(cur, iCol); } return RaiseErrorV("HY106", ProgrammingError, "ODBC SQL type %d is not yet supported. column-index=%zd type=%d", (int)pinfo->sql_type, iCol, (int)pinfo->sql_type); } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1619054768.0 pyodbc-4.0.32/src/getdata.h0000664000175000017500000000063300000000000016776 0ustar00mkleehammermkleehammer #ifndef _GETDATA_H_ #define _GETDATA_H_ void GetData_init(); PyObject* PythonTypeFromSqlType(Cursor* cur, SQLSMALLINT type); PyObject* GetData(Cursor* cur, Py_ssize_t iCol); /** * If this sql type has a user-defined conversion, the index into the connection's `conv_funcs` array is returned. * Otherwise -1 is returned. 
*/ int GetUserConvIndex(Cursor* cur, SQLSMALLINT sql_type); #endif // _GETDATA_H_ ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1629396312.0 pyodbc-4.0.32/src/params.cpp0000664000175000017500000021747600000000000017222 0ustar00mkleehammermkleehammer// https://msdn.microsoft.com/en-us/library/ms711014(v=vs.85).aspx // // "The length of both the data buffer and the data it contains is measured in bytes, as // opposed to characters." // // https://msdn.microsoft.com/en-us/library/ms711786(v=vs.85).aspx // // Column Size: "For character types, this is the length in characters of the data" #include "pyodbc.h" #include "wrapper.h" #include "textenc.h" #include "pyodbcmodule.h" #include "cursor.h" #include "params.h" #include "connection.h" #include "buffer.h" #include "errors.h" #include "dbspecific.h" #include "row.h" #include inline Connection* GetConnection(Cursor* cursor) { return (Connection*)cursor->cnxn; } struct DAEParam { PyObject *cell; SQLLEN maxlen; }; // Detects and sets the appropriate C type to use for binding the specified Python object. // Also sets the buffer length to use. // Returns false if unsuccessful. static int DetectCType(PyObject *cell, ParamInfo *pi) { PyObject* cls = 0; if (PyBool_Check(cell)) { Type_Bool: pi->ValueType = SQL_C_BIT; pi->BufferLength = 1; } #if PY_MAJOR_VERSION < 3 else if (PyInt_Check(cell)) { Type_Int: pi->ValueType = sizeof(long) == 8 ? 
SQL_C_SBIGINT : SQL_C_LONG; pi->BufferLength = sizeof(long); } #endif else if (PyLong_Check(cell)) { Type_Long: if (pi->ParameterType == SQL_NUMERIC || pi->ParameterType == SQL_DECIMAL) { pi->ValueType = SQL_C_NUMERIC; pi->BufferLength = sizeof(SQL_NUMERIC_STRUCT); } else { pi->ValueType = SQL_C_SBIGINT; pi->BufferLength = sizeof(long long); } } else if (PyFloat_Check(cell)) { Type_Float: pi->ValueType = SQL_C_DOUBLE; pi->BufferLength = sizeof(double); } else if (PyBytes_Check(cell)) { Type_Bytes: // Assume the SQL type is also character (2.x) or binary (3.x). // If it is a max-type (ColumnSize == 0), use DAE. #if PY_MAJOR_VERSION < 3 pi->ValueType = SQL_C_CHAR; #else pi->ValueType = SQL_C_BINARY; #endif pi->BufferLength = pi->ColumnSize ? pi->ColumnSize : sizeof(DAEParam); } else if (PyUnicode_Check(cell)) { Type_Unicode: // Assume the SQL type is also wide character. // If it is a max-type (ColumnSize == 0), use DAE. pi->ValueType = SQL_C_WCHAR; pi->BufferLength = pi->ColumnSize ? pi->ColumnSize * sizeof(SQLWCHAR) : sizeof(DAEParam); } else if (PyDateTime_Check(cell)) { Type_DateTime: pi->ValueType = SQL_C_TYPE_TIMESTAMP; pi->BufferLength = sizeof(SQL_TIMESTAMP_STRUCT); } else if (PyDate_Check(cell)) { Type_Date: pi->ValueType = SQL_C_TYPE_DATE; pi->BufferLength = sizeof(SQL_DATE_STRUCT); } else if (PyTime_Check(cell)) { Type_Time: if (pi->ParameterType == SQL_SS_TIME2) { pi->ValueType = SQL_C_BINARY; pi->BufferLength = sizeof(SQL_SS_TIME2_STRUCT); } else { pi->ValueType = SQL_C_TYPE_TIME; pi->BufferLength = sizeof(SQL_TIME_STRUCT); } } #if PY_VERSION_HEX >= 0x02060000 else if (PyByteArray_Check(cell)) { Type_ByteArray: pi->ValueType = SQL_C_BINARY; pi->BufferLength = pi->ColumnSize ? pi->ColumnSize : sizeof(DAEParam); } #endif #if PY_MAJOR_VERSION < 3 else if (PyBuffer_Check(cell)) { pi->ValueType = SQL_C_BINARY; pi->BufferLength = pi->ColumnSize && PyBuffer_GetMemory(cell, 0) >= 0 ? 
pi->ColumnSize : sizeof(DAEParam); } #endif else if (cell == Py_None || cell == null_binary) { // Use the SQL type to guess what Nones should be inserted as here. switch (pi->ParameterType) { case SQL_CHAR: case SQL_VARCHAR: case SQL_LONGVARCHAR: goto Type_Bytes; case SQL_WCHAR: case SQL_WVARCHAR: case SQL_WLONGVARCHAR: goto Type_Unicode; case SQL_DECIMAL: case SQL_NUMERIC: goto Type_Decimal; case SQL_BIGINT: goto Type_Long; case SQL_SMALLINT: case SQL_INTEGER: case SQL_TINYINT: #if PY_MAJOR_VERSION < 3 goto Type_Int; #else goto Type_Long; #endif case SQL_REAL: case SQL_FLOAT: case SQL_DOUBLE: goto Type_Float; case SQL_BIT: goto Type_Bool; case SQL_BINARY: case SQL_VARBINARY: case SQL_LONGVARBINARY: #if PY_VERSION_HEX >= 0x02060000 goto Type_ByteArray; #else goto Type_Bytes; #endif case SQL_TYPE_DATE: goto Type_Date; case SQL_SS_TIME2: case SQL_TYPE_TIME: goto Type_Time; case SQL_TYPE_TIMESTAMP: goto Type_DateTime; case SQL_GUID: goto Type_UUID; default: goto Type_Bytes; } } else if (IsInstanceForThread(cell, "uuid", "UUID", &cls) && cls) { Type_UUID: // UUID pi->ValueType = SQL_C_GUID; pi->BufferLength = 16; } else if (IsInstanceForThread(cell, "decimal", "Decimal", &cls) && cls) { Type_Decimal: pi->ValueType = SQL_C_NUMERIC; pi->BufferLength = sizeof(SQL_NUMERIC_STRUCT); } else { RaiseErrorV(0, ProgrammingError, "Unknown object type %s during describe", cell->ob_type->tp_name); return false; } return true; } #define WRITEOUT(type, ptr, val, indv) { *(type*)(*ptr) = (val); *ptr += sizeof(type); indv = sizeof(type); } // Convert Python object into C data for binding. // Output pointer is written to with data, indicator, and updated. // Returns false if object could not be converted. static int PyToCType(Cursor *cur, unsigned char **outbuf, PyObject *cell, ParamInfo *pi) { PyObject *cls = 0; // TODO: Any way to make this a switch (O(1)) or similar instead of if-else chain? // TODO: Otherwise, rearrange these cases in order of frequency... 
SQLLEN ind; if (PyBool_Check(cell)) { if (pi->ValueType != SQL_C_BIT) return false; WRITEOUT(char, outbuf, cell == Py_True, ind); } #if PY_MAJOR_VERSION < 3 else if (PyInt_Check(cell)) { if (pi->ValueType != (sizeof(long) == 8 ? SQL_C_SBIGINT : SQL_C_LONG)) return false; WRITEOUT(long, outbuf, PyInt_AS_LONG(cell), ind); } #endif else if (PyLong_Check(cell)) { if (pi->ValueType == SQL_C_SBIGINT) { WRITEOUT(long long, outbuf, PyLong_AsLongLong(cell), ind); } else if (pi->ValueType == SQL_C_NUMERIC) { // Convert a PyLong into a SQL_NUMERIC_STRUCT, without losing precision // or taking an unnecessary trip through character strings. SQL_NUMERIC_STRUCT *pNum = (SQL_NUMERIC_STRUCT*)*outbuf; PyObject *absVal = PyNumber_Absolute(cell); if (pi->DecimalDigits) { static PyObject *scaler_table[38]; static PyObject *tenObject; // Need to scale by 10**pi->DecimalDigits if (pi->DecimalDigits > 38) { NumericOverflow: RaiseErrorV(0, ProgrammingError, "Numeric overflow"); Py_XDECREF(absVal); return false; } if (!scaler_table[pi->DecimalDigits - 1]) { if (!tenObject) tenObject = PyInt_FromLong(10); PyObject *scaleObj = PyInt_FromLong(pi->DecimalDigits); scaler_table[pi->DecimalDigits - 1] = PyNumber_Power(tenObject, scaleObj, Py_None); Py_XDECREF(scaleObj); } PyObject *scaledVal = PyNumber_Multiply(absVal, scaler_table[pi->DecimalDigits - 1]); Py_XDECREF(absVal); absVal = scaledVal; } pNum->precision = (SQLCHAR)pi->ColumnSize; pNum->scale = (SQLCHAR)pi->DecimalDigits; pNum->sign = _PyLong_Sign(cell) >= 0; if (_PyLong_AsByteArray((PyLongObject*)absVal, pNum->val, sizeof(pNum->val), 1, 0)) goto NumericOverflow; Py_XDECREF(absVal); *outbuf += pi->BufferLength; ind = sizeof(SQL_NUMERIC_STRUCT); } else return false; } else if (PyFloat_Check(cell)) { if (pi->ValueType != SQL_C_DOUBLE) return false; WRITEOUT(double, outbuf, PyFloat_AS_DOUBLE(cell), ind); } else if (PyBytes_Check(cell)) { #if PY_MAJOR_VERSION < 3 if (pi->ValueType != SQL_C_CHAR) #else if (pi->ValueType != SQL_C_BINARY) #endif 
return false; Py_ssize_t len = PyBytes_GET_SIZE(cell); if (!pi->ColumnSize) // DAE { DAEParam *pParam = (DAEParam*)*outbuf; Py_INCREF(cell); pParam->cell = cell; pParam->maxlen = cur->cnxn->GetMaxLength(pi->ValueType); *outbuf += sizeof(DAEParam); ind = cur->cnxn->need_long_data_len ? SQL_LEN_DATA_AT_EXEC((SQLLEN)len) : SQL_DATA_AT_EXEC; } else { if (len > pi->BufferLength) { RaiseErrorV(0, ProgrammingError, "String data, right truncation: length %u buffer %u", len, pi->BufferLength); return false; } memcpy(*outbuf, PyBytes_AS_STRING(cell), len); *outbuf += pi->BufferLength; ind = len; } } else if (PyUnicode_Check(cell)) { if (pi->ValueType != SQL_C_WCHAR) return false; Py_ssize_t len = PyUnicode_GET_SIZE(cell); // Same size Different size // DAE DAE only Convert + DAE // non-DAE Copy Convert + Copy if (sizeof(Py_UNICODE) != sizeof(SQLWCHAR)) { const TextEnc& enc = cur->cnxn->unicode_enc; Object encoded(PyCodec_Encode(cell, enc.name, "strict")); if (!encoded) return false; if (enc.optenc == OPTENC_NONE && !PyBytes_CheckExact(encoded)) { PyErr_Format(PyExc_TypeError, "Unicode write encoding '%s' returned unexpected data type: %s", enc.name, encoded.Get()->ob_type->tp_name); return false; } len = PyBytes_GET_SIZE(encoded); if (!pi->ColumnSize) { // DAE DAEParam *pParam = (DAEParam*)*outbuf; Py_INCREF(cell); pParam->cell = encoded.Detach(); pParam->maxlen = cur->cnxn->GetMaxLength(pi->ValueType); *outbuf += sizeof(DAEParam); ind = cur->cnxn->need_long_data_len ? 
SQL_LEN_DATA_AT_EXEC((SQLLEN)len) : SQL_DATA_AT_EXEC; } else { if (len > pi->BufferLength) { RaiseErrorV(0, ProgrammingError, "String data, right truncation: length %u buffer %u", len, pi->BufferLength); return false; } memcpy(*outbuf, PyBytes_AS_STRING((PyObject*)encoded), len); *outbuf += pi->BufferLength; ind = len; } } else { len *= sizeof(SQLWCHAR); if (!pi->ColumnSize) // DAE { Py_INCREF(cell); DAEParam *pParam = (DAEParam*)*outbuf; pParam->cell = cell; pParam->maxlen= cur->cnxn->GetMaxLength(pi->ValueType); *outbuf += sizeof(DAEParam); ind = cur->cnxn->need_long_data_len ? SQL_LEN_DATA_AT_EXEC((SQLLEN)len) : SQL_DATA_AT_EXEC; } else { if (len > pi->BufferLength) { RaiseErrorV(0, ProgrammingError, "String data, right truncation: length %u buffer %u", len, pi->BufferLength); return false; } memcpy(*outbuf, PyUnicode_AS_DATA(cell), len); *outbuf += pi->BufferLength; ind = len; } } } else if (PyDateTime_Check(cell)) { if (pi->ValueType != SQL_C_TYPE_TIMESTAMP) return false; SQL_TIMESTAMP_STRUCT *pts = (SQL_TIMESTAMP_STRUCT*)*outbuf; pts->year = PyDateTime_GET_YEAR(cell); pts->month = PyDateTime_GET_MONTH(cell); pts->day = PyDateTime_GET_DAY(cell); pts->hour = PyDateTime_DATE_GET_HOUR(cell); pts->minute = PyDateTime_DATE_GET_MINUTE(cell); pts->second = PyDateTime_DATE_GET_SECOND(cell); // Truncate the fraction according to precision size_t digits = min(9, pi->DecimalDigits); long fast_pow10[] = {1,10,100,1000,10000,100000,1000000,10000000,100000000,1000000000}; SQLUINTEGER milliseconds = PyDateTime_DATE_GET_MICROSECOND(cell) * 1000; pts->fraction = milliseconds - (milliseconds % fast_pow10[9 - digits]); *outbuf += sizeof(SQL_TIMESTAMP_STRUCT); ind = sizeof(SQL_TIMESTAMP_STRUCT); } else if (PyDate_Check(cell)) { if (pi->ValueType != SQL_C_TYPE_DATE) return false; SQL_DATE_STRUCT *pds = (SQL_DATE_STRUCT*)*outbuf; pds->year = PyDateTime_GET_YEAR(cell); pds->month = PyDateTime_GET_MONTH(cell); pds->day = PyDateTime_GET_DAY(cell); *outbuf += sizeof(SQL_DATE_STRUCT); 
ind = sizeof(SQL_DATE_STRUCT); } else if (PyTime_Check(cell)) { if (pi->ParameterType == SQL_SS_TIME2) { if (pi->ValueType != SQL_C_BINARY) return false; SQL_SS_TIME2_STRUCT *pt2s = (SQL_SS_TIME2_STRUCT*)*outbuf; pt2s->hour = PyDateTime_TIME_GET_HOUR(cell); pt2s->minute = PyDateTime_TIME_GET_MINUTE(cell); pt2s->second = PyDateTime_TIME_GET_SECOND(cell); // This is in units of nanoseconds. pt2s->fraction = PyDateTime_TIME_GET_MICROSECOND(cell)*1000; *outbuf += sizeof(SQL_SS_TIME2_STRUCT); ind = sizeof(SQL_SS_TIME2_STRUCT); } else { if (pi->ValueType != SQL_C_TYPE_TIME) return false; SQL_TIME_STRUCT *pts = (SQL_TIME_STRUCT*)*outbuf; pts->hour = PyDateTime_TIME_GET_HOUR(cell); pts->minute = PyDateTime_TIME_GET_MINUTE(cell); pts->second = PyDateTime_TIME_GET_SECOND(cell); *outbuf += sizeof(SQL_TIME_STRUCT); ind = sizeof(SQL_TIME_STRUCT); } } #if PY_VERSION_HEX >= 0x02060000 else if (PyByteArray_Check(cell)) { if (pi->ValueType != SQL_C_BINARY) return false; Py_ssize_t len = PyByteArray_GET_SIZE(cell); if (!pi->ColumnSize) // DAE { DAEParam *pParam = (DAEParam*)*outbuf; Py_INCREF(cell); pParam->cell = cell; pParam->maxlen = cur->cnxn->GetMaxLength(pi->ValueType); *outbuf += sizeof(DAEParam); ind = cur->cnxn->need_long_data_len ? SQL_LEN_DATA_AT_EXEC((SQLLEN)len) : SQL_DATA_AT_EXEC; } else { if (len > pi->BufferLength) { RaiseErrorV(0, ProgrammingError, "String data, right truncation: length %u buffer %u", len, pi->BufferLength); return false; } memcpy(*outbuf, PyByteArray_AS_STRING(cell), len); *outbuf += pi->BufferLength; ind = len; } } #endif #if PY_MAJOR_VERSION < 3 else if (PyBuffer_Check(cell)) { if (pi->ValueType != SQL_C_BINARY) return false; const char* pb; Py_ssize_t len = PyBuffer_GetMemory(cell, &pb); if (len < 0) { // DAE DAEParam *pParam = (DAEParam*)*outbuf; len = PyBuffer_Size(cell); Py_INCREF(cell); pParam->cell = cell; pParam->maxlen = cur->cnxn->GetMaxLength(pi->ValueType); *outbuf += pi->BufferLength; ind = cur->cnxn->need_long_data_len ? 
SQL_LEN_DATA_AT_EXEC((SQLLEN)len) : SQL_DATA_AT_EXEC; } else { if (len > pi->BufferLength) { RaiseErrorV(0, ProgrammingError, "String data, right truncation: row %u column %u", 0 /* TODO */, 0 /* TODO */); return false; } memcpy(*outbuf, pb, len); *outbuf += pi->BufferLength; ind = len; } } #endif else if (IsInstanceForThread(cell, "uuid", "UUID", &cls) && cls) { if (pi->ValueType != SQL_C_GUID) return false; pi->BufferLength = 16; // Do we need to use "bytes" on a big endian machine? Object b(PyObject_GetAttrString(cell, "bytes_le")); if (!b) return false; memcpy(*outbuf, PyBytes_AS_STRING(b.Get()), sizeof(SQLGUID)); *outbuf += pi->BufferLength; ind = 16; } else if (IsInstanceForThread(cell, "decimal", "Decimal", &cls) && cls) { if (pi->ValueType != SQL_C_NUMERIC) return false; // Normalise, then get sign, exponent, and digits. PyObject *normCell = PyObject_CallMethod(cell, "normalize", 0); if (!normCell) return false; PyObject *cellParts = PyObject_CallMethod(normCell, "as_tuple", 0); if (!cellParts) return false; Py_XDECREF(normCell); SQL_NUMERIC_STRUCT *pNum = (SQL_NUMERIC_STRUCT*)*outbuf; pNum->sign = !PyInt_AsLong(PyTuple_GET_ITEM(cellParts, 0)); PyObject* digits = PyTuple_GET_ITEM(cellParts, 1); long exp = PyInt_AsLong(PyTuple_GET_ITEM(cellParts, 2)); Py_ssize_t numDigits = PyTuple_GET_SIZE(digits); // PyDecimal is digits * 10**exp = digits / 10**-exp // SQL_NUMERIC_STRUCT is val / 10**scale Py_ssize_t scaleDiff = pi->DecimalDigits + exp; if (scaleDiff < 0) { RaiseErrorV(0, ProgrammingError, "Converting decimal loses precision"); return false; } // Append '0's to the end of the digits to effect the scaling. 
PyObject *newDigits = PyTuple_New(numDigits + scaleDiff); for (Py_ssize_t i = 0; i < numDigits; i++) { PyTuple_SET_ITEM(newDigits, i, PyInt_FromLong(PyNumber_AsSsize_t(PyTuple_GET_ITEM(digits, i), 0))); } for (Py_ssize_t i = numDigits; i < scaleDiff + numDigits; i++) { PyTuple_SET_ITEM(newDigits, i, PyInt_FromLong(0)); } PyObject *args = Py_BuildValue("((iOi))", 0, newDigits, 0); PyObject *scaledDecimal = PyObject_CallObject((PyObject*)cell->ob_type, args); PyObject *digitLong = PyNumber_Long(scaledDecimal); Py_XDECREF(args); Py_XDECREF(scaledDecimal); Py_XDECREF(cellParts); pNum->precision = (SQLCHAR)pi->ColumnSize; pNum->scale = (SQLCHAR)pi->DecimalDigits; int ret = _PyLong_AsByteArray((PyLongObject*)digitLong, pNum->val, sizeof(pNum->val), 1, 0); Py_XDECREF(digitLong); if (ret) { PyErr_Clear(); RaiseErrorV(0, ProgrammingError, "Numeric overflow"); return false; } *outbuf += pi->BufferLength; ind = sizeof(SQL_NUMERIC_STRUCT); } else if (cell == Py_None || cell == null_binary) { *outbuf += pi->BufferLength; ind = SQL_NULL_DATA; } else { RaiseErrorV(0, ProgrammingError, "Unknown object type: %s",cell->ob_type->tp_name); return false; } *(SQLLEN*)(*outbuf) = ind; *outbuf += sizeof(SQLLEN); return true; } static bool GetParamType(Cursor* cur, Py_ssize_t iParam, SQLSMALLINT& type); static void FreeInfos(ParamInfo* a, Py_ssize_t count) { for (Py_ssize_t i = 0; i < count; i++) { if (a[i].allocated) pyodbc_free(a[i].ParameterValuePtr); if (a[i].ParameterType == SQL_SS_TABLE && a[i].nested) FreeInfos(a[i].nested, a[i].maxlength); Py_XDECREF(a[i].pObject); } pyodbc_free(a); } static bool GetNullInfo(Cursor* cur, Py_ssize_t index, ParamInfo& info) { if (!GetParamType(cur, index, info.ParameterType)) return false; info.ValueType = SQL_C_DEFAULT; info.ColumnSize = 1; info.StrLen_or_Ind = SQL_NULL_DATA; return true; } static bool GetNullBinaryInfo(Cursor* cur, Py_ssize_t index, ParamInfo& info) { info.ValueType = SQL_C_BINARY; info.ParameterType = SQL_BINARY; info.ColumnSize = 
1; info.ParameterValuePtr = 0; info.StrLen_or_Ind = SQL_NULL_DATA; return true; } #if PY_MAJOR_VERSION >= 3 static bool GetBytesInfo(Cursor* cur, Py_ssize_t index, PyObject* param, ParamInfo& info, bool isTVP) { // The Python 3 version that writes bytes as binary data. Py_ssize_t cb = PyBytes_GET_SIZE(param); info.ValueType = SQL_C_BINARY; info.ColumnSize = isTVP ? 0 : (SQLUINTEGER)max(cb, 1); SQLLEN maxlength = cur->cnxn->GetMaxLength(info.ValueType); if (maxlength == 0 || cb <= maxlength || isTVP) { info.ParameterType = SQL_VARBINARY; info.StrLen_or_Ind = cb; info.BufferLength = (SQLLEN)info.ColumnSize; info.ParameterValuePtr = PyBytes_AS_STRING(param); } else { // Too long to pass all at once, so we'll provide the data at execute. info.ParameterType = SQL_LONGVARBINARY; info.StrLen_or_Ind = cur->cnxn->need_long_data_len ? SQL_LEN_DATA_AT_EXEC((SQLLEN)cb) : SQL_DATA_AT_EXEC; info.ParameterValuePtr = &info; info.BufferLength = sizeof(ParamInfo*); info.pObject = param; Py_INCREF(info.pObject); info.maxlength = maxlength; } return true; } #endif #if PY_MAJOR_VERSION < 3 static bool GetStrInfo(Cursor* cur, Py_ssize_t index, PyObject* param, ParamInfo& info, bool isTVP) { const TextEnc& enc = cur->cnxn->str_enc; info.ValueType = enc.ctype; Py_ssize_t cch = PyString_GET_SIZE(param); info.ColumnSize = isTVP ? 0 : (SQLUINTEGER)max(cch, 1); Object encoded; if (enc.optenc == OPTENC_RAW) { // Take the text as-is. This is not really a good idea since users will need to make // sure the encoding over the wire matches their system encoding, but it will be wanted // and it is fast when you get it to work. encoded = param; } else { // Encode the text with the user's encoding. encoded = PyCodec_Encode(param, enc.name, "strict"); if (!encoded) return false; if (!PyBytes_CheckExact(encoded)) { // Not all encodings return bytes. 
PyErr_Format(PyExc_TypeError, "Unicode read encoding '%s' returned unexpected data type: %s", enc.name, encoded.Get()->ob_type->tp_name); return false; } } Py_ssize_t cb = PyBytes_GET_SIZE(encoded); info.pObject = encoded.Detach(); SQLLEN maxlength = cur->cnxn->GetMaxLength(info.ValueType); if (maxlength == 0 || cb <= maxlength || isTVP) { info.ParameterType = (enc.ctype == SQL_C_CHAR) ? SQL_VARCHAR : SQL_WVARCHAR; info.ParameterValuePtr = PyBytes_AS_STRING(info.pObject); info.StrLen_or_Ind = (SQLINTEGER)cb; } else { // Too long to pass all at once, so we'll provide the data at execute. info.ParameterType = (enc.ctype == SQL_C_CHAR) ? SQL_LONGVARCHAR : SQL_WLONGVARCHAR; info.ParameterValuePtr = &info; info.BufferLength = sizeof(ParamInfo*); info.StrLen_or_Ind = cur->cnxn->need_long_data_len ? SQL_LEN_DATA_AT_EXEC((SQLINTEGER)cb) : SQL_DATA_AT_EXEC; info.maxlength = maxlength; } return true; } #endif static bool GetUnicodeInfo(Cursor* cur, Py_ssize_t index, PyObject* param, ParamInfo& info, bool isTVP) { const TextEnc& enc = cur->cnxn->unicode_enc; info.ValueType = enc.ctype; Object encoded(PyCodec_Encode(param, enc.name, "strict")); if (!encoded) return false; if (enc.optenc == OPTENC_NONE && !PyBytes_CheckExact(encoded)) { PyErr_Format(PyExc_TypeError, "Unicode write encoding '%s' returned unexpected data type: %s", enc.name, encoded.Get()->ob_type->tp_name); return false; } Py_ssize_t cb = PyBytes_GET_SIZE(encoded); int denom = 1; if (enc.optenc == OPTENC_UTF16) { denom = 2; } else if (enc.optenc == OPTENC_UTF32) { denom = 4; } info.ColumnSize = isTVP ? 0 : (SQLUINTEGER)max(cb / denom, 1); info.pObject = encoded.Detach(); SQLLEN maxlength = cur->cnxn->GetMaxLength(enc.ctype); if (maxlength == 0 || cb <= maxlength || isTVP) { info.ParameterType = (enc.ctype == SQL_C_CHAR) ? 
SQL_VARCHAR : SQL_WVARCHAR; info.ParameterValuePtr = PyBytes_AS_STRING(info.pObject); info.BufferLength = (SQLINTEGER)cb; info.StrLen_or_Ind = (SQLINTEGER)cb; } else { // Too long to pass all at once, so we'll provide the data at execute. info.ParameterType = (enc.ctype == SQL_C_CHAR) ? SQL_LONGVARCHAR : SQL_WLONGVARCHAR; info.ParameterValuePtr = &info; info.BufferLength = sizeof(ParamInfo*); info.StrLen_or_Ind = cur->cnxn->need_long_data_len ? SQL_LEN_DATA_AT_EXEC((SQLINTEGER)cb) : SQL_DATA_AT_EXEC; info.maxlength = maxlength; } return true; } static bool GetBooleanInfo(Cursor* cur, Py_ssize_t index, PyObject* param, ParamInfo& info) { info.ValueType = SQL_C_BIT; info.ParameterType = SQL_BIT; info.StrLen_or_Ind = 1; info.Data.ch = (unsigned char)(param == Py_True ? 1 : 0); info.ParameterValuePtr = &info.Data.ch; return true; } static bool GetDateTimeInfo(Cursor* cur, Py_ssize_t index, PyObject* param, ParamInfo& info) { info.Data.timestamp.year = (SQLSMALLINT) PyDateTime_GET_YEAR(param); info.Data.timestamp.month = (SQLUSMALLINT)PyDateTime_GET_MONTH(param); info.Data.timestamp.day = (SQLUSMALLINT)PyDateTime_GET_DAY(param); info.Data.timestamp.hour = (SQLUSMALLINT)PyDateTime_DATE_GET_HOUR(param); info.Data.timestamp.minute = (SQLUSMALLINT)PyDateTime_DATE_GET_MINUTE(param); info.Data.timestamp.second = (SQLUSMALLINT)PyDateTime_DATE_GET_SECOND(param); // SQL Server chokes if the fraction has more data than the database supports. We expect other databases to be the // same, so we reduce the value to what the database supports. http://support.microsoft.com/kb/263872 int precision = ((Connection*)cur->cnxn)->datetime_precision - 20; // (20 includes a separating period) if (precision <= 0) { info.Data.timestamp.fraction = 0; } else { info.Data.timestamp.fraction = (SQLUINTEGER)(PyDateTime_DATE_GET_MICROSECOND(param) * 1000); // 1000 == micro -> nano // (How many leading digits do we want to keep? 
With SQL Server 2005, this should be 3: 123000000) int keep = (int)pow(10.0, 9-min(9, precision)); info.Data.timestamp.fraction = info.Data.timestamp.fraction / keep * keep; info.DecimalDigits = (SQLSMALLINT)precision; } info.ValueType = SQL_C_TIMESTAMP; info.ParameterType = SQL_TIMESTAMP; info.ColumnSize = (SQLUINTEGER)((Connection*)cur->cnxn)->datetime_precision; info.StrLen_or_Ind = sizeof(TIMESTAMP_STRUCT); info.ParameterValuePtr = &info.Data.timestamp; return true; } static bool GetDateInfo(Cursor* cur, Py_ssize_t index, PyObject* param, ParamInfo& info) { info.Data.date.year = (SQLSMALLINT) PyDateTime_GET_YEAR(param); info.Data.date.month = (SQLUSMALLINT)PyDateTime_GET_MONTH(param); info.Data.date.day = (SQLUSMALLINT)PyDateTime_GET_DAY(param); info.ValueType = SQL_C_TYPE_DATE; info.ParameterType = SQL_TYPE_DATE; info.ColumnSize = 10; info.ParameterValuePtr = &info.Data.date; info.StrLen_or_Ind = sizeof(DATE_STRUCT); return true; } static bool GetTimeInfo(Cursor* cur, Py_ssize_t index, PyObject* param, ParamInfo& info) { info.Data.time.hour = (SQLUSMALLINT)PyDateTime_TIME_GET_HOUR(param); info.Data.time.minute = (SQLUSMALLINT)PyDateTime_TIME_GET_MINUTE(param); info.Data.time.second = (SQLUSMALLINT)PyDateTime_TIME_GET_SECOND(param); info.ValueType = SQL_C_TYPE_TIME; info.ParameterType = SQL_TYPE_TIME; info.ColumnSize = 8; info.ParameterValuePtr = &info.Data.time; info.StrLen_or_Ind = sizeof(TIME_STRUCT); return true; } inline bool NeedsBigInt(long long ll) { // NOTE: Smallest 32-bit int should be -214748368 but the MS compiler v.1900 AMD64 // says that (10 < -2147483648). Perhaps I miscalculated the minimum? 
return ll < -2147483647 || ll > 2147483647; } #if PY_MAJOR_VERSION < 3 static bool GetIntInfo(Cursor* cur, Py_ssize_t index, PyObject* param, ParamInfo& info, bool isTVP) { long long value = PyLong_AsLongLong(param); if (PyErr_Occurred()) return false; if (isTVP || NeedsBigInt(value)) { info.Data.i64 = (INT64)value; info.ValueType = SQL_C_SBIGINT; info.ParameterType = SQL_BIGINT; info.ParameterValuePtr = &info.Data.i64; info.StrLen_or_Ind = 8; } else { info.Data.i32 = (int)value; info.ValueType = SQL_C_LONG; info.ParameterType = SQL_INTEGER; info.ParameterValuePtr = &info.Data.i32; info.StrLen_or_Ind = 4; } return true; } #endif static bool GetLongInfo(Cursor* cur, Py_ssize_t index, PyObject* param, ParamInfo& info, bool isTVP) { // Since some drivers like Access don't support BIGINT, we use INTEGER when possible. // Unfortunately this may mean that we end up with two execution plans for the same SQL. // We could use SQLDescribeParam but that's kind of expensive. long long value = PyLong_AsLongLong(param); if (PyErr_Occurred()) return false; if (isTVP || NeedsBigInt(value)) { info.Data.i64 = (INT64)value; info.ValueType = SQL_C_SBIGINT; info.ParameterType = SQL_BIGINT; info.ParameterValuePtr = &info.Data.i64; info.StrLen_or_Ind = 8; } else { info.Data.i32 = (int)value; info.ValueType = SQL_C_LONG; info.ParameterType = SQL_INTEGER; info.ParameterValuePtr = &info.Data.i32; info.StrLen_or_Ind = 4; } return true; } static bool GetFloatInfo(Cursor* cur, Py_ssize_t index, PyObject* param, ParamInfo& info) { // Python floats are usually numeric values, but can also be "Infinity" or "NaN". // https://docs.python.org/3/library/functions.html#float // PyFloat_AsDouble() does not generate an error for Infinity/NaN, and it is not // easy to check for those values. Typically, the database will reject them. 
    double value = PyFloat_AsDouble(param);
    if (PyErr_Occurred())
        return false;

    info.Data.dbl = value;
    info.ValueType = SQL_C_DOUBLE;
    info.ParameterType = SQL_DOUBLE;
    info.ParameterValuePtr = &info.Data.dbl;
    info.ColumnSize = 15;                       // decimal digits of precision in a double
    info.StrLen_or_Ind = sizeof(double);
    return true;
}

// Builds a plain (non-exponential) ASCII rendering of a Python Decimal from its
// as_tuple() components: sign (0/1), tuple of digit ints, and base-10 exponent.
// Returns a pyodbc_malloc'd NUL-terminated string, or 0 on allocation failure
// (caller raises MemoryError).  The three branches below cover: trailing zeros,
// an interior decimal point, and a leading "0." with zero padding.
static char* CreateDecimalString(long sign, PyObject* digits, long exp)
{
    // Allocate an ASCII string containing the given decimal.

    long count = (long)PyTuple_GET_SIZE(digits);

    char* pch;
    long len;

    if (exp >= 0)
    {
        // (1 2 3) exp = 2 --> '12300'
        len = sign + count + exp + 1; // 1: NULL
        pch = (char*)pyodbc_malloc((size_t)len);
        if (pch)
        {
            char* p = pch;
            if (sign)
                *p++ = '-';
            for (long i = 0; i < count; i++)
                *p++ = (char)('0' + PyInt_AS_LONG(PyTuple_GET_ITEM(digits, i)));
            for (long i = 0; i < exp; i++)
                *p++ = '0';
            *p = 0;
        }
    }
    else if (-exp < count)
    {
        // (1 2 3) exp = -2 --> 1.23 : prec = 3, scale = 2
        len = sign + count + 2; // 2: decimal + NULL
        pch = (char*)pyodbc_malloc((size_t)len);
        if (pch)
        {
            char* p = pch;
            if (sign)
                *p++ = '-';
            int i = 0;
            for (; i < (count + exp); i++)
                *p++ = (char)('0' + PyInt_AS_LONG(PyTuple_GET_ITEM(digits, i)));
            *p++ = '.';
            for (; i < count; i++)
                *p++ = (char)('0' + PyInt_AS_LONG(PyTuple_GET_ITEM(digits, i)));
            *p++ = 0;
        }
    }
    else
    {
        // (1 2 3) exp = -5 --> 0.00123 : prec = 5, scale = 5
        len = sign + -exp + 3; // 3: leading zero + decimal + NULL
        pch = (char*)pyodbc_malloc((size_t)len);
        if (pch)
        {
            char* p = pch;
            if (sign)
                *p++ = '-';
            *p++ = '0';
            *p++ = '.';
            for (int i = 0; i < -(exp + count); i++)
                *p++ = '0';
            for (int i = 0; i < count; i++)
                *p++ = (char)('0' + PyInt_AS_LONG(PyTuple_GET_ITEM(digits, i)));
            *p++ = 0;
        }
    }

    // Sanity-check that every branch wrote exactly `len` bytes including the NUL.
    I(pch == 0 || (int)(strlen(pch) + 1) == len);

    return pch;
}

// Binds a uuid.UUID parameter as SQL_GUID.
static bool GetUUIDInfo(Cursor* cur, Py_ssize_t index, PyObject* param, ParamInfo& info, PyObject* uuid_type)
{
    // uuid_type: This is a new reference that we are responsible for freeing.
    Object tmp(uuid_type);

    info.ValueType = SQL_C_GUID;
    info.ParameterType = SQL_GUID;
    info.ColumnSize = 16;

    // The GUID bytes are copied into a heap buffer owned by `info`; `allocated`
    // tells FreeInfos to release it.
    info.allocated = true;
    info.ParameterValuePtr = pyodbc_malloc(sizeof(SQLGUID));
    if (!info.ParameterValuePtr)
    {
        PyErr_NoMemory();
        return false;
    }

    // Do we need to use "bytes" on a big endian machine?
    Object b(PyObject_GetAttrString(param, "bytes_le"));
    if (!b)
        return false;

    memcpy(info.ParameterValuePtr, PyBytes_AS_STRING(b.Get()), sizeof(SQLGUID));
    info.StrLen_or_Ind = sizeof(SQLGUID);
    return true;
}

// Binds a decimal.Decimal parameter as SQL_NUMERIC transferred as an ASCII string.
static bool GetDecimalInfo(Cursor* cur, Py_ssize_t index, PyObject* param, ParamInfo& info, PyObject* decimal_type)
{
    // decimal_type: This is a new reference that we are responsible for freeing.
    Object tmp(decimal_type);

    // The NUMERIC structure never works right with SQL Server and probably a lot of other drivers. We'll bind as a
    // string. Unfortunately, the Decimal class doesn't seem to have a way to force it to return a string without
    // exponents, so we'll have to build it ourselves.

    Object t(PyObject_CallMethod(param, "as_tuple", 0));
    if (!t)
        return false;

    long sign = PyInt_AsLong(PyTuple_GET_ITEM(t.Get(), 0));
    PyObject* digits = PyTuple_GET_ITEM(t.Get(), 1);
    long exp = PyInt_AsLong(PyTuple_GET_ITEM(t.Get(), 2));

    Py_ssize_t count = PyTuple_GET_SIZE(digits);

    info.ValueType = SQL_C_CHAR;
    info.ParameterType = SQL_NUMERIC;

    // Derive precision (ColumnSize) and scale (DecimalDigits) from the exponent,
    // mirroring the three formatting cases in CreateDecimalString.
    if (exp >= 0)
    {
        // (1 2 3) exp = 2 --> '12300'
        info.ColumnSize = (SQLUINTEGER)count + exp;
        info.DecimalDigits = 0;
    }
    else if (-exp <= count)
    {
        // (1 2 3) exp = -2 --> 1.23 : prec = 3, scale = 2
        info.ColumnSize = (SQLUINTEGER)count;
        info.DecimalDigits = (SQLSMALLINT)-exp;
    }
    else
    {
        // (1 2 3) exp = -5 --> 0.00123 : prec = 5, scale = 5
        info.ColumnSize = (SQLUINTEGER)(-exp);
        info.DecimalDigits = (SQLSMALLINT)info.ColumnSize;
    }

    I(info.ColumnSize >= (SQLULEN)info.DecimalDigits);

    info.ParameterValuePtr = CreateDecimalString(sign, digits, exp);
    if (!info.ParameterValuePtr)
    {
        PyErr_NoMemory();
        return false;
    }
    info.allocated = true;

    info.StrLen_or_Ind = (SQLINTEGER)strlen((char*)info.ParameterValuePtr);

    return true;
}

#if PY_MAJOR_VERSION < 3
// Binds a Python 2 buffer object: directly as VARBINARY when it is a single segment
// within the driver's max length, otherwise streamed at execution time (DAE).
static bool GetBufferInfo(Cursor* cur, Py_ssize_t index, PyObject* param, ParamInfo& info)
{
    info.ValueType = SQL_C_BINARY;

    const char* pb;
    Py_ssize_t cb = PyBuffer_GetMemory(param, &pb);

    SQLLEN maxlength = cur->cnxn->GetMaxLength(info.ValueType);
    if (maxlength == 0 || cb <= maxlength)
    {
        // There is one segment, so we can bind directly into the buffer object.
        info.ParameterType = SQL_VARBINARY;
        info.ParameterValuePtr = (SQLPOINTER)pb;
        info.BufferLength = (SQLINTEGER)cb;
        info.ColumnSize = (SQLUINTEGER)max(cb, 1);
        info.StrLen_or_Ind = (SQLINTEGER)cb;
    }
    else
    {
        // There are multiple segments, so we'll provide the data at execution time. Pass the PyObject pointer as
        // the parameter value which will be passed back to us when the data is needed. (If we release threads, we
        // need to up the refcount!)
        info.ParameterType = SQL_LONGVARBINARY;
        info.ParameterValuePtr = &info;
        info.BufferLength = sizeof(ParamInfo*);
        info.ColumnSize = (SQLUINTEGER)PyBuffer_Size(param);
        info.StrLen_or_Ind = cur->cnxn->need_long_data_len ? SQL_LEN_DATA_AT_EXEC((SQLLEN)PyBuffer_Size(param)) : SQL_DATA_AT_EXEC;
        info.pObject = param;
        Py_INCREF(info.pObject);
        info.maxlength = maxlength;
    }

    return true;
}
#endif

#if PY_VERSION_HEX >= 0x02060000
// Binds a bytearray parameter: in place as VARBINARY when it fits within the driver's
// max length (or is a TVP column), otherwise streamed at execution time (DAE).
static bool GetByteArrayInfo(Cursor* cur, Py_ssize_t index, PyObject* param, ParamInfo& info, bool isTVP)
{
    info.ValueType = SQL_C_BINARY;

    Py_ssize_t cb = PyByteArray_Size(param);

    SQLLEN maxlength = cur->cnxn->GetMaxLength(info.ValueType);
    if (maxlength == 0 || cb <= maxlength || isTVP)
    {
        info.ParameterType = SQL_VARBINARY;
        info.ParameterValuePtr = (SQLPOINTER)PyByteArray_AsString(param);
        info.BufferLength = (SQLINTEGER)cb;
        info.ColumnSize = isTVP?0:(SQLUINTEGER)max(cb, 1);
        info.StrLen_or_Ind = (SQLINTEGER)cb;
    }
    else
    {
        // Too long to bind in place.  The ParamInfo address is handed to the driver
        // and comes back from SQLParamData when the data is requested; a reference
        // to the bytearray is held in pObject until then.
        info.ParameterType = SQL_LONGVARBINARY;
        info.ParameterValuePtr = &info;
        info.BufferLength = sizeof(ParamInfo*);
        info.ColumnSize = (SQLUINTEGER)cb;
        info.StrLen_or_Ind = cur->cnxn->need_long_data_len ? SQL_LEN_DATA_AT_EXEC((SQLLEN)cb) : SQL_DATA_AT_EXEC;
        info.pObject = param;
        Py_INCREF(info.pObject);
        info.maxlength = maxlength;
    }
    return true;
}
#endif

// TVP
// Binds a sequence parameter as a SQL Server table-valued parameter.  The sequence may
// begin with one or two string cells (type name, then schema name); they are counted in
// nskip here and consumed later when BindParameter fills in the IPD descriptor.
static bool GetTableInfo(Cursor *cur, Py_ssize_t index, PyObject* param, ParamInfo& info)
{
    int nskip = 0;
    Py_ssize_t nrows = PySequence_Size(param);
    if (nrows > 0)
    {
        PyObject *cell0 = PySequence_GetItem(param, 0);
        // NOTE(review): the new reference is released immediately and cell0 is then
        // used as if borrowed — this relies on `param` keeping the item alive.
        Py_XDECREF(cell0);
        if (PyBytes_Check(cell0) || PyUnicode_Check(cell0))
        {
            nskip++;
            if (nrows > 1)
            {
                PyObject *cell1 = PySequence_GetItem(param, 1);
                Py_XDECREF(cell1);
                nskip += (PyBytes_Check(cell1) || PyUnicode_Check(cell1));
            }
        }
    }
    nrows -= nskip;

    if (!nskip)
    {
        // Need to describe in order to fill in IPD with the TVP's type name, because user has not provided it
        SQLSMALLINT tvptype;
        SQLDescribeParam(cur->hstmt, index + 1, &tvptype, 0, 0, 0);
    }

    info.pObject = param;
    Py_INCREF(param);
    info.ValueType = SQL_C_BINARY;
    info.ParameterType = SQL_SS_TABLE;
    info.ColumnSize = nrows;                // for a TVP, ColumnSize carries the row count
    info.DecimalDigits = 0;
    info.ParameterValuePtr = &info;
    info.BufferLength = 0;
    info.curTvpRow = nskip;                 // first data row, past any name/schema cells
    info.StrLen_or_Ind = SQL_DATA_AT_EXEC;
    info.allocated = false;
    return true;
}

bool GetParameterInfo(Cursor* cur, Py_ssize_t index, PyObject* param, ParamInfo& info, bool isTVP)
{
    // Determines the type of SQL parameter that will be used for this parameter based on the Python data type.
    //
    // Populates `info`.
    // Dispatch on the Python type; each Get*Info helper fills in `info` for one
    // Python-type-to-SQL-type mapping.
    if (param == Py_None)
        return GetNullInfo(cur, index, info);

    if (param == null_binary)
        return GetNullBinaryInfo(cur, index, info);

#if PY_MAJOR_VERSION >= 3
    if (PyBytes_Check(param))
        return GetBytesInfo(cur, index, param, info, isTVP);
#else
    if (PyBytes_Check(param))
        return GetStrInfo(cur, index, param, info, isTVP);
#endif

    if (PyUnicode_Check(param))
        return GetUnicodeInfo(cur, index, param, info, isTVP);

    if (PyBool_Check(param))
        return GetBooleanInfo(cur, index, param, info);

    // Note: datetime must be tested before date because datetime is a date subclass.
    if (PyDateTime_Check(param))
        return GetDateTimeInfo(cur, index, param, info);

    if (PyDate_Check(param))
        return GetDateInfo(cur, index, param, info);

    if (PyTime_Check(param))
        return GetTimeInfo(cur, index, param, info);

    if (PyLong_Check(param))
        return GetLongInfo(cur, index, param, info, isTVP);

    if (PyFloat_Check(param))
        return GetFloatInfo(cur, index, param, info);

#if PY_VERSION_HEX >= 0x02060000
    if (PyByteArray_Check(param))
        return GetByteArrayInfo(cur, index, param, info, isTVP);
#endif

#if PY_MAJOR_VERSION < 3
    if (PyInt_Check(param))
        return GetIntInfo(cur, index, param, info, isTVP);

    if (PyBuffer_Check(param))
        return GetBufferInfo(cur, index, param, info);
#endif

    // Decimal
    PyObject* cls = 0;
    if (!IsInstanceForThread(param, "decimal", "Decimal", &cls))
        return false;
    if (cls != 0)
        return GetDecimalInfo(cur, index, param, info, cls);

    // UUID
    if (!IsInstanceForThread(param, "uuid", "UUID", &cls))
        return false;
    if (cls != 0)
        return GetUUIDInfo(cur, index, param, info, cls);

    // Any remaining sequence is treated as a table-valued parameter.
    if (PySequence_Check(param))
        return GetTableInfo(cur, index, param, info);

    RaiseErrorV("HY105", ProgrammingError, "Invalid parameter type. param-index=%zd param-type=%s",
                index, Py_TYPE(param)->tp_name);
    return false;
}

// Extracts a C long from a Python int (or Python 2 long) object.
// Returns false, leaving nValue untouched, for NULL or any other type.
static bool getObjectValue(PyObject *pObject, long& nValue)
{
    if (pObject == NULL)
        return false;

#if PY_MAJOR_VERSION < 3
    if (PyInt_Check(pObject))
    {
        nValue = PyInt_AS_LONG(pObject);
        return true;
    }
#endif

    if (PyLong_Check(pObject))
    {
        nValue = PyLong_AsLong(pObject);
        return true;
    }

    return false;
}

// Returns element nIndex of pSequence as a long, or nDefault when the element is
// missing or not an integer.  bChanged is set only when a value was actually read.
// (PySequence_GetItem may set an index error for a short sequence; the caller
// clears it — see UpdateParamInfo.)
static long getSequenceValue(PyObject *pSequence, Py_ssize_t nIndex, long nDefault, bool &bChanged)
{
    PyObject *obj;
    long v = nDefault;

    obj = PySequence_GetItem(pSequence, nIndex);
    if (obj != NULL)
    {
        if (getObjectValue(obj, v))
            bChanged = true;
    }

    Py_CLEAR(obj);
    return v;
}

/**
 * UpdateParamInfo updates the current columnsizes with the information provided
 * by a set from the client code, to manually override values returned by SQLDescribeParam()
 * which can be wrong in case of SQL Server statements.
 *
 * sparhawk@gmx.at (Gerhard Gruber)
 */
static bool UpdateParamInfo(Cursor* pCursor, Py_ssize_t nIndex, ParamInfo *pInfo)
{
    if (pCursor->inputsizes == NULL || nIndex >= PySequence_Length(pCursor->inputsizes))
        return false;

    PyObject *desc = PySequence_GetItem(pCursor->inputsizes, nIndex);
    if (desc == NULL)
        return false;

    bool rc = false;
    long v;
    bool clearError = true;

    // If the error was already set before we entered here, it is not from us, so we leave it alone.
if (PyErr_Occurred()) clearError = false; // integer - sets colsize // type object - sets sqltype (mapping between Python and SQL types is not 1:1 so it may not always work) // Consider: sequence of (colsize, sqltype, scale) if (getObjectValue(desc, v)) { pInfo->ColumnSize = (SQLULEN)v; rc = true; } else if (PySequence_Check(desc)) { pInfo->ParameterType = (SQLSMALLINT)getSequenceValue(desc, 0, (long)pInfo->ParameterType, rc); pInfo->ColumnSize = (SQLUINTEGER)getSequenceValue(desc, 1, (long)pInfo->ColumnSize, rc); pInfo->DecimalDigits = (SQLSMALLINT)getSequenceValue(desc, 2, (long)pInfo->ColumnSize, rc); } Py_CLEAR(desc); // If the user didn't provide the full array (in case he gave us an array), the above code would // set an internal error on the cursor object, as we try to read three values from an array // which may not have as many. This is ok, because we don't really care if the array is not completly // specified, so we clear the error in case it comes from this. If the error was already present before that // we keep it, so the user can handle it. if (clearError) PyErr_Clear(); return rc; } bool BindParameter(Cursor* cur, Py_ssize_t index, ParamInfo& info) { SQLSMALLINT sqltype = info.ParameterType; SQLULEN colsize = info.ColumnSize; SQLSMALLINT scale = info.DecimalDigits; if (UpdateParamInfo(cur, index, &info)) { // Reload in case it has changed. colsize = info.ColumnSize; sqltype = info.ParameterType; scale = info.DecimalDigits; } TRACE("BIND: param=%ld ValueType=%d (%s) ParameterType=%d (%s) ColumnSize=%ld DecimalDigits=%d BufferLength=%ld *pcb=%ld\n", (index+1), info.ValueType, CTypeName(info.ValueType), sqltype, SqlTypeName(sqltype), colsize, scale, info.BufferLength, info.StrLen_or_Ind); SQLRETURN ret = -1; Py_BEGIN_ALLOW_THREADS ret = SQLBindParameter(cur->hstmt, (SQLUSMALLINT)(index + 1), SQL_PARAM_INPUT, info.ValueType, sqltype, colsize, scale, sqltype == SQL_SS_TABLE ? 
0 : info.ParameterValuePtr, info.BufferLength, &info.StrLen_or_Ind); Py_END_ALLOW_THREADS; if (GetConnection(cur)->hdbc == SQL_NULL_HANDLE) { // The connection was closed by another thread in the ALLOW_THREADS block above. RaiseErrorV(0, ProgrammingError, "The cursor's connection was closed."); return false; } if (!SQL_SUCCEEDED(ret)) { RaiseErrorFromHandle(cur->cnxn, "SQLBindParameter", GetConnection(cur)->hdbc, cur->hstmt); return false; } // This is a TVP. Enter and bind its parameters, allocate descriptors for its columns (all as DAE) if (sqltype == SQL_SS_TABLE) { Py_ssize_t nrows = PySequence_Size(info.pObject); if (nrows > 0) { PyObject *cell0 = PySequence_GetItem(info.pObject, 0); Py_XDECREF(cell0); if (PyBytes_Check(cell0) || PyUnicode_Check(cell0)) { SQLHDESC desc; PyObject *tvpname = PyCodec_Encode(cell0, "UTF-16LE", 0); SQLGetStmtAttr(cur->hstmt, SQL_ATTR_IMP_PARAM_DESC, &desc, 0, 0); SQLSetDescFieldW(desc, index + 1, SQL_CA_SS_TYPE_NAME, (SQLPOINTER)PyBytes_AsString(tvpname), PyBytes_Size(tvpname)); Py_XDECREF(tvpname); if (nrows > 1) { PyObject *cell1 = PySequence_GetItem(info.pObject, 1); Py_XDECREF(cell1); if (PyBytes_Check(cell1) || PyUnicode_Check(cell1)) { PyObject *tvpschema = PyCodec_Encode(cell1, "UTF-16LE", 0); SQLSetDescFieldW(desc, index + 1, SQL_CA_SS_SCHEMA_NAME, (SQLPOINTER)PyBytes_AsString(tvpschema), PyBytes_Size(tvpschema)); Py_XDECREF(tvpschema); } } } } SQLHDESC desc; SQLGetStmtAttr(cur->hstmt, SQL_ATTR_APP_PARAM_DESC, &desc, 0, 0); SQLSetDescField(desc, index + 1, SQL_DESC_DATA_PTR, (SQLPOINTER)info.ParameterValuePtr, 0); int err = 0; ret = SQLSetStmtAttr(cur->hstmt, SQL_SOPT_SS_PARAM_FOCUS, (SQLPOINTER)(index + 1), SQL_IS_INTEGER); if (!SQL_SUCCEEDED(ret)) { RaiseErrorFromHandle(cur->cnxn, "SQLSetStmtAttr", GetConnection(cur)->hdbc, cur->hstmt); return false; } Py_ssize_t i = PySequence_Size(info.pObject) - info.ColumnSize; Py_ssize_t ncols = 0; while (i >= 0 && i < PySequence_Size(info.pObject)) { PyObject *row = 
PySequence_GetItem(info.pObject, i); Py_XDECREF(row); if (!PySequence_Check(row)) { RaiseErrorV(0, ProgrammingError, "A TVP's rows must be Sequence objects."); err = 1; break; } if (ncols && ncols != PySequence_Size(row)) { RaiseErrorV(0, ProgrammingError, "A TVP's rows must all be the same size."); err = 1; break; } ncols = PySequence_Size(row); i++; } if (!ncols) { // TVP has no columns --- is null info.nested = 0; info.StrLen_or_Ind = SQL_DEFAULT_PARAM; } else { PyObject *row = PySequence_GetItem(info.pObject, PySequence_Size(info.pObject) - info.ColumnSize); Py_XDECREF(row); info.nested = (ParamInfo*)pyodbc_malloc(ncols * sizeof(ParamInfo)); info.maxlength = ncols; memset(info.nested, 0, ncols * sizeof(ParamInfo)); for(i=0;ihstmt, (SQLUSMALLINT)(i + 1), SQL_PARAM_INPUT, info.nested[i].ValueType, info.nested[i].ParameterType, info.nested[i].ColumnSize, info.nested[i].DecimalDigits, info.nested + i, info.nested[i].BufferLength, &info.nested[i].StrLen_or_Ind); Py_END_ALLOW_THREADS; if (GetConnection(cur)->hdbc == SQL_NULL_HANDLE) { // The connection was closed by another thread in the ALLOW_THREADS block above. RaiseErrorV(0, ProgrammingError, "The cursor's connection was closed."); return false; } if (!SQL_SUCCEEDED(ret)) { RaiseErrorFromHandle(cur->cnxn, "SQLBindParameter", GetConnection(cur)->hdbc, cur->hstmt); return false; } } } ret = SQLSetStmtAttr(cur->hstmt, SQL_SOPT_SS_PARAM_FOCUS, 0, SQL_IS_INTEGER); if (!SQL_SUCCEEDED(ret)) { RaiseErrorFromHandle(cur->cnxn, "SQLSetStmtAttr", GetConnection(cur)->hdbc, cur->hstmt); return false; } if (err) return false; } return true; } void FreeParameterData(Cursor* cur) { // Unbinds the parameters and frees the parameter buffer. if (cur->paramInfos) { // MS ODBC will crash if we use an HSTMT after the HDBC has been freed. 
if (cur->cnxn->hdbc != SQL_NULL_HANDLE) { Py_BEGIN_ALLOW_THREADS SQLFreeStmt(cur->hstmt, SQL_RESET_PARAMS); Py_END_ALLOW_THREADS } FreeInfos(cur->paramInfos, cur->paramcount); cur->paramInfos = 0; } } void FreeParameterInfo(Cursor* cur) { // Internal function to free just the cached parameter information. This is not used by the general cursor code // since this information is also freed in the less granular free_results function that clears everything. Py_XDECREF(cur->pPreparedSQL); pyodbc_free(cur->paramtypes); cur->pPreparedSQL = 0; cur->paramtypes = 0; cur->paramcount = 0; } bool Prepare(Cursor* cur, PyObject* pSql) { #if PY_MAJOR_VERSION >= 3 if (!PyUnicode_Check(pSql)) { PyErr_SetString(PyExc_TypeError, "SQL must be a Unicode string"); return false; } #endif // // Prepare the SQL if necessary. // if (pSql != cur->pPreparedSQL) { FreeParameterInfo(cur); SQLRETURN ret = 0; SQLSMALLINT cParamsT = 0; const char* szErrorFunc = "SQLPrepare"; const TextEnc* penc; #if PY_MAJOR_VERSION < 3 if (PyBytes_Check(pSql)) { penc = &cur->cnxn->str_enc; } else #endif { penc = &cur->cnxn->unicode_enc; } Object query(penc->Encode(pSql)); if (!query) return 0; bool isWide = (penc->ctype == SQL_C_WCHAR); const char* pch = PyBytes_AS_STRING(query.Get()); SQLINTEGER cch = (SQLINTEGER)(PyBytes_GET_SIZE(query.Get()) / (isWide ? sizeof(ODBCCHAR) : 1)); TRACE("SQLPrepare(%s)\n", pch); Py_BEGIN_ALLOW_THREADS if (isWide) ret = SQLPrepareW(cur->hstmt, (SQLWCHAR*)pch, cch); else ret = SQLPrepare(cur->hstmt, (SQLCHAR*)pch, cch); if (SQL_SUCCEEDED(ret)) { szErrorFunc = "SQLNumParams"; ret = SQLNumParams(cur->hstmt, &cParamsT); } Py_END_ALLOW_THREADS if (cur->cnxn->hdbc == SQL_NULL_HANDLE) { // The connection was closed by another thread in the ALLOW_THREADS block above. 
RaiseErrorV(0, ProgrammingError, "The cursor's connection was closed."); return false; } if (!SQL_SUCCEEDED(ret)) { RaiseErrorFromHandle(cur->cnxn, szErrorFunc, GetConnection(cur)->hdbc, cur->hstmt); return false; } cur->paramcount = (int)cParamsT; cur->pPreparedSQL = pSql; Py_INCREF(cur->pPreparedSQL); } return true; } bool PrepareAndBind(Cursor* cur, PyObject* pSql, PyObject* original_params, bool skip_first) { // // Normalize the parameter variables. // // Since we may replace parameters (we replace objects with Py_True/Py_False when writing to a bit/bool column), // allocate an array and use it instead of the original sequence int params_offset = skip_first ? 1 : 0; Py_ssize_t cParams = original_params == 0 ? 0 : PySequence_Length(original_params) - params_offset; if (!Prepare(cur, pSql)) return false; if (cParams != cur->paramcount) { RaiseErrorV(0, ProgrammingError, "The SQL contains %d parameter markers, but %d parameters were supplied", cur->paramcount, cParams); return false; } cur->paramInfos = (ParamInfo*)pyodbc_malloc(sizeof(ParamInfo) * cParams); if (cur->paramInfos == 0) { PyErr_NoMemory(); return 0; } memset(cur->paramInfos, 0, sizeof(ParamInfo) * cParams); // Since you can't call SQLDesribeParam *after* calling SQLBindParameter, we'll loop through all of the // GetParameterInfos first, then bind. 
    // First pass: classify every parameter.  Must complete before any binding because
    // SQLDescribeParam cannot be called after SQLBindParameter (see comment above).
    for (Py_ssize_t i = 0; i < cParams; i++)
    {
        Object param(PySequence_GetItem(original_params, i + params_offset));
        if (!GetParameterInfo(cur, i, param, cur->paramInfos[i], false))
        {
            FreeInfos(cur->paramInfos, cParams);
            cur->paramInfos = 0;
            return false;
        }
    }

    // Second pass: bind them all.
    for (Py_ssize_t i = 0; i < cParams; i++)
    {
        if (!BindParameter(cur, i, cur->paramInfos[i]))
        {
            FreeInfos(cur->paramInfos, cParams);
            cur->paramInfos = 0;
            return false;
        }
    }

    return true;
}


// executemany() fast path: prepares `pSql` once and executes it for every row of
// paramArrayObj using an ODBC parameter array, converting rows in batches into
// cur->paramArray.  Error handling uses a ladder of ErrorRetN labels, each unwinding
// one more level of acquired state.
bool ExecuteMulti(Cursor* cur, PyObject* pSql, PyObject* paramArrayObj)
{
    bool ret = true;
    char *szLastFunction = 0;
    SQLRETURN rc = SQL_SUCCESS;

    if (!Prepare(cur, pSql))
        return false;

    if (!(cur->paramInfos = (ParamInfo*)pyodbc_malloc(sizeof(ParamInfo) * cur->paramcount)))
    {
        PyErr_NoMemory();
        return 0;
    }
    memset(cur->paramInfos, 0, sizeof(ParamInfo) * cur->paramcount);

    // Describe each parameter (SQL type) in preparation for allocation of paramset array
    for (Py_ssize_t i = 0; i < cur->paramcount; i++)
    {
        SQLSMALLINT nullable;
        if (!SQL_SUCCEEDED(SQLDescribeParam(cur->hstmt, i + 1, &(cur->paramInfos[i].ParameterType),
                                            &cur->paramInfos[i].ColumnSize, &cur->paramInfos[i].DecimalDigits,
                                            &nullable)))
        {
            // Default to a medium-length varchar if describing the parameter didn't work
            cur->paramInfos[i].ParameterType = SQL_VARCHAR;
            cur->paramInfos[i].ColumnSize = 255;
            cur->paramInfos[i].DecimalDigits = 0;
        }

        // This supports overriding of input sizes via setinputsizes
        // See issue 380
        // The logic is duplicated from BindParameter
        UpdateParamInfo(cur, i, &cur->paramInfos[i]);
    }

    PyObject *rowseq = PySequence_Fast(paramArrayObj, "Parameter array must be a sequence.");
    if (!rowseq)
    {
    ErrorRet1:
        if (cur->paramInfos)
            FreeInfos(cur->paramInfos, cur->paramcount);
        cur->paramInfos = 0;
        return false;
    }
    Py_ssize_t rowcount = PySequence_Fast_GET_SIZE(rowseq);
    PyObject **rowptr = PySequence_Fast_ITEMS(rowseq);

    Py_ssize_t r = 0;
    while ( r < rowcount )
    {
        // Scan current row to determine C types
        PyObject *currow = *rowptr++;
        if (!PyTuple_Check(currow) &&
!PyList_Check(currow) && !Row_Check(currow)) { RaiseErrorV(0, PyExc_TypeError, "Params must be in a list, tuple, or Row"); ErrorRet2: Py_XDECREF(rowseq); goto ErrorRet1; } PyObject *colseq = PySequence_Fast(currow, "Row must be a sequence."); if (!colseq) { goto ErrorRet2; } if (PySequence_Fast_GET_SIZE(colseq) != cur->paramcount) { RaiseErrorV(0, ProgrammingError, "Expected %u parameters, supplied %u", cur->paramcount, PySequence_Fast_GET_SIZE(colseq)); ErrorRet3: Py_XDECREF(colseq); goto ErrorRet2; } PyObject **cells = PySequence_Fast_ITEMS(colseq); // Start at a non-zero offset to prevent null pointer detection. char *bindptr = (char*)16; Py_ssize_t i = 0; for (; i < cur->paramcount; i++) { if (!DetectCType(cells[i], &cur->paramInfos[i])) { goto ErrorRet3; } if (!SQL_SUCCEEDED(SQLBindParameter(cur->hstmt, i + 1, SQL_PARAM_INPUT, cur->paramInfos[i].ValueType, cur->paramInfos[i].ParameterType, cur->paramInfos[i].ColumnSize, cur->paramInfos[i].DecimalDigits, bindptr, cur->paramInfos[i].BufferLength, (SQLLEN*)(bindptr + cur->paramInfos[i].BufferLength)))) { RaiseErrorFromHandle(cur->cnxn, "SQLBindParameter", GetConnection(cur)->hdbc, cur->hstmt); ErrorRet4: SQLFreeStmt(cur->hstmt, SQL_RESET_PARAMS); goto ErrorRet3; } if (cur->paramInfos[i].ValueType == SQL_C_NUMERIC) { SQLHDESC desc; SQLGetStmtAttr(cur->hstmt, SQL_ATTR_APP_PARAM_DESC, &desc, 0, 0); SQLSetDescField(desc, i + 1, SQL_DESC_TYPE, (SQLPOINTER)SQL_C_NUMERIC, 0); SQLSetDescField(desc, i + 1, SQL_DESC_PRECISION, (SQLPOINTER)cur->paramInfos[i].ColumnSize, 0); SQLSetDescField(desc, i + 1, SQL_DESC_SCALE, (SQLPOINTER)(uintptr_t)cur->paramInfos[i].DecimalDigits, 0); SQLSetDescField(desc, i + 1, SQL_DESC_DATA_PTR, bindptr, 0); } bindptr += cur->paramInfos[i].BufferLength + sizeof(SQLLEN); } Py_ssize_t rowlen = bindptr - (char*)16; // Assume parameters are homogeneous between rows in the common case, to avoid // another rescan for determining the array height. 
// Subtract number of rows processed as an upper bound. if (!(cur->paramArray = (unsigned char*)pyodbc_malloc(rowlen * (rowcount - r)))) { PyErr_NoMemory(); goto ErrorRet4; } unsigned char *pParamDat = cur->paramArray; Py_ssize_t rows_converted = 0; ParamInfo *pi; for (;;) { // Column loop. pi = &cur->paramInfos[0]; for (int c = 0; c < cur->paramcount; c++, pi++) { if (!PyToCType(cur, &pParamDat, *cells++, pi)) { // "schema change" or conversion error. Try again on next batch. rowptr--; Py_XDECREF(colseq); colseq = 0; // Finish this batch of rows and attempt to execute before starting another. goto DoExecute; } } rows_converted++; Py_XDECREF(colseq); colseq = 0; r++; if ( r >= rowcount ) { break; } currow = *rowptr++; colseq = PySequence_Fast(currow, "Row must be a sequence."); if (!colseq) { ErrorRet5: pyodbc_free(cur->paramArray); cur->paramArray = 0; goto ErrorRet4; } if (PySequence_Fast_GET_SIZE(colseq) != cur->paramcount) { RaiseErrorV(0, ProgrammingError, "Expected %u parameters, supplied %u", cur->paramcount, PySequence_Fast_GET_SIZE(colseq)); Py_XDECREF(colseq); goto ErrorRet5; } cells = PySequence_Fast_ITEMS(colseq); } DoExecute: if (!rows_converted || PyErr_Occurred()) { if (!PyErr_Occurred()) RaiseErrorV(0, ProgrammingError, "No suitable conversion for one or more parameters."); goto ErrorRet5; } SQLULEN bop = (SQLULEN)(cur->paramArray) - 16; if (!SQL_SUCCEEDED(SQLSetStmtAttr(cur->hstmt, SQL_ATTR_PARAM_BIND_TYPE, (SQLPOINTER)rowlen, SQL_IS_UINTEGER))) { RaiseErrorFromHandle(cur->cnxn, "SQLSetStmtAttr", GetConnection(cur)->hdbc, cur->hstmt); ErrorRet6: SQLSetStmtAttr(cur->hstmt, SQL_ATTR_PARAM_BIND_TYPE, SQL_BIND_BY_COLUMN, SQL_IS_UINTEGER); goto ErrorRet5; } if (!SQL_SUCCEEDED(SQLSetStmtAttr(cur->hstmt, SQL_ATTR_PARAMSET_SIZE, (SQLPOINTER)rows_converted, SQL_IS_UINTEGER))) { RaiseErrorFromHandle(cur->cnxn, "SQLSetStmtAttr", GetConnection(cur)->hdbc, cur->hstmt); goto ErrorRet6; } if (!SQL_SUCCEEDED(SQLSetStmtAttr(cur->hstmt, 
SQL_ATTR_PARAM_BIND_OFFSET_PTR, (SQLPOINTER)&bop, SQL_IS_POINTER))) { RaiseErrorFromHandle(cur->cnxn, "SQLSetStmtAttr", GetConnection(cur)->hdbc, cur->hstmt); ErrorRet7: SQLSetStmtAttr(cur->hstmt, SQL_ATTR_PARAMSET_SIZE, (SQLPOINTER)1, SQL_IS_UINTEGER); goto ErrorRet6; } // The code below was copy-pasted from cursor.cpp's execute() for convenience. // TODO: REFACTOR if there is possibility to reuse (maybe not, because DAE structure is different) Py_BEGIN_ALLOW_THREADS rc = SQLExecute(cur->hstmt); Py_END_ALLOW_THREADS if (cur->cnxn->hdbc == SQL_NULL_HANDLE) { // The connection was closed by another thread in the ALLOW_THREADS block above. RaiseErrorV(0, ProgrammingError, "The cursor's connection was closed."); ErrorRet8: FreeParameterData(cur); SQLSetStmtAttr(cur->hstmt, SQL_ATTR_PARAM_BIND_OFFSET_PTR, 0, SQL_IS_POINTER); goto ErrorRet7; } if (!SQL_SUCCEEDED(rc) && rc != SQL_NEED_DATA && rc != SQL_NO_DATA) { // We could try dropping through the while and if below, but if there is an error, we need to raise it before // FreeParameterData calls more ODBC functions. RaiseErrorFromHandle(cur->cnxn, "SQLExecute", cur->cnxn->hdbc, cur->hstmt); goto ErrorRet8; } // TODO: Refactor into ProcessDAEParams() ? while (rc == SQL_NEED_DATA) { // One or more parameters were too long to bind normally so we set the // length to SQL_LEN_DATA_AT_EXEC. ODBC will return SQL_NEED_DATA for // each of the parameters we did this for. // // For each one we set a pointer to the ParamInfo as the "parameter // data" we can access with SQLParamData. We've stashed everything we // need in there. 
szLastFunction = "SQLParamData"; DAEParam *pInfo; Py_BEGIN_ALLOW_THREADS rc = SQLParamData(cur->hstmt, (SQLPOINTER*)&pInfo); Py_END_ALLOW_THREADS if (rc != SQL_NEED_DATA && rc != SQL_NO_DATA && !SQL_SUCCEEDED(rc)) return RaiseErrorFromHandle(cur->cnxn, "SQLParamData", cur->cnxn->hdbc, cur->hstmt) != NULL; TRACE("SQLParamData() --> %d\n", rc); if (rc == SQL_NEED_DATA) { PyObject* objCell = pInfo->cell; // If the object is Unicode it needs to be converted into bytes before it can be used by SQLPutData if (PyUnicode_Check(objCell)) { const TextEnc& enc = cur->cnxn->sqlwchar_enc; int cb = PyUnicode_GET_DATA_SIZE(objCell) / 2; PyObject* bytes = NULL; const Py_UNICODE* source = PyUnicode_AS_UNICODE(objCell); switch (enc.optenc) { case OPTENC_UTF8: bytes = PyUnicode_EncodeUTF8(source, cb, "strict"); break; case OPTENC_UTF16: bytes = PyUnicode_EncodeUTF16(source, cb, "strict", BYTEORDER_NATIVE); break; case OPTENC_UTF16LE: bytes = PyUnicode_EncodeUTF16(source, cb, "strict", BYTEORDER_LE); break; case OPTENC_UTF16BE: bytes = PyUnicode_EncodeUTF16(source, cb, "strict", BYTEORDER_BE); break; } if (bytes && PyBytes_Check(bytes)) { objCell = bytes; } } szLastFunction = "SQLPutData"; if (PyBytes_Check(objCell) #if PY_VERSION_HEX >= 0x02060000 || PyByteArray_Check(objCell) #endif ) { char *(*pGetPtr)(PyObject*); Py_ssize_t (*pGetLen)(PyObject*); #if PY_VERSION_HEX >= 0x02060000 if (PyByteArray_Check(objCell)) { pGetPtr = PyByteArray_AsString; pGetLen = PyByteArray_Size; } else #endif { pGetPtr = PyBytes_AsString; pGetLen = PyBytes_Size; } const char* p = pGetPtr(objCell); SQLLEN cb = (SQLLEN)pGetLen(objCell); SQLLEN offset = 0; do { SQLLEN remaining = min(pInfo->maxlen, cb - offset); TRACE("SQLPutData [%d] (%d) %.10s\n", offset, remaining, &p[offset]); Py_BEGIN_ALLOW_THREADS rc = SQLPutData(cur->hstmt, (SQLPOINTER)&p[offset], remaining); Py_END_ALLOW_THREADS if (!SQL_SUCCEEDED(rc)) return RaiseErrorFromHandle(cur->cnxn, "SQLPutData", cur->cnxn->hdbc, cur->hstmt) != NULL; offset 
+= remaining; } while (offset < cb); if (PyUnicode_Check(pInfo->cell) && PyBytes_Check(objCell)) { Py_XDECREF(objCell); } } #if PY_MAJOR_VERSION < 3 else if (PyBuffer_Check(objCell)) { // Buffers can have multiple segments, so we might need multiple writes. Looping through buffers isn't // difficult, but we've wrapped it up in an iterator object to keep this loop simple. BufferSegmentIterator it(objCell); byte* pb; SQLLEN cb; while (it.Next(pb, cb)) { Py_BEGIN_ALLOW_THREADS rc = SQLPutData(cur->hstmt, pb, cb); Py_END_ALLOW_THREADS if (!SQL_SUCCEEDED(rc)) return RaiseErrorFromHandle(cur->cnxn, "SQLPutData", cur->cnxn->hdbc, cur->hstmt) != NULL; } } #endif Py_XDECREF(pInfo->cell); rc = SQL_NEED_DATA; } } if (!SQL_SUCCEEDED(rc) && rc != SQL_NO_DATA) return RaiseErrorFromHandle(cur->cnxn, szLastFunction, cur->cnxn->hdbc, cur->hstmt) != NULL; SQLSetStmtAttr(cur->hstmt, SQL_ATTR_PARAMSET_SIZE, (SQLPOINTER)1, SQL_IS_UINTEGER); SQLSetStmtAttr(cur->hstmt, SQL_ATTR_PARAM_BIND_OFFSET_PTR, 0, SQL_IS_POINTER); pyodbc_free(cur->paramArray); cur->paramArray = 0; } Py_XDECREF(rowseq); FreeParameterData(cur); return ret; } static bool GetParamType(Cursor* cur, Py_ssize_t index, SQLSMALLINT& type) { // Returns the ODBC type of the of given parameter. // // Normally we set the parameter type based on the parameter's Python object type (e.g. str --> SQL_CHAR), so this // is only called when the parameter is None. In that case, we can't guess the type and have to use // SQLDescribeParam. // // If the database doesn't support SQLDescribeParam, we return SQL_VARCHAR since it converts to most other types. // However, it will not usually work if the target column is a binary column. 
if (!GetConnection(cur)->supports_describeparam || cur->paramcount == 0) { type = SQL_VARCHAR; return true; } if (cur->paramtypes == 0) { cur->paramtypes = reinterpret_cast(pyodbc_malloc(sizeof(SQLSMALLINT) * cur->paramcount)); if (cur->paramtypes == 0) { PyErr_NoMemory(); return false; } // SQL_UNKNOWN_TYPE is zero, so zero out all columns since we haven't looked any up yet. memset(cur->paramtypes, 0, sizeof(SQLSMALLINT) * cur->paramcount); } if (cur->paramtypes[index] == SQL_UNKNOWN_TYPE) { SQLULEN ParameterSizePtr; SQLSMALLINT DecimalDigitsPtr; SQLSMALLINT NullablePtr; SQLRETURN ret; Py_BEGIN_ALLOW_THREADS ret = SQLDescribeParam(cur->hstmt, (SQLUSMALLINT)(index + 1), &cur->paramtypes[index], &ParameterSizePtr, &DecimalDigitsPtr, &NullablePtr); Py_END_ALLOW_THREADS if (!SQL_SUCCEEDED(ret)) { // This can happen with ("select ?", None). We'll default to VARCHAR which works with most types. cur->paramtypes[index] = SQL_VARCHAR; } } type = cur->paramtypes[index]; return true; } struct NullParam { PyObject_HEAD }; PyTypeObject NullParamType = { PyVarObject_HEAD_INIT(NULL, 0) "pyodbc.NullParam", // tp_name sizeof(NullParam), // tp_basicsize 0, // tp_itemsize 0, // destructor tp_dealloc 0, // tp_print 0, // tp_getattr 0, // tp_setattr 0, // tp_compare 0, // tp_repr 0, // tp_as_number 0, // tp_as_sequence 0, // tp_as_mapping 0, // tp_hash 0, // tp_call 0, // tp_str 0, // tp_getattro 0, // tp_setattro 0, // tp_as_buffer Py_TPFLAGS_DEFAULT, // tp_flags }; PyObject* null_binary; bool Params_init() { if (PyType_Ready(&NullParamType) < 0) return false; null_binary = (PyObject*)PyObject_New(NullParam, &NullParamType); if (null_binary == 0) return false; PyDateTime_IMPORT; return true; } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1619054768.0 pyodbc-4.0.32/src/params.h0000664000175000017500000000063500000000000016652 0ustar00mkleehammermkleehammer #ifndef PARAMS_H #define PARAMS_H bool Params_init(); struct Cursor; bool 
PrepareAndBind(Cursor* cur, PyObject* pSql, PyObject* params, bool skip_first); bool ExecuteMulti(Cursor* cur, PyObject* pSql, PyObject* paramArrayObj); bool GetParameterInfo(Cursor* cur, Py_ssize_t index, PyObject* param, ParamInfo& info, bool isTVP); void FreeParameterData(Cursor* cur); void FreeParameterInfo(Cursor* cur); #endif ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1629396312.0 pyodbc-4.0.32/src/pyodbc.h0000664000175000017500000001125000000000000016642 0ustar00mkleehammermkleehammer // Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated // documentation files (the "Software"), to deal in the Software without restriction, including without limitation the // rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to // permit persons to whom the Software is furnished to do so. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE // WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS // OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR // OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #ifndef PYODBC_H #define PYODBC_H #ifdef _MSC_VER // The MS headers generate a ton of warnings. #pragma warning(push, 0) #define _CRT_SECURE_NO_WARNINGS #include #include #pragma warning(pop) typedef __int64 INT64; typedef unsigned __int64 UINT64; #else typedef unsigned char byte; typedef unsigned int UINT; typedef long long INT64; typedef unsigned long long UINT64; #define _strcmpi strcasecmp #define _strdup strdup #ifdef __MINGW32__ #include #include #else inline int max(int lhs, int rhs) { return (rhs > lhs) ? 
rhs : lhs; } #endif #endif #ifdef __SUN__ #include #endif #define PY_SSIZE_T_CLEAN 1 #include #include #include #include #include #include #ifdef __CYGWIN__ #include #endif #include #include #if PY_VERSION_HEX < 0x02050000 && !defined(PY_SSIZE_T_MIN) typedef int Py_ssize_t; #define PY_SSIZE_T_MAX INT_MAX #define PY_SSIZE_T_MIN INT_MIN #define PyInt_AsSsize_t PyInt_AsLong #define lenfunc inquiry #define ssizeargfunc intargfunc #define ssizeobjargproc intobjargproc #endif #ifndef _countof #define _countof(a) (sizeof(a) / sizeof(a[0])) #endif #ifndef SQL_SS_TABLE #define SQL_SS_TABLE -153 #endif #ifndef SQL_SOPT_SS_PARAM_FOCUS #define SQL_SOPT_SS_PARAM_FOCUS 1236 #endif #ifndef SQL_CA_SS_TYPE_NAME #define SQL_CA_SS_TYPE_NAME 1227 #endif #ifndef SQL_CA_SS_SCHEMA_NAME #define SQL_CA_SS_SCHEMA_NAME 1226 #endif #ifndef SQL_CA_SS_CATALOG_NAME #define SQL_CA_SS_CATALOG_NAME 1225 #endif inline bool IsSet(DWORD grf, DWORD flags) { return (grf & flags) == flags; } #ifdef UNUSED #undef UNUSED #endif inline void UNUSED(...) { } #include #if defined(__SUNPRO_CC) || defined(__SUNPRO_C) || (defined(__GNUC__) && !defined(__MINGW32__)) #ifndef __FreeBSD__ #include #endif #define CDECL cdecl #define min(X,Y) ((X) < (Y) ? (X) : (Y)) #define max(X,Y) ((X) > (Y) ? (X) : (Y)) #define _alloca alloca inline void _strlwr(char* name) { while (*name) { *name = tolower(*name); name++; } } #else #define CDECL #endif #define STRINGIFY(x) #x #define TOSTRING(x) STRINGIFY(x) // Building an actual debug version of Python is so much of a pain that it never happens. I'm providing release-build // versions of assertions. 
#if defined(PYODBC_ASSERT) && defined(_MSC_VER) #include inline void FailAssert(const char* szFile, size_t line, const char* szExpr) { printf("assertion failed: %s(%d)\n%s\n", szFile, (int)line, szExpr); __debugbreak(); // _CrtDbgBreak(); } #define I(expr) if (!(expr)) FailAssert(__FILE__, __LINE__, #expr); #define N(expr) if (expr) FailAssert(__FILE__, __LINE__, #expr); #else #define I(expr) #define N(expr) #endif #ifdef PYODBC_TRACE void DebugTrace(const char* szFmt, ...); #else inline void DebugTrace(const char* szFmt, ...) { UNUSED(szFmt); } #endif #define TRACE DebugTrace // #ifdef PYODBC_LEAK_CHECK // #define pyodbc_malloc(len) _pyodbc_malloc(__FILE__, __LINE__, len) // void* _pyodbc_malloc(const char* filename, int lineno, size_t len); // void pyodbc_free(void* p); // void pyodbc_leak_check(); // #else #define pyodbc_malloc malloc #define pyodbc_free free // #endif // issue #880: entry missing from iODBC sqltypes.h #ifndef BYTE typedef unsigned char BYTE; #endif bool pyodbc_realloc(BYTE** pp, size_t newlen); // A wrapper around realloc with a safer interface. If it is successful, *pp is updated to the // new pointer value. If not successful, it is not modified. (It is easy to forget and lose // the old pointer value with realloc.) 
void PrintBytes(void* p, size_t len); const char* CTypeName(SQLSMALLINT n); const char* SqlTypeName(SQLSMALLINT n); #include "pyodbccompat.h" #define HERE printf("%s(%d)\n", __FILE__, __LINE__) #endif // pyodbc_h ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1629144457.0 pyodbc-4.0.32/src/pyodbc.pyi0000664000175000017500000003145600000000000017226 0ustar00mkleehammermkleehammerfrom __future__ import annotations from typing import Any, Callable, Dict, Generator, Iterator, List, Optional, Sequence, Tuple, Union # SQLSetConnectAttr attributes # ref: https://docs.microsoft.com/en-us/sql/odbc/reference/syntax/sqlsetconnectattr-function SQL_ATTR_ACCESS_MODE: int SQL_ATTR_AUTOCOMMIT: int SQL_ATTR_CURRENT_CATALOG: int SQL_ATTR_LOGIN_TIMEOUT: int SQL_ATTR_ODBC_CURSORS: int SQL_ATTR_QUIET_MODE: int SQL_ATTR_TRACE: int SQL_ATTR_TRACEFILE: int SQL_ATTR_TRANSLATE_LIB: int SQL_ATTR_TRANSLATE_OPTION: int SQL_ATTR_TXN_ISOLATION: int # other (e.g. specific to certain RDBMSs) SQL_ACCESS_MODE: int SQL_AUTOCOMMIT: int SQL_CURRENT_QUALIFIER: int SQL_LOGIN_TIMEOUT: int SQL_ODBC_CURSORS: int SQL_OPT_TRACE: int SQL_OPT_TRACEFILE: int SQL_PACKET_SIZE: int SQL_QUIET_MODE: int SQL_TRANSLATE_DLL: int SQL_TRANSLATE_OPTION: int SQL_TXN_ISOLATION: int # Unicode SQL_ATTR_ANSI_APP: int # ODBC column data types # https://docs.microsoft.com/en-us/sql/odbc/reference/appendixes/appendix-d-data-types SQL_UNKNOWN_TYPE: int SQL_CHAR: int SQL_VARCHAR: int SQL_LONGVARCHAR: int SQL_WCHAR: int SQL_WVARCHAR: int SQL_WLONGVARCHAR: int SQL_DECIMAL: int SQL_NUMERIC: int SQL_SMALLINT: int SQL_INTEGER: int SQL_REAL: int SQL_FLOAT: int SQL_DOUBLE: int SQL_BIT: int SQL_TINYINT: int SQL_BIGINT: int SQL_BINARY: int SQL_VARBINARY: int SQL_LONGVARBINARY: int SQL_TYPE_DATE: int SQL_TYPE_TIME: int SQL_TYPE_TIMESTAMP: int SQL_SS_TIME2: int SQL_SS_XML: int SQL_INTERVAL_MONTH: int SQL_INTERVAL_YEAR: int SQL_INTERVAL_YEAR_TO_MONTH: int SQL_INTERVAL_DAY: int SQL_INTERVAL_HOUR: int 
SQL_INTERVAL_MINUTE: int SQL_INTERVAL_SECOND: int SQL_INTERVAL_DAY_TO_HOUR: int SQL_INTERVAL_DAY_TO_MINUTE: int SQL_INTERVAL_DAY_TO_SECOND: int SQL_INTERVAL_HOUR_TO_MINUTE: int SQL_INTERVAL_HOUR_TO_SECOND: int SQL_INTERVAL_MINUTE_TO_SECOND: int SQL_GUID: int # SQLDescribeCol SQL_NO_NULLS: int SQL_NULLABLE: int SQL_NULLABLE_UNKNOWN: int # specific to pyodbc SQL_WMETADATA: int # SQL_CONVERT_X SQL_CONVERT_FUNCTIONS: int SQL_CONVERT_BIGINT: int SQL_CONVERT_BINARY: int SQL_CONVERT_BIT: int SQL_CONVERT_CHAR: int SQL_CONVERT_DATE: int SQL_CONVERT_DECIMAL: int SQL_CONVERT_DOUBLE: int SQL_CONVERT_FLOAT: int SQL_CONVERT_GUID: int SQL_CONVERT_INTEGER: int SQL_CONVERT_INTERVAL_DAY_TIME: int SQL_CONVERT_INTERVAL_YEAR_MONTH: int SQL_CONVERT_LONGVARBINARY: int SQL_CONVERT_LONGVARCHAR: int SQL_CONVERT_NUMERIC: int SQL_CONVERT_REAL: int SQL_CONVERT_SMALLINT: int SQL_CONVERT_TIME: int SQL_CONVERT_TIMESTAMP: int SQL_CONVERT_TINYINT: int SQL_CONVERT_VARBINARY: int SQL_CONVERT_VARCHAR: int SQL_CONVERT_WCHAR: int SQL_CONVERT_WLONGVARCHAR: int SQL_CONVERT_WVARCHAR: int # transaction isolation # ref: https://docs.microsoft.com/en-us/sql/relational-databases/native-client-odbc-cursors/properties/cursor-transaction-isolation-level SQL_TXN_READ_COMMITTED: int SQL_TXN_READ_UNCOMMITTED: int SQL_TXN_REPEATABLE_READ: int SQL_TXN_SERIALIZABLE: int # outer join capabilities SQL_OJ_LEFT: int SQL_OJ_RIGHT: int SQL_OJ_FULL: int SQL_OJ_NESTED: int SQL_OJ_NOT_ORDERED: int SQL_OJ_INNER: int SQL_OJ_ALL_COMPARISON_OPS: int # other ODBC database constants SQL_SCOPE_CURROW: int SQL_SCOPE_TRANSACTION: int SQL_SCOPE_SESSION: int SQL_PC_UNKNOWN: int SQL_PC_NOT_PSEUDO: int SQL_PC_PSEUDO: int # SQL_INDEX_BTREE: int # SQL_INDEX_CLUSTERED: int # SQL_INDEX_CONTENT: int # SQL_INDEX_HASHED: int # SQL_INDEX_OTHER: int # attributes for the ODBC SQLGetInfo function # https://docs.microsoft.com/en-us/sql/odbc/reference/syntax/sqlgetinfo-function SQL_ACCESSIBLE_PROCEDURES: int SQL_ACCESSIBLE_TABLES: int 
SQL_ACTIVE_ENVIRONMENTS: int SQL_AGGREGATE_FUNCTIONS: int SQL_ALTER_DOMAIN: int SQL_ALTER_TABLE: int SQL_ASYNC_MODE: int SQL_BATCH_ROW_COUNT: int SQL_BATCH_SUPPORT: int SQL_BOOKMARK_PERSISTENCE: int SQL_CATALOG_LOCATION: int SQL_CATALOG_NAME: int SQL_CATALOG_NAME_SEPARATOR: int SQL_CATALOG_TERM: int SQL_CATALOG_USAGE: int SQL_COLLATION_SEQ: int SQL_COLUMN_ALIAS: int SQL_CONCAT_NULL_BEHAVIOR: int SQL_CONVERT_VARCHAR: int SQL_CORRELATION_NAME: int SQL_CREATE_ASSERTION: int SQL_CREATE_CHARACTER_SET: int SQL_CREATE_COLLATION: int SQL_CREATE_DOMAIN: int SQL_CREATE_SCHEMA: int SQL_CREATE_TABLE: int SQL_CREATE_TRANSLATION: int SQL_CREATE_VIEW: int SQL_CURSOR_COMMIT_BEHAVIOR: int SQL_CURSOR_ROLLBACK_BEHAVIOR: int # SQL_CURSOR_ROLLBACK_SQL_CURSOR_SENSITIVITY: int SQL_DATABASE_NAME: int SQL_DATA_SOURCE_NAME: int SQL_DATA_SOURCE_READ_ONLY: int SQL_DATETIME_LITERALS: int SQL_DBMS_NAME: int SQL_DBMS_VER: int SQL_DDL_INDEX: int SQL_DEFAULT_TXN_ISOLATION: int SQL_DESCRIBE_PARAMETER: int SQL_DM_VER: int SQL_DRIVER_HDESC: int SQL_DRIVER_HENV: int SQL_DRIVER_HLIB: int SQL_DRIVER_HSTMT: int SQL_DRIVER_NAME: int SQL_DRIVER_ODBC_VER: int SQL_DRIVER_VER: int SQL_DROP_ASSERTION: int SQL_DROP_CHARACTER_SET: int SQL_DROP_COLLATION: int SQL_DROP_DOMAIN: int SQL_DROP_SCHEMA: int SQL_DROP_TABLE: int SQL_DROP_TRANSLATION: int SQL_DROP_VIEW: int SQL_DYNAMIC_CURSOR_ATTRIBUTES1: int SQL_DYNAMIC_CURSOR_ATTRIBUTES2: int SQL_EXPRESSIONS_IN_ORDERBY: int SQL_FILE_USAGE: int SQL_FORWARD_ONLY_CURSOR_ATTRIBUTES1: int SQL_FORWARD_ONLY_CURSOR_ATTRIBUTES2: int SQL_GETDATA_EXTENSIONS: int SQL_GROUP_BY: int SQL_IDENTIFIER_CASE: int SQL_IDENTIFIER_QUOTE_CHAR: int SQL_INDEX_KEYWORDS: int SQL_INFO_SCHEMA_VIEWS: int SQL_INSERT_STATEMENT: int SQL_INTEGRITY: int SQL_KEYSET_CURSOR_ATTRIBUTES1: int SQL_KEYSET_CURSOR_ATTRIBUTES2: int SQL_KEYWORDS: int SQL_LIKE_ESCAPE_CLAUSE: int SQL_MAX_ASYNC_CONCURRENT_STATEMENTS: int SQL_MAX_BINARY_LITERAL_LEN: int SQL_MAX_CATALOG_NAME_LEN: int SQL_MAX_CHAR_LITERAL_LEN: int 
SQL_MAX_COLUMNS_IN_GROUP_BY: int SQL_MAX_COLUMNS_IN_INDEX: int SQL_MAX_COLUMNS_IN_ORDER_BY: int SQL_MAX_COLUMNS_IN_SELECT: int SQL_MAX_COLUMNS_IN_TABLE: int SQL_MAX_COLUMN_NAME_LEN: int SQL_MAX_CONCURRENT_ACTIVITIES: int SQL_MAX_CURSOR_NAME_LEN: int SQL_MAX_DRIVER_CONNECTIONS: int SQL_MAX_IDENTIFIER_LEN: int SQL_MAX_INDEX_SIZE: int SQL_MAX_PROCEDURE_NAME_LEN: int SQL_MAX_ROW_SIZE: int SQL_MAX_ROW_SIZE_INCLUDES_LONG: int SQL_MAX_SCHEMA_NAME_LEN: int SQL_MAX_STATEMENT_LEN: int SQL_MAX_TABLES_IN_SELECT: int SQL_MAX_TABLE_NAME_LEN: int SQL_MAX_USER_NAME_LEN: int SQL_MULTIPLE_ACTIVE_TXN: int SQL_MULT_RESULT_SETS: int SQL_NEED_LONG_DATA_LEN: int SQL_NON_NULLABLE_COLUMNS: int SQL_NULL_COLLATION: int SQL_NUMERIC_FUNCTIONS: int SQL_ODBC_INTERFACE_CONFORMANCE: int SQL_ODBC_VER: int SQL_OJ_CAPABILITIES: int SQL_ORDER_BY_COLUMNS_IN_SELECT: int SQL_PARAM_ARRAY_ROW_COUNTS: int SQL_PARAM_ARRAY_SELECTS: int SQL_PARAM_TYPE_UNKNOWN: int SQL_PARAM_INPUT: int SQL_PARAM_INPUT_OUTPUT: int SQL_PARAM_OUTPUT: int SQL_RETURN_VALUE: int SQL_RESULT_COL: int SQL_PROCEDURES: int SQL_PROCEDURE_TERM: int SQL_QUOTED_IDENTIFIER_CASE: int SQL_ROW_UPDATES: int SQL_SCHEMA_TERM: int SQL_SCHEMA_USAGE: int SQL_SCROLL_OPTIONS: int SQL_SEARCH_PATTERN_ESCAPE: int SQL_SERVER_NAME: int SQL_SPECIAL_CHARACTERS: int SQL_SQL92_DATETIME_FUNCTIONS: int SQL_SQL92_FOREIGN_KEY_DELETE_RULE: int SQL_SQL92_FOREIGN_KEY_UPDATE_RULE: int SQL_SQL92_GRANT: int SQL_SQL92_NUMERIC_VALUE_FUNCTIONS: int SQL_SQL92_PREDICATES: int SQL_SQL92_RELATIONAL_JOIN_OPERATORS: int SQL_SQL92_REVOKE: int SQL_SQL92_ROW_VALUE_CONSTRUCTOR: int SQL_SQL92_STRING_FUNCTIONS: int SQL_SQL92_VALUE_EXPRESSIONS: int SQL_SQL_CONFORMANCE: int SQL_STANDARD_CLI_CONFORMANCE: int SQL_STATIC_CURSOR_ATTRIBUTES1: int SQL_STATIC_CURSOR_ATTRIBUTES2: int SQL_STRING_FUNCTIONS: int SQL_SUBQUERIES: int SQL_SYSTEM_FUNCTIONS: int SQL_TABLE_TERM: int SQL_TIMEDATE_ADD_INTERVALS: int SQL_TIMEDATE_DIFF_INTERVALS: int SQL_TIMEDATE_FUNCTIONS: int SQL_TXN_CAPABLE: int 
SQL_TXN_ISOLATION_OPTION: int SQL_UNION: int SQL_USER_NAME: int SQL_XOPEN_CLI_YEAR: int # pyodbc-specific constants BinaryNull: Any # to distinguish binary NULL values from char NULL values # module attributes # https://www.python.org/dev/peps/pep-0249/#globals # read-only version: str # not pep-0249 apilevel: str threadsafety: int paramstyle: str # read-write pooling: bool lowercase: bool native_uuid: bool # exceptions # https://www.python.org/dev/peps/pep-0249/#exceptions class Warning(Exception): ... class Error(Exception): ... class DatabaseError(Error): ... class DataError(DatabaseError): ... class OperationalError(DatabaseError): ... class IntegrityError(DatabaseError): ... class InternalError(DatabaseError): ... class ProgrammingError(DatabaseError): ... class NotSupportedError(DatabaseError): ... # an ODBC connection to the database, for managing database transactions and creating cursors # https://www.python.org/dev/peps/pep-0249/#connection-objects class Connection: # read-write attributes autocommit: bool maxwrite: int timeout: int # read-only attributes searchescape: str # implemented dunder methods def __enter__(self) -> Connection: ... def __exit__(self, exc_type, exc_value, traceback) -> None: ... # define text encoding for data, metadata, etc. def setencoding(self, encoding: str, ctype: Optional[int]) -> None: ... def setdecoding(self, encoding: str, ctype: Optional[int]) -> None: ... # connection attributes def getinfo(self, infotype: int, /) -> Any: ... def set_attr(self, attr_id: int, value: int, /) -> None: ... # handle non-standard database data types def add_output_converter(self, sqltype: int, new_converter: Callable, /) -> None: ... def get_output_converter(self, sqltype: int, /) -> Optional[Callable]: ... def remove_output_converter(self, sqltype: int, /) -> None: ... def clear_output_converters(self) -> None: ... # query functions (in rough order of use) def cursor(self) -> Cursor: ... def execute(self, sql: str, *params) -> Cursor: ... 
def commit(self) -> None: ... def rollback(self) -> None: ... def close(self) -> None: ... # cursors are vehicles for executing SQL statements and returning their results # https://www.python.org/dev/peps/pep-0249/#cursor-objects class Cursor: # read-write attributes arraysize: int fast_executemany: bool noscan: bool # read-only attributes description: Tuple[Tuple[str, Any, int, int, int, int, bool]] messages: Optional[List[Tuple[str, Union[str, bytes]]]] rowcount: int connection: Connection # implemented dunder methods def __enter__(self) -> Cursor: ... def __exit__(self, exc_type, exc_value, traceback) -> None: ... def __iter__(self, /) -> Cursor: ... def __next__(self, /) -> Row: ... # query functions (in rough order of use) def setinputsizes(self, sizes: List[Tuple[int, int, int]], /) -> None: ... def setoutputsize(self) -> None: ... def execute(self, sql: str, *params) -> Cursor: ... def executemany(self, sql: str, params: Union[Sequence, Iterator, Generator], /) -> None: ... def fetchone(self) -> Row: ... def fetchmany(self, size: int, /) -> List[Row]: ... def fetchall(self) -> List[Row]: ... def fetchval(self) -> Any: ... def skip(self, count: int, /) -> None: ... def nextset(self) -> bool: ... def commit(self) -> None: ... def rollback(self) -> None: ... def cancel(self) -> None: ... def close(self) -> None: ... # metadata functions def tables(self) -> Cursor: ... def columns(self) -> Cursor: ... def statistics(self) -> Cursor: ... def rowIdColumns(self) -> Cursor: ... def rowVerColumns(self) -> Cursor: ... def primaryKeys(self) -> Cursor: ... def foreignKeys(self) -> Cursor: ... def getTypeInfo(self) -> Cursor: ... def procedures(self) -> Cursor: ... def procedureColumns(self) -> Cursor: ... # a Row object represents a single database record, and behaves somewhat similar to a NamedTuple class Row: cursor_description: Tuple[Tuple[str, Any, int, int, int, int, bool]] # implemented dunder methods def __contains__(self, key, /) -> int: ... 
def __delattr__(self, name, /) -> None: ... def __delitem__(self, key, /) -> None: ... def __eq__(self, value, /) -> bool: ... def __ge__(self, value, /) -> bool: ... def __getattribute__(self, name, /) -> Any: ... def __getitem__(self, key, /) -> Any: ... def __gt__(self, value, /) -> bool: ... def __le__(self, value, /) -> bool: ... def __len__(self, /) -> int: ... def __lt__(self, value, /) -> bool: ... def __ne__(self, value, /) -> bool: ... def __reduce__(self) -> Any: ... def __repr__(self, /) -> str: ... def __setattr__(self, name, value, /) -> None: ... def __setitem__(self, key, value, /) -> None: ... # module functions def dataSources() -> Dict[str, str]: ... def drivers() -> List[str]: ... def setDecimalSeparator(sep: str, /) -> None: ... def getDecimalSeparator() -> str: ... # https://www.python.org/dev/peps/pep-0249/#connect def connect(connstring: str, /, *, # only positional parameters before, only named parameters after autocommit: bool = False, encoding: str = 'utf-16le', ansi: bool = False, readonly: bool = False, timeout: int = 0, attrs_before: dict = {}, **kwargs) -> Connection: ... ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1619054768.0 pyodbc-4.0.32/src/pyodbc.rc0000664000175000017500000000327000000000000017022 0ustar00mkleehammermkleehammer // This was a Microsoft Visual C++ generated resource script, but I've hand modified it to // remove the afxres.h include. Apparently Visual Studio 2008 Express (used to build the // Python 2.6 version) does not include afxres.h which is part of MFC. This will probably // not be editable in the Visual Studio resource editor. #include #include "resource.h" #define STRINGIZER(version) #version #define PRODUCT_VERSION_STRING(major,minor) STRINGIZER(major) "." STRINGIZER(minor) #define FILE_VERSION_STRING(major,minor,micro,build) STRINGIZER(major) "." STRINGIZER(minor) "." STRINGIZER(micro) "." 
STRINGIZER(build) LANGUAGE LANG_ENGLISH, SUBLANG_ENGLISH_US #pragma code_page(1252) VS_VERSION_INFO VERSIONINFO FILEVERSION PYODBC_MAJOR,PYODBC_MINOR,PYODBC_MICRO,PYODBC_BUILD PRODUCTVERSION PYODBC_MAJOR,PYODBC_MINOR,PYODBC_MICRO,PYODBC_BUILD FILEFLAGSMASK 0x17L #ifdef _DEBUG FILEFLAGS 0x1L #else FILEFLAGS 0x0L #endif FILEOS 0x4L FILETYPE 0x2L FILESUBTYPE 0x0L BEGIN BLOCK "StringFileInfo" BEGIN BLOCK "040904b0" BEGIN VALUE "Copyright", "Copyright 2009 Michael Kleehammer" VALUE "ProductName", "ODBC DB API 2.0 Module" VALUE "ProductVersion", PRODUCT_VERSION_STRING(PYODBC_MAJOR,PYODBC_MINOR) VALUE "FileDescription", "ODBC DB API 2.0 Module" VALUE "FileVersion", FILE_VERSION_STRING(PYODBC_MAJOR,PYODBC_MINOR,PYODBC_MICRO,PYODBC_BUILD) VALUE "InternalName", "pyodbc" VALUE "OriginalFilename", "pyodbc.pyd" END END BLOCK "VarFileInfo" BEGIN VALUE "Translation", 0x409, 1200 END END ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1619054768.0 pyodbc-4.0.32/src/pyodbccompat.cpp0000664000175000017500000000167500000000000020413 0ustar00mkleehammermkleehammer#include "pyodbc.h" bool Text_EqualsI(PyObject* lhs, const char* rhs) { #if PY_MAJOR_VERSION < 3 // In Python 2, allow ANSI strings. 
if (lhs && PyString_Check(lhs)) return _strcmpi(PyString_AS_STRING(lhs), rhs) == 0; #endif if (lhs == 0 || !PyUnicode_Check(lhs)) return false; Py_ssize_t cchLHS = PyUnicode_GET_SIZE(lhs); Py_ssize_t cchRHS = (Py_ssize_t)strlen(rhs); if (cchLHS != cchRHS) return false; Py_UNICODE* p = PyUnicode_AS_UNICODE(lhs); for (Py_ssize_t i = 0; i < cchLHS; i++) { int chL = (int)Py_UNICODE_TOUPPER(p[i]); int chR = (int)toupper(rhs[i]); if (chL != chR) return false; } return true; } #if PY_MAJOR_VERSION < 3 int PyCodec_KnownEncoding(const char *encoding) { PyObject* codec = _PyCodec_Lookup(encoding); if (codec) { Py_DECREF(codec); return 1; } PyErr_Clear(); return 0; } #endif ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1619054768.0 pyodbc-4.0.32/src/pyodbccompat.h0000664000175000017500000001102100000000000020042 0ustar00mkleehammermkleehammer#ifndef PYODBCCOMPAT_H #define PYODBCCOMPAT_H // Macros and functions to ease compatibility with Python 2 and Python 3. #if PY_VERSION_HEX >= 0x03000000 && PY_VERSION_HEX < 0x03010000 #error Python 3.0 is not supported. Please use 3.1 and higher. #endif // Macros introduced in 2.6, backported for 2.4 and 2.5. #ifndef PyVarObject_HEAD_INIT #define PyVarObject_HEAD_INIT(type, size) PyObject_HEAD_INIT(type) size, #endif #ifndef Py_TYPE #define Py_TYPE(ob) (((PyObject*)(ob))->ob_type) #endif // Macros were introduced in 2.6 to map "bytes" to "str" in Python 2. Back port to 2.5. #if PY_VERSION_HEX >= 0x02060000 #include #else #define PyBytes_AS_STRING PyString_AS_STRING #define PyBytes_Check PyString_Check #define PyBytes_CheckExact PyString_CheckExact #define PyBytes_FromStringAndSize PyString_FromStringAndSize #define PyBytes_GET_SIZE PyString_GET_SIZE #define PyBytes_Size PyString_Size #define _PyBytes_Resize _PyString_Resize #endif // Used for items that are ANSI in Python 2 and Unicode in Python 3 or in int 2 and long in 3. 
#if PY_MAJOR_VERSION >= 3 #define PyString_FromString PyUnicode_FromString #define PyString_FromStringAndSize PyUnicode_FromStringAndSize #define PyString_Check PyUnicode_Check #define PyString_Type PyUnicode_Type #define PyString_Size PyUnicode_Size #define PyInt_FromLong PyLong_FromLong #define PyInt_AsLong PyLong_AsLong #define PyInt_AS_LONG PyLong_AS_LONG #define PyInt_Type PyLong_Type #define PyString_FromFormatV PyUnicode_FromFormatV #define PyString_FromFormat PyUnicode_FromFormat #define Py_TPFLAGS_HAVE_ITER 0 #define PyString_AsString PyUnicode_AsString #define TEXT_T Py_UNICODE #define PyString_Join PyUnicode_Join inline void PyString_ConcatAndDel(PyObject** lhs, PyObject* rhs) { PyUnicode_Concat(*lhs, rhs); Py_DECREF(rhs); } #else #include #include #include #define TEXT_T char #define PyString_Join _PyString_Join #endif inline PyObject* Text_New(Py_ssize_t length) { // Returns a new, uninitialized String (Python 2) or Unicode object (Python 3) object. #if PY_MAJOR_VERSION < 3 return PyString_FromStringAndSize(0, length); #else return PyUnicode_FromUnicode(0, length); #endif } inline TEXT_T* Text_Buffer(PyObject* o) { #if PY_MAJOR_VERSION < 3 I(PyString_Check(o)); return PyString_AS_STRING(o); #else I(PyUnicode_Check(o)); return PyUnicode_AS_UNICODE(o); #endif } inline bool IntOrLong_Check(PyObject* o) { // A compatability function to check for an int or long. Python 3 doesn't differentate // anymore. // A compatibility function that determines if the object is a string, based on the version of Python. // For Python 2, an ASCII or Unicode string is allowed. For Python 3, it must be a Unicode object. #if PY_MAJOR_VERSION < 3 if (o && PyInt_Check(o)) return true; #endif return o && PyLong_Check(o); } inline bool Text_Check(PyObject* o) { // A compatibility function that determines if the object is a string, based on the version of Python. // For Python 2, an ASCII or Unicode string is allowed. For Python 3, it must be a Unicode object. 
#if PY_MAJOR_VERSION < 3 if (o && PyString_Check(o)) return true; #endif return o && PyUnicode_Check(o); } bool Text_EqualsI(PyObject* lhs, const char* rhs); // Case-insensitive comparison for a Python string object (Unicode in Python 3, ASCII or Unicode in Python 2) against // an ASCII string. If lhs is 0 or None, false is returned. inline Py_ssize_t Text_Size(PyObject* o) { #if PY_MAJOR_VERSION < 3 if (o && PyString_Check(o)) return PyString_GET_SIZE(o); #endif return (o && PyUnicode_Check(o)) ? PyUnicode_GET_SIZE(o) : 0; } inline Py_ssize_t TextCopyToUnicode(Py_UNICODE* buffer, PyObject* o) { // Copies a String or Unicode object to a Unicode buffer and returns the number of characters copied. // No NULL terminator is appended! #if PY_MAJOR_VERSION < 3 if (PyBytes_Check(o)) { const Py_ssize_t cch = PyBytes_GET_SIZE(o); const char * pch = PyBytes_AS_STRING(o); for (Py_ssize_t i = 0; i < cch; i++) *buffer++ = (Py_UNICODE)*pch++; return cch; } else { #endif Py_ssize_t cch = PyUnicode_GET_SIZE(o); memcpy(buffer, PyUnicode_AS_UNICODE(o), cch * sizeof(Py_UNICODE)); return cch; #if PY_MAJOR_VERSION < 3 } #endif } #if PY_MAJOR_VERSION < 3 int PyCodec_KnownEncoding(const char *encoding); #endif #endif // PYODBCCOMPAT_H ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1619054768.0 pyodbc-4.0.32/src/pyodbcdbg.cpp0000664000175000017500000001156100000000000017657 0ustar00mkleehammermkleehammer #include "pyodbc.h" #include "dbspecific.h" void PrintBytes(void* p, size_t len) { unsigned char* pch = (unsigned char*)p; for (size_t i = 0; i < len; i++) printf("%02x ", (int)pch[i]); printf("\n"); } #define _MAKESTR(n) case n: return #n const char* SqlTypeName(SQLSMALLINT n) { switch (n) { _MAKESTR(SQL_UNKNOWN_TYPE); _MAKESTR(SQL_CHAR); _MAKESTR(SQL_VARCHAR); _MAKESTR(SQL_LONGVARCHAR); _MAKESTR(SQL_NUMERIC); _MAKESTR(SQL_DECIMAL); _MAKESTR(SQL_INTEGER); _MAKESTR(SQL_SMALLINT); _MAKESTR(SQL_FLOAT); _MAKESTR(SQL_REAL); _MAKESTR(SQL_DOUBLE); 
_MAKESTR(SQL_DATETIME); _MAKESTR(SQL_WCHAR); _MAKESTR(SQL_WVARCHAR); _MAKESTR(SQL_WLONGVARCHAR); _MAKESTR(SQL_TYPE_DATE); _MAKESTR(SQL_TYPE_TIME); _MAKESTR(SQL_TYPE_TIMESTAMP); _MAKESTR(SQL_SS_TIME2); _MAKESTR(SQL_SS_XML); _MAKESTR(SQL_BINARY); _MAKESTR(SQL_VARBINARY); _MAKESTR(SQL_LONGVARBINARY); } return "unknown"; } const char* CTypeName(SQLSMALLINT n) { switch (n) { _MAKESTR(SQL_C_CHAR); _MAKESTR(SQL_C_WCHAR); _MAKESTR(SQL_C_LONG); _MAKESTR(SQL_C_SHORT); _MAKESTR(SQL_C_FLOAT); _MAKESTR(SQL_C_DOUBLE); _MAKESTR(SQL_C_NUMERIC); _MAKESTR(SQL_C_DEFAULT); _MAKESTR(SQL_C_DATE); _MAKESTR(SQL_C_TIME); _MAKESTR(SQL_C_TIMESTAMP); _MAKESTR(SQL_C_TYPE_DATE); _MAKESTR(SQL_C_TYPE_TIME); _MAKESTR(SQL_C_TYPE_TIMESTAMP); _MAKESTR(SQL_C_INTERVAL_YEAR); _MAKESTR(SQL_C_INTERVAL_MONTH); _MAKESTR(SQL_C_INTERVAL_DAY); _MAKESTR(SQL_C_INTERVAL_HOUR); _MAKESTR(SQL_C_INTERVAL_MINUTE); _MAKESTR(SQL_C_INTERVAL_SECOND); _MAKESTR(SQL_C_INTERVAL_YEAR_TO_MONTH); _MAKESTR(SQL_C_INTERVAL_DAY_TO_HOUR); _MAKESTR(SQL_C_INTERVAL_DAY_TO_MINUTE); _MAKESTR(SQL_C_INTERVAL_DAY_TO_SECOND); _MAKESTR(SQL_C_INTERVAL_HOUR_TO_MINUTE); _MAKESTR(SQL_C_INTERVAL_HOUR_TO_SECOND); _MAKESTR(SQL_C_INTERVAL_MINUTE_TO_SECOND); _MAKESTR(SQL_C_BINARY); _MAKESTR(SQL_C_BIT); _MAKESTR(SQL_C_SBIGINT); _MAKESTR(SQL_C_UBIGINT); _MAKESTR(SQL_C_TINYINT); _MAKESTR(SQL_C_SLONG); _MAKESTR(SQL_C_SSHORT); _MAKESTR(SQL_C_STINYINT); _MAKESTR(SQL_C_ULONG); _MAKESTR(SQL_C_USHORT); _MAKESTR(SQL_C_UTINYINT); _MAKESTR(SQL_C_GUID); } return "unknown"; } #ifdef PYODBC_TRACE void DebugTrace(const char* szFmt, ...) { va_list marker; va_start(marker, szFmt); vprintf(szFmt, marker); va_end(marker); } #endif #ifdef PYODBC_LEAK_CHECK // THIS IS NOT THREAD SAFE: This is only designed for the single-threaded unit tests! 
struct Allocation { const char* filename; int lineno; size_t len; void* pointer; int counter; }; static Allocation* allocs = 0; static int bufsize = 0; static int count = 0; static int allocCounter = 0; void* _pyodbc_malloc(const char* filename, int lineno, size_t len) { void* p = malloc(len); if (p == 0) return 0; if (count == bufsize) { allocs = (Allocation*)realloc(allocs, (bufsize + 20) * sizeof(Allocation)); if (allocs == 0) { // Yes we just lost the original pointer, but we don't care since everything is about to fail. This is a // debug leak check, not a production malloc that needs to be robust in low memory. bufsize = 0; count = 0; return 0; } bufsize += 20; } allocs[count].filename = filename; allocs[count].lineno = lineno; allocs[count].len = len; allocs[count].pointer = p; allocs[count].counter = allocCounter++; printf("malloc(%d): %s(%d) %d %p\n", allocs[count].counter, filename, lineno, (int)len, p); count += 1; return p; } void pyodbc_free(void* p) { if (p == 0) return; for (int i = 0; i < count; i++) { if (allocs[i].pointer == p) { printf("free(%d): %s(%d) %d %p i=%d\n", allocs[i].counter, allocs[i].filename, allocs[i].lineno, (int)allocs[i].len, allocs[i].pointer, i); memmove(&allocs[i], &allocs[i + 1], sizeof(Allocation) * (count - i - 1)); count -= 1; free(p); return; } } printf("FREE FAILED: %p\n", p); free(p); } void pyodbc_leak_check() { if (count == 0) { printf("NO LEAKS\n"); } else { printf("********************************************************************************\n"); printf("%d leaks\n", count); for (int i = 0; i < count; i++) printf("LEAK: %d %s(%d) len=%d\n", allocs[i].counter, allocs[i].filename, allocs[i].lineno, allocs[i].len); } } #endif ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1622310926.0 pyodbc-4.0.32/src/pyodbcmodule.cpp0000664000175000017500000013046100000000000020411 0ustar00mkleehammermkleehammer// Permission is hereby granted, free of charge, to any person obtaining a copy of 
this software and associated // documentation files (the "Software"), to deal in the Software without restriction, including without limitation the // rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to // permit persons to whom the Software is furnished to do so. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE // WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS // OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR // OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #include "pyodbc.h" #include "wrapper.h" #include "textenc.h" #include "pyodbcmodule.h" #include "connection.h" #include "cursor.h" #include "row.h" #include "errors.h" #include "getdata.h" #include "cnxninfo.h" #include "params.h" #include "dbspecific.h" #include #include #include static PyObject* MakeConnectionString(PyObject* existing, PyObject* parts); PyObject* pModule = 0; static char module_doc[] = "A database module for accessing databases via ODBC.\n" "\n" "This module conforms to the DB API 2.0 specification while providing\n" "non-standard convenience features. Only standard Python data types are used\n" "so additional DLLs are not required.\n" "\n" "Static Variables:\n\n" "version\n" " The module version string. Official builds will have a version in the format\n" " `major.minor.revision`, such as 2.1.7. Beta versions will have -beta appended,\n" " such as 2.1.8-beta03. 
(This would be a build before the official 2.1.8 release.)\n" " Some special test builds will have a test name (the git branch name) prepended,\n" " such as fixissue90-2.1.8-beta03.\n" "\n" "apilevel\n" " The string constant '2.0' indicating this module supports DB API level 2.0.\n" "\n" "lowercase\n" " A Boolean that controls whether column names in result rows are lowercased.\n" " This can be changed any time and affects queries executed after the change.\n" " The default is False. This can be useful when database columns have\n" " inconsistent capitalization.\n" "\n" "pooling\n" " A Boolean indicating whether connection pooling is enabled. This is a\n" " global (HENV) setting, so it can only be modified before the first\n" " connection is made. The default is True, which enables ODBC connection\n" " pooling.\n" "\n" "threadsafety\n" " The integer 1, indicating that threads may share the module but not\n" " connections. Note that connections and cursors may be used by different\n" " threads, just not at the same time.\n" "\n" "paramstyle\n" " The string constant 'qmark' to indicate parameters are identified using\n" " question marks."; PyObject* Error; PyObject* Warning; PyObject* InterfaceError; PyObject* DatabaseError; PyObject* InternalError; PyObject* OperationalError; PyObject* ProgrammingError; PyObject* IntegrityError; PyObject* DataError; PyObject* NotSupportedError; struct ExcInfo { const char* szName; const char* szFullName; PyObject** ppexc; PyObject** ppexcParent; const char* szDoc; }; #define MAKEEXCINFO(name, parent, doc) { #name, "pyodbc." #name, &name, &parent, doc } static ExcInfo aExcInfos[] = { MAKEEXCINFO(Error, PyExc_Exception, "Exception that is the base class of all other error exceptions. 
You can use\n" "this to catch all errors with one single 'except' statement."), MAKEEXCINFO(Warning, PyExc_Exception, "Exception raised for important warnings like data truncations while inserting,\n" " etc."), MAKEEXCINFO(InterfaceError, Error, "Exception raised for errors that are related to the database interface rather\n" "than the database itself."), MAKEEXCINFO(DatabaseError, Error, "Exception raised for errors that are related to the database."), MAKEEXCINFO(DataError, DatabaseError, "Exception raised for errors that are due to problems with the processed data\n" "like division by zero, numeric value out of range, etc."), MAKEEXCINFO(OperationalError, DatabaseError, "Exception raised for errors that are related to the database's operation and\n" "not necessarily under the control of the programmer, e.g. an unexpected\n" "disconnect occurs, the data source name is not found, a transaction could not\n" "be processed, a memory allocation error occurred during processing, etc."), MAKEEXCINFO(IntegrityError, DatabaseError, "Exception raised when the relational integrity of the database is affected,\n" "e.g. a foreign key check fails."), MAKEEXCINFO(InternalError, DatabaseError, "Exception raised when the database encounters an internal error, e.g. the\n" "cursor is not valid anymore, the transaction is out of sync, etc."), MAKEEXCINFO(ProgrammingError, DatabaseError, "Exception raised for programming errors, e.g. table not found or already\n" "exists, syntax error in the SQL statement, wrong number of parameters\n" "specified, etc."), MAKEEXCINFO(NotSupportedError, DatabaseError, "Exception raised in case a method or database API was used which is not\n" "supported by the database, e.g. requesting a .rollback() on a connection that\n" "does not support transaction or has transactions turned off.") }; bool pyodbc_realloc(BYTE** pp, size_t newlen) { // A wrapper around realloc with a safer interface. If it is successful, *pp is updated to the // new pointer value. 
If not successful, it is not modified. (It is easy to forget and lose // the old pointer value with realloc.) BYTE* pT = (BYTE*)realloc(*pp, newlen); if (pT == 0) return false; *pp = pT; return true; } bool UseNativeUUID() { PyObject* o = PyObject_GetAttrString(pModule, "native_uuid"); // If this fails for some reason, we'll assume false and allow the exception to pop up later. bool b = o && PyObject_IsTrue(o); Py_XDECREF(o); return b; } HENV henv = SQL_NULL_HANDLE; Py_UNICODE chDecimal = '.'; PyObject* GetClassForThread(const char* szModule, const char* szClass) { // Returns the given class, specific to the current thread's interpreter. For performance // these are cached for each thread. // // This is for internal use only, so we'll cache using only the class name. Make sure they // are unique. (That is, don't try to import classes with the same name from two different // modules.) PyObject* dict = PyThreadState_GetDict(); I(dict); if (dict == 0) { // I don't know why there wouldn't be thread state so I'm going to raise an exception // unless I find more info. return PyErr_Format(PyExc_Exception, "pyodbc: PyThreadState_GetDict returned NULL"); } // Check the cache. GetItemString returns a borrowed reference. PyObject* cls = PyDict_GetItemString(dict, szClass); if (cls) { Py_INCREF(cls); return cls; } // Import the class and cache it. GetAttrString returns a new reference. PyObject* mod = PyImport_ImportModule(szModule); if (!mod) return 0; cls = PyObject_GetAttrString(mod, szClass); Py_DECREF(mod); if (!cls) return 0; // SetItemString increments the refcount (not documented) PyDict_SetItemString(dict, szClass, cls); return cls; } bool IsInstanceForThread(PyObject* param, const char* szModule, const char* szClass, PyObject** pcls) { // Like PyObject_IsInstance but compares against a class specific to the current thread's // interpreter, for proper subinterpreter support. Uses GetClassForThread. 
// // If `param` is an instance of the given class, true is returned and a new reference to // the class, specific to the current thread, is returned via pcls. The caller is // responsible for decrementing the class. // // If `param` is not an instance, true is still returned (!) but *pcls will be zero. // // False is only returned when an exception has been raised. (That is, the return value is // not used to indicate whether the instance check matched or not.) if (param == 0) { *pcls = 0; return true; } PyObject* cls = GetClassForThread(szModule, szClass); if (!cls) { *pcls = 0; return false; } int n = PyObject_IsInstance(param, cls); // (The checks below can be compressed into just a few lines, but I was concerned it // wouldn't be clear.) if (n == 1) { // We have a match. *pcls = cls; return true; } Py_DECREF(cls); *pcls = 0; if (n == 0) { // No exception, but not a match. return true; } // n == -1; an exception occurred return false; } // Initialize the global decimal character and thousands separator character, used when parsing decimal // objects. // static void init_locale_info() { Object module(PyImport_ImportModule("locale")); if (!module) { PyErr_Clear(); return; } Object ldict(PyObject_CallMethod(module, "localeconv", 0)); if (!ldict) { PyErr_Clear(); return; } PyObject* value = PyDict_GetItemString(ldict, "decimal_point"); if (value) { if (PyBytes_Check(value) && PyBytes_Size(value) == 1) chDecimal = (Py_UNICODE)PyBytes_AS_STRING(value)[0]; if (PyUnicode_Check(value) && PyUnicode_GET_SIZE(value) == 1) chDecimal = PyUnicode_AS_UNICODE(value)[0]; } } static bool import_types() { // Note: We can only import types from C extensions since they are shared among all // interpreters. Other classes are imported per-thread via GetClassForThread. // In Python 2.5 final, PyDateTime_IMPORT no longer works unless the datetime module was previously // imported (among other problems). 
    PyObject* pdt = PyImport_ImportModule("datetime");
    if (!pdt)
        return false;
    PyDateTime_IMPORT;

    Cursor_init();
    if (!CnxnInfo_init())
        return false;
    GetData_init();
    if (!Params_init())
        return false;

    return true;
}

// Allocates the shared ODBC environment handle (henv), optionally enabling connection
// pooling first (pooling must be set before allocation), and declares ODBC 3.x
// behavior.  Returns false with a RuntimeError set on failure.
static bool AllocateEnv()
{
    PyObject* pooling = PyObject_GetAttrString(pModule, "pooling");
    bool bPooling = pooling == Py_True;
    Py_DECREF(pooling);

    if (bPooling)
    {
        if (!SQL_SUCCEEDED(SQLSetEnvAttr(SQL_NULL_HANDLE, SQL_ATTR_CONNECTION_POOLING, (SQLPOINTER)SQL_CP_ONE_PER_HENV, sizeof(int))))
        {
            PyErr_SetString(PyExc_RuntimeError, "Unable to set SQL_ATTR_CONNECTION_POOLING attribute.");
            return false;
        }
    }

    if (!SQL_SUCCEEDED(SQLAllocHandle(SQL_HANDLE_ENV, SQL_NULL_HANDLE, &henv)))
    {
        PyErr_SetString(PyExc_RuntimeError, "Can't initialize module pyodbc.  SQLAllocEnv failed.");
        return false;
    }

    if (!SQL_SUCCEEDED(SQLSetEnvAttr(henv, SQL_ATTR_ODBC_VERSION, (SQLPOINTER)SQL_OV_ODBC3, sizeof(int))))
    {
        PyErr_SetString(PyExc_RuntimeError, "Unable to set SQL_ATTR_ODBC_VERSION attribute.");
        return false;
    }

    return true;
}

// Validates one value from an attrs_before dictionary.  Integers, bytes-like objects,
// and strings are accepted; when allowSeq is true a flat (one level) sequence of those
// is also accepted.  Returns false with a TypeError set otherwise.
static bool CheckAttrsVal(PyObject *val, bool allowSeq)
{
    if (IntOrLong_Check(val)
#if PY_MAJOR_VERSION < 3
        || PyBuffer_Check(val)
#endif
#if PY_VERSION_HEX >= 0x02060000
        || PyByteArray_Check(val)
#endif
        || PyBytes_Check(val)
        || PyUnicode_Check(val))
        return true;

    if (allowSeq && PySequence_Check(val))
    {
        Py_ssize_t len = PySequence_Size(val);
        for (Py_ssize_t i = 0; i < len; i++)
        {
            Object v(PySequence_GetItem(val, i));
            if (!CheckAttrsVal(v, false))
                return false;
        }
        return true;
    }

    // PyErr_Format returns 0, so this always evaluates to false after setting the error.
    return PyErr_Format(PyExc_TypeError, "Attribute dictionary attrs must be"
                        " integers, buffers, bytes, %s",
                        allowSeq ? "strings, or sequences" : "or strings") != 0;
}

static PyObject* _CheckAttrsDict(PyObject* attrs)
{
    // The attrs_before dictionary must be keys to integer values.  If valid and non-empty,
    // increment the reference count and return the pointer to indicate the calling code should
    // keep it.
    // If empty, just return zero which indicates to the calling code it should not
    // keep the value.  If an error occurs, set an error.  The calling code must look for this
    // in the zero case.

    // We already know this is a dictionary.
    if (PyDict_Size(attrs) == 0)
        return 0;

    Py_ssize_t pos = 0;
    PyObject* key = 0;
    PyObject* value = 0;
    while (PyDict_Next(attrs, &pos, &key, &value))
    {
        if (!IntOrLong_Check(key))
            return PyErr_Format(PyExc_TypeError, "Attribute dictionary keys must be integers");

        if (!CheckAttrsVal(value, true))
            return 0;
    }

    // Valid and non-empty: hand the caller its own reference.
    Py_INCREF(attrs);
    return attrs;
}

// Map DB API recommended keywords to ODBC keywords.
struct keywordmap
{
    const char* oldname;
    const char* newname;
    PyObject* newnameObject;    // PyString object version of newname, created as needed.
};

static keywordmap keywordmaps[] =
{
    { "user",     "uid",    0 },
    { "password", "pwd",    0 },
    { "host",     "server", 0 },
};

// pyodbc.connect implementation: accepts an optional positional connection string plus
// keyword arguments, merges the keywords into the connection string (except the special
// pyodbc-only keywords handled below), and returns a new Connection object.
static PyObject* mod_connect(PyObject* self, PyObject* args, PyObject* kwargs)
{
    UNUSED(self);

    Object pConnectString;
    int fAutoCommit = 0;
    int fAnsi = 0;              // force ansi
    int fReadOnly = 0;
    long timeout = 0;
    Object encoding;

    Object attrs_before;        // Optional connect attrs set before connecting

    Py_ssize_t size = args ? PyTuple_Size(args) : 0;

    if (size > 1)
    {
        PyErr_SetString(PyExc_TypeError, "function takes at most 1 non-keyword argument");
        return 0;
    }

    if (size == 1)
    {
        if (!PyString_Check(PyTuple_GET_ITEM(args, 0)) && !PyUnicode_Check(PyTuple_GET_ITEM(args, 0)))
            return PyErr_Format(PyExc_TypeError, "argument 1 must be a string or unicode object");

        pConnectString.Attach(PyUnicode_FromObject(PyTuple_GetItem(args, 0)));
        if (!pConnectString.IsValid())
            return 0;
    }

    if (kwargs && PyDict_Size(kwargs) > 0)
    {
        // Collects the keyword/value pairs destined for the connection string.
        Object partsdict(PyDict_New());
        if (!partsdict.IsValid())
            return 0;

        Py_ssize_t pos = 0;
        PyObject* key = 0;
        PyObject* value = 0;

        Object okey;    // in case we need to allocate a new key
                        // NOTE(review): okey appears unused below — the remapped key is the
                        // cached keywordmaps[i].newnameObject instead.  Confirm before removing.

        while (PyDict_Next(kwargs, &pos, &key, &value))
        {
            if (!Text_Check(key))
                return PyErr_Format(PyExc_TypeError, "Dictionary keys passed to connect must be strings");

            // Note: key and value are *borrowed*.

            // Check for the two non-connection string keywords we accept.  (If we get many more of these, create something
            // table driven.  Are we sure there isn't a Python function to parse keywords but leave those it doesn't know?)
            // const char* szKey = PyString_AsString(key);

            if (Text_EqualsI(key, "autocommit"))
            {
                fAutoCommit = PyObject_IsTrue(value);
                continue;
            }
            if (Text_EqualsI(key, "ansi"))
            {
                fAnsi = PyObject_IsTrue(value);
                continue;
            }
            if (Text_EqualsI(key, "timeout"))
            {
                timeout = PyInt_AsLong(value);
                if (PyErr_Occurred())
                    return 0;
                continue;
            }
            if (Text_EqualsI(key, "readonly"))
            {
                fReadOnly = PyObject_IsTrue(value);
                continue;
            }
            if (Text_EqualsI(key, "attrs_before") && PyDict_Check(value))
            {
                // _CheckAttrsDict returns a new reference when the dict should be kept,
                // or zero (possibly with an error set) when it should not.
                attrs_before = _CheckAttrsDict(value);
                if (PyErr_Occurred())
                    return 0;
                continue;
            }
            if (Text_EqualsI(key, "encoding"))
            {
#if PY_MAJOR_VERSION < 3
                if (!PyString_Check(value) && !PyUnicode_Check(value))
                    return PyErr_Format(PyExc_TypeError, "encoding must be a string or unicode object");
#else
                if (!PyUnicode_Check(value))
                    return PyErr_Format(PyExc_TypeError, "encoding must be a string");
#endif
                encoding = value;
                continue;
            }

            // Map DB API recommended names to ODBC names (e.g. user --> uid).
            for (size_t i = 0; i < _countof(keywordmaps); i++)
            {
                if (Text_EqualsI(key, keywordmaps[i].oldname))
                {
                    // Lazily create (and cache forever) the PyString for the ODBC name.
                    if (keywordmaps[i].newnameObject == 0)
                    {
                        keywordmaps[i].newnameObject = PyString_FromString(keywordmaps[i].newname);
                        if (keywordmaps[i].newnameObject == 0)
                            return 0;
                    }

                    key = keywordmaps[i].newnameObject;
                    break;
                }
            }

            PyObject* str = PyObject_Str(value); // convert if necessary
            if (!str)
                return 0;

            if (PyDict_SetItem(partsdict.Get(), key, str) == -1)
            {
                Py_XDECREF(str);
                return 0;
            }

            Py_XDECREF(str);
        }

        if (PyDict_Size(partsdict.Get()))
            pConnectString.Attach(MakeConnectionString(pConnectString.Get(), partsdict));
    }

    if (!pConnectString.IsValid())
        return PyErr_Format(PyExc_TypeError, "no connection information was passed");

    if (henv == SQL_NULL_HANDLE)
    {
        if (!AllocateEnv())
            return 0;
    }

    return (PyObject*)Connection_New(pConnectString.Get(), fAutoCommit != 0, fAnsi != 0,
                                     timeout, fReadOnly != 0, attrs_before.Detach(), encoding);
}

static PyObject* mod_drivers(PyObject* self)
{
    UNUSED(self);

    if (henv == SQL_NULL_HANDLE &&
!AllocateEnv()) return 0; Object result(PyList_New(0)); if (!result) return 0; SQLCHAR szDriverDesc[500]; SWORD cbDriverDesc; SWORD cbAttrs; SQLRETURN ret; SQLUSMALLINT nDirection = SQL_FETCH_FIRST; for (;;) { ret = SQLDrivers(henv, nDirection, szDriverDesc, _countof(szDriverDesc), &cbDriverDesc, 0, 0, &cbAttrs); if (!SQL_SUCCEEDED(ret)) break; // REVIEW: This is another reason why we really need a factory that we can use. At this // point we don't have a global text encoding that we can assume for this. Somehow it // seems to be working to use UTF-8, even on Windows. Object name(PyString_FromString((const char*)szDriverDesc)); if (!name) return 0; if (PyList_Append(result, name.Get()) != 0) return 0; name.Detach(); nDirection = SQL_FETCH_NEXT; } if (ret != SQL_NO_DATA) { Py_DECREF(result); return RaiseErrorFromHandle(0, "SQLDrivers", SQL_NULL_HANDLE, SQL_NULL_HANDLE); } return result.Detach(); } static PyObject* mod_datasources(PyObject* self) { UNUSED(self); if (henv == SQL_NULL_HANDLE && !AllocateEnv()) return 0; PyObject* result = PyDict_New(); if (!result) return 0; SQLCHAR szDSN[500]; // Using a buffer larger than SQL_MAX_DSN_LENGTH + 1 for systems that ignore it SWORD cbDSN; SQLCHAR szDesc[500]; SWORD cbDesc; SQLUSMALLINT nDirection = SQL_FETCH_FIRST; SQLRETURN ret; for (;;) { ret = SQLDataSources(henv, nDirection, szDSN, _countof(szDSN), &cbDSN, szDesc, _countof(szDesc), &cbDesc); if (!SQL_SUCCEEDED(ret)) break; PyDict_SetItemString(result, (const char*)szDSN, PyString_FromString((const char*)szDesc)); nDirection = SQL_FETCH_NEXT; } if (ret != SQL_NO_DATA) { Py_DECREF(result); return RaiseErrorFromHandle(0, "SQLDataSources", SQL_NULL_HANDLE, SQL_NULL_HANDLE); } return result; } static PyObject* mod_timefromticks(PyObject* self, PyObject* args) { UNUSED(self); PyObject* num; if (!PyArg_ParseTuple(args, "O", &num)) return 0; if (!PyNumber_Check(num)) return PyErr_Format(PyExc_TypeError, "TimeFromTicks requires a number."); Object l(PyNumber_Long(num)); if 
(!l) return 0; time_t t = PyLong_AsLong(num); struct tm* fields = localtime(&t); return PyTime_FromTime(fields->tm_hour, fields->tm_min, fields->tm_sec, 0); } static PyObject* mod_datefromticks(PyObject* self, PyObject* args) { UNUSED(self); return PyDate_FromTimestamp(args); } static PyObject* mod_timestampfromticks(PyObject* self, PyObject* args) { UNUSED(self); return PyDateTime_FromTimestamp(args); } static PyObject* mod_setdecimalsep(PyObject* self, PyObject* args) { UNUSED(self); if (!PyString_Check(PyTuple_GET_ITEM(args, 0)) && !PyUnicode_Check(PyTuple_GET_ITEM(args, 0))) return PyErr_Format(PyExc_TypeError, "argument 1 must be a string or unicode object"); PyObject* value = PyUnicode_FromObject(PyTuple_GetItem(args, 0)); if (value) { if (PyBytes_Check(value) && PyBytes_Size(value) == 1) chDecimal = (Py_UNICODE)PyBytes_AS_STRING(value)[0]; if (PyUnicode_Check(value) && PyUnicode_GET_SIZE(value) == 1) chDecimal = PyUnicode_AS_UNICODE(value)[0]; } Py_RETURN_NONE; } static PyObject* mod_getdecimalsep(PyObject* self) { UNUSED(self); return PyUnicode_FromUnicode(&chDecimal, 1); } static char connect_doc[] = "connect(str, autocommit=False, ansi=False, timeout=0, **kwargs) --> Connection\n" "\n" "Accepts an ODBC connection string and returns a new Connection object.\n" "\n" "The connection string will be passed to SQLDriverConnect, so a DSN connection\n" "can be created using:\n" "\n" " cnxn = pyodbc.connect('DSN=DataSourceName;UID=user;PWD=password')\n" "\n" "To connect without requiring a DSN, specify the driver and connection\n" "information:\n" "\n" " DRIVER={SQL Server};SERVER=localhost;DATABASE=testdb;UID=user;PWD=password\n" "\n" "Note the use of braces when a value contains spaces. Refer to SQLDriverConnect\n" "documentation or the documentation of your ODBC driver for details.\n" "\n" "The connection string can be passed as the string `str`, as a list of keywords,\n" "or a combination of the two. 
Any keywords except autocommit, ansi, and timeout\n" "(see below) are simply added to the connection string.\n" "\n" " connect('server=localhost;user=me')\n" " connect(server='localhost', user='me')\n" " connect('server=localhost', user='me')\n" "\n" "The DB API recommends the keywords 'user', 'password', and 'host', but these\n" "are not valid ODBC keywords, so these will be converted to 'uid', 'pwd', and\n" "'server'.\n" "\n" "Special Keywords\n" "\n" "The following specal keywords are processed by pyodbc and are not added to the\n" "connection string. (If you must use these in your connection string, pass them\n" "as a string, not as keywords.)\n" "\n" " autocommit\n" " If False or zero, the default, transactions are created automatically as\n" " defined in the DB API 2. If True or non-zero, the connection is put into\n" " ODBC autocommit mode and statements are committed automatically.\n" " \n" " ansi\n" " By default, pyodbc first attempts to connect using the Unicode version of\n" " SQLDriverConnectW. If the driver returns IM001 indicating it does not\n" " support the Unicode version, the ANSI version is tried. Any other SQLSTATE\n" " is turned into an exception. Setting ansi to true skips the Unicode\n" " attempt and only connects using the ANSI version. This is useful for\n" " drivers that return the wrong SQLSTATE (or if pyodbc is out of date and\n" " should support other SQLSTATEs).\n" " \n" " timeout\n" " An integer login timeout in seconds, used to set the SQL_ATTR_LOGIN_TIMEOUT\n" " attribute of the connection. 
The default is 0 which means the database's\n" " default timeout, if any, is used.\n"; static char timefromticks_doc[] = "TimeFromTicks(ticks) --> datetime.time\n" "\n" "Returns a time object initialized from the given ticks value (number of seconds\n" "since the epoch; see the documentation of the standard Python time module for\n" "details)."; static char datefromticks_doc[] = "DateFromTicks(ticks) --> datetime.date\n" \ "\n" \ "Returns a date object initialized from the given ticks value (number of seconds\n" \ "since the epoch; see the documentation of the standard Python time module for\n" \ "details)."; static char timestampfromticks_doc[] = "TimestampFromTicks(ticks) --> datetime.datetime\n" \ "\n" \ "Returns a datetime object initialized from the given ticks value (number of\n" \ "seconds since the epoch; see the documentation of the standard Python time\n" \ "module for details"; static char drivers_doc[] = "drivers() --> [ DriverName1, DriverName2 ... DriverNameN ]\n" \ "\n" \ "Returns a list of installed drivers."; static char datasources_doc[] = "dataSources() --> { DSN : Description }\n" \ "\n" \ "Returns a dictionary mapping available DSNs to their descriptions."; static char setdecimalsep_doc[] = "setDecimalSeparator(string) -> None\n" \ "\n" \ "Sets the decimal separator character used when parsing NUMERIC from the database."; static char getdecimalsep_doc[] = "getDecimalSeparator() -> string\n" \ "\n" \ "Gets the decimal separator character used when parsing NUMERIC from the database."; #ifdef PYODBC_LEAK_CHECK static PyObject* mod_leakcheck(PyObject* self, PyObject* args) { UNUSED(self, args); pyodbc_leak_check(); Py_RETURN_NONE; } #endif static PyMethodDef pyodbc_methods[] = { { "connect", (PyCFunction)mod_connect, METH_VARARGS|METH_KEYWORDS, connect_doc }, { "TimeFromTicks", (PyCFunction)mod_timefromticks, METH_VARARGS, timefromticks_doc }, { "DateFromTicks", (PyCFunction)mod_datefromticks, METH_VARARGS, datefromticks_doc }, { 
      "setDecimalSeparator", (PyCFunction)mod_setdecimalsep, METH_VARARGS, setdecimalsep_doc },
    { "getDecimalSeparator", (PyCFunction)mod_getdecimalsep, METH_NOARGS, getdecimalsep_doc },
    { "TimestampFromTicks", (PyCFunction)mod_timestampfromticks, METH_VARARGS, timestampfromticks_doc },
    { "drivers", (PyCFunction)mod_drivers, METH_NOARGS, drivers_doc },
    { "dataSources", (PyCFunction)mod_datasources, METH_NOARGS, datasources_doc },
#ifdef PYODBC_LEAK_CHECK
    { "leakcheck", (PyCFunction)mod_leakcheck, METH_NOARGS, 0 },
#endif
    { 0, 0, 0, 0 }          // sentinel
};

// Zeroes the exception-class globals so a failed init can clean up safely.
static void ErrorInit()
{
    // Called during startup to initialize any variables that will be freed by ErrorCleanup.

    Error = 0;
    Warning = 0;
    InterfaceError = 0;
    DatabaseError = 0;
    InternalError = 0;
    OperationalError = 0;
    ProgrammingError = 0;
    IntegrityError = 0;
    DataError = 0;
    NotSupportedError = 0;
}

static void ErrorCleanup()
{
    // Called when an error occurs during initialization to release any objects we may have accessed.  Make sure each
    // item released was initialized to zero.  (Static objects are -- non-statics should be initialized in ErrorInit.)
Py_XDECREF(Error); Py_XDECREF(Warning); Py_XDECREF(InterfaceError); Py_XDECREF(DatabaseError); Py_XDECREF(InternalError); Py_XDECREF(OperationalError); Py_XDECREF(ProgrammingError); Py_XDECREF(IntegrityError); Py_XDECREF(DataError); Py_XDECREF(NotSupportedError); } struct ConstantDef { const char* szName; int value; }; #define MAKECONST(v) { #v, v } static const ConstantDef aConstants[] = { MAKECONST(SQL_WMETADATA), MAKECONST(SQL_UNKNOWN_TYPE), MAKECONST(SQL_CHAR), MAKECONST(SQL_VARCHAR), MAKECONST(SQL_LONGVARCHAR), MAKECONST(SQL_WCHAR), MAKECONST(SQL_WVARCHAR), MAKECONST(SQL_WLONGVARCHAR), MAKECONST(SQL_DECIMAL), MAKECONST(SQL_NUMERIC), MAKECONST(SQL_SMALLINT), MAKECONST(SQL_INTEGER), MAKECONST(SQL_REAL), MAKECONST(SQL_FLOAT), MAKECONST(SQL_DOUBLE), MAKECONST(SQL_BIT), MAKECONST(SQL_TINYINT), MAKECONST(SQL_BIGINT), MAKECONST(SQL_BINARY), MAKECONST(SQL_VARBINARY), MAKECONST(SQL_LONGVARBINARY), MAKECONST(SQL_TYPE_DATE), MAKECONST(SQL_TYPE_TIME), MAKECONST(SQL_TYPE_TIMESTAMP), MAKECONST(SQL_SS_TIME2), MAKECONST(SQL_SS_XML), MAKECONST(SQL_INTERVAL_MONTH), MAKECONST(SQL_INTERVAL_YEAR), MAKECONST(SQL_INTERVAL_YEAR_TO_MONTH), MAKECONST(SQL_INTERVAL_DAY), MAKECONST(SQL_INTERVAL_HOUR), MAKECONST(SQL_INTERVAL_MINUTE), MAKECONST(SQL_INTERVAL_SECOND), MAKECONST(SQL_INTERVAL_DAY_TO_HOUR), MAKECONST(SQL_INTERVAL_DAY_TO_MINUTE), MAKECONST(SQL_INTERVAL_DAY_TO_SECOND), MAKECONST(SQL_INTERVAL_HOUR_TO_MINUTE), MAKECONST(SQL_INTERVAL_HOUR_TO_SECOND), MAKECONST(SQL_INTERVAL_MINUTE_TO_SECOND), MAKECONST(SQL_GUID), MAKECONST(SQL_NULLABLE), MAKECONST(SQL_NO_NULLS), MAKECONST(SQL_NULLABLE_UNKNOWN), // MAKECONST(SQL_INDEX_BTREE), // MAKECONST(SQL_INDEX_CLUSTERED), // MAKECONST(SQL_INDEX_CONTENT), // MAKECONST(SQL_INDEX_HASHED), // MAKECONST(SQL_INDEX_OTHER), MAKECONST(SQL_SCOPE_CURROW), MAKECONST(SQL_SCOPE_TRANSACTION), MAKECONST(SQL_SCOPE_SESSION), MAKECONST(SQL_PC_UNKNOWN), MAKECONST(SQL_PC_NOT_PSEUDO), MAKECONST(SQL_PC_PSEUDO), // SQLGetInfo MAKECONST(SQL_ACCESSIBLE_PROCEDURES), 
MAKECONST(SQL_ACCESSIBLE_TABLES), MAKECONST(SQL_ACTIVE_ENVIRONMENTS), MAKECONST(SQL_AGGREGATE_FUNCTIONS), MAKECONST(SQL_ALTER_DOMAIN), MAKECONST(SQL_ALTER_TABLE), MAKECONST(SQL_ASYNC_MODE), MAKECONST(SQL_BATCH_ROW_COUNT), MAKECONST(SQL_BATCH_SUPPORT), MAKECONST(SQL_BOOKMARK_PERSISTENCE), MAKECONST(SQL_CATALOG_LOCATION), MAKECONST(SQL_CATALOG_NAME), MAKECONST(SQL_CATALOG_NAME_SEPARATOR), MAKECONST(SQL_CATALOG_TERM), MAKECONST(SQL_CATALOG_USAGE), MAKECONST(SQL_COLLATION_SEQ), MAKECONST(SQL_COLUMN_ALIAS), MAKECONST(SQL_CONCAT_NULL_BEHAVIOR), MAKECONST(SQL_CONVERT_VARCHAR), MAKECONST(SQL_CORRELATION_NAME), MAKECONST(SQL_CREATE_ASSERTION), MAKECONST(SQL_CREATE_CHARACTER_SET), MAKECONST(SQL_CREATE_COLLATION), MAKECONST(SQL_CREATE_DOMAIN), MAKECONST(SQL_CREATE_SCHEMA), MAKECONST(SQL_CREATE_TABLE), MAKECONST(SQL_CREATE_TRANSLATION), MAKECONST(SQL_CREATE_VIEW), MAKECONST(SQL_CURSOR_COMMIT_BEHAVIOR), MAKECONST(SQL_CURSOR_ROLLBACK_BEHAVIOR), // MAKECONST(SQL_CURSOR_ROLLBACK_SQL_CURSOR_SENSITIVITY), MAKECONST(SQL_DATABASE_NAME), MAKECONST(SQL_DATA_SOURCE_NAME), MAKECONST(SQL_DATA_SOURCE_READ_ONLY), MAKECONST(SQL_DATETIME_LITERALS), MAKECONST(SQL_DBMS_NAME), MAKECONST(SQL_DBMS_VER), MAKECONST(SQL_DDL_INDEX), MAKECONST(SQL_DEFAULT_TXN_ISOLATION), MAKECONST(SQL_DESCRIBE_PARAMETER), MAKECONST(SQL_DM_VER), MAKECONST(SQL_DRIVER_HDESC), MAKECONST(SQL_DRIVER_HENV), MAKECONST(SQL_DRIVER_HLIB), MAKECONST(SQL_DRIVER_HSTMT), MAKECONST(SQL_DRIVER_NAME), MAKECONST(SQL_DRIVER_ODBC_VER), MAKECONST(SQL_DRIVER_VER), MAKECONST(SQL_DROP_ASSERTION), MAKECONST(SQL_DROP_CHARACTER_SET), MAKECONST(SQL_DROP_COLLATION), MAKECONST(SQL_DROP_DOMAIN), MAKECONST(SQL_DROP_SCHEMA), MAKECONST(SQL_DROP_TABLE), MAKECONST(SQL_DROP_TRANSLATION), MAKECONST(SQL_DROP_VIEW), MAKECONST(SQL_DYNAMIC_CURSOR_ATTRIBUTES1), MAKECONST(SQL_DYNAMIC_CURSOR_ATTRIBUTES2), MAKECONST(SQL_EXPRESSIONS_IN_ORDERBY), MAKECONST(SQL_FILE_USAGE), MAKECONST(SQL_FORWARD_ONLY_CURSOR_ATTRIBUTES1), MAKECONST(SQL_FORWARD_ONLY_CURSOR_ATTRIBUTES2), 
MAKECONST(SQL_GETDATA_EXTENSIONS), MAKECONST(SQL_GROUP_BY), MAKECONST(SQL_IDENTIFIER_CASE), MAKECONST(SQL_IDENTIFIER_QUOTE_CHAR), MAKECONST(SQL_INDEX_KEYWORDS), MAKECONST(SQL_INFO_SCHEMA_VIEWS), MAKECONST(SQL_INSERT_STATEMENT), MAKECONST(SQL_INTEGRITY), MAKECONST(SQL_KEYSET_CURSOR_ATTRIBUTES1), MAKECONST(SQL_KEYSET_CURSOR_ATTRIBUTES2), MAKECONST(SQL_KEYWORDS), MAKECONST(SQL_LIKE_ESCAPE_CLAUSE), MAKECONST(SQL_MAX_ASYNC_CONCURRENT_STATEMENTS), MAKECONST(SQL_MAX_BINARY_LITERAL_LEN), MAKECONST(SQL_MAX_CATALOG_NAME_LEN), MAKECONST(SQL_MAX_CHAR_LITERAL_LEN), MAKECONST(SQL_MAX_COLUMNS_IN_GROUP_BY), MAKECONST(SQL_MAX_COLUMNS_IN_INDEX), MAKECONST(SQL_MAX_COLUMNS_IN_ORDER_BY), MAKECONST(SQL_MAX_COLUMNS_IN_SELECT), MAKECONST(SQL_MAX_COLUMNS_IN_TABLE), MAKECONST(SQL_MAX_COLUMN_NAME_LEN), MAKECONST(SQL_MAX_CONCURRENT_ACTIVITIES), MAKECONST(SQL_MAX_CURSOR_NAME_LEN), MAKECONST(SQL_MAX_DRIVER_CONNECTIONS), MAKECONST(SQL_MAX_IDENTIFIER_LEN), MAKECONST(SQL_MAX_INDEX_SIZE), MAKECONST(SQL_MAX_PROCEDURE_NAME_LEN), MAKECONST(SQL_MAX_ROW_SIZE), MAKECONST(SQL_MAX_ROW_SIZE_INCLUDES_LONG), MAKECONST(SQL_MAX_SCHEMA_NAME_LEN), MAKECONST(SQL_MAX_STATEMENT_LEN), MAKECONST(SQL_MAX_TABLES_IN_SELECT), MAKECONST(SQL_MAX_TABLE_NAME_LEN), MAKECONST(SQL_MAX_USER_NAME_LEN), MAKECONST(SQL_MULTIPLE_ACTIVE_TXN), MAKECONST(SQL_MULT_RESULT_SETS), MAKECONST(SQL_NEED_LONG_DATA_LEN), MAKECONST(SQL_NON_NULLABLE_COLUMNS), MAKECONST(SQL_NULL_COLLATION), MAKECONST(SQL_NUMERIC_FUNCTIONS), MAKECONST(SQL_ODBC_INTERFACE_CONFORMANCE), MAKECONST(SQL_ODBC_VER), MAKECONST(SQL_OJ_CAPABILITIES), MAKECONST(SQL_ORDER_BY_COLUMNS_IN_SELECT), MAKECONST(SQL_PARAM_ARRAY_ROW_COUNTS), MAKECONST(SQL_PARAM_ARRAY_SELECTS), MAKECONST(SQL_PARAM_TYPE_UNKNOWN), MAKECONST(SQL_PARAM_INPUT), MAKECONST(SQL_PARAM_INPUT_OUTPUT), MAKECONST(SQL_PARAM_OUTPUT), MAKECONST(SQL_RETURN_VALUE), MAKECONST(SQL_RESULT_COL), MAKECONST(SQL_PROCEDURES), MAKECONST(SQL_PROCEDURE_TERM), MAKECONST(SQL_QUOTED_IDENTIFIER_CASE), MAKECONST(SQL_ROW_UPDATES), 
MAKECONST(SQL_SCHEMA_TERM), MAKECONST(SQL_SCHEMA_USAGE), MAKECONST(SQL_SCROLL_OPTIONS), MAKECONST(SQL_SEARCH_PATTERN_ESCAPE), MAKECONST(SQL_SERVER_NAME), MAKECONST(SQL_SPECIAL_CHARACTERS), MAKECONST(SQL_SQL92_DATETIME_FUNCTIONS), MAKECONST(SQL_SQL92_FOREIGN_KEY_DELETE_RULE), MAKECONST(SQL_SQL92_FOREIGN_KEY_UPDATE_RULE), MAKECONST(SQL_SQL92_GRANT), MAKECONST(SQL_SQL92_NUMERIC_VALUE_FUNCTIONS), MAKECONST(SQL_SQL92_PREDICATES), MAKECONST(SQL_SQL92_RELATIONAL_JOIN_OPERATORS), MAKECONST(SQL_SQL92_REVOKE), MAKECONST(SQL_SQL92_ROW_VALUE_CONSTRUCTOR), MAKECONST(SQL_SQL92_STRING_FUNCTIONS), MAKECONST(SQL_SQL92_VALUE_EXPRESSIONS), MAKECONST(SQL_SQL_CONFORMANCE), MAKECONST(SQL_STANDARD_CLI_CONFORMANCE), MAKECONST(SQL_STATIC_CURSOR_ATTRIBUTES1), MAKECONST(SQL_STATIC_CURSOR_ATTRIBUTES2), MAKECONST(SQL_STRING_FUNCTIONS), MAKECONST(SQL_SUBQUERIES), MAKECONST(SQL_SYSTEM_FUNCTIONS), MAKECONST(SQL_TABLE_TERM), MAKECONST(SQL_TIMEDATE_ADD_INTERVALS), MAKECONST(SQL_TIMEDATE_DIFF_INTERVALS), MAKECONST(SQL_TIMEDATE_FUNCTIONS), MAKECONST(SQL_TXN_CAPABLE), MAKECONST(SQL_TXN_ISOLATION_OPTION), MAKECONST(SQL_UNION), MAKECONST(SQL_USER_NAME), MAKECONST(SQL_XOPEN_CLI_YEAR), // Connection Attributes MAKECONST(SQL_ACCESS_MODE), MAKECONST(SQL_ATTR_ACCESS_MODE), MAKECONST(SQL_AUTOCOMMIT), MAKECONST(SQL_ATTR_AUTOCOMMIT), MAKECONST(SQL_LOGIN_TIMEOUT), MAKECONST(SQL_ATTR_LOGIN_TIMEOUT), MAKECONST(SQL_OPT_TRACE), MAKECONST(SQL_ATTR_TRACE), MAKECONST(SQL_OPT_TRACEFILE), MAKECONST(SQL_ATTR_TRACEFILE), MAKECONST(SQL_TRANSLATE_DLL), MAKECONST(SQL_ATTR_TRANSLATE_LIB), MAKECONST(SQL_TRANSLATE_OPTION), MAKECONST(SQL_ATTR_TRANSLATE_OPTION), MAKECONST(SQL_TXN_ISOLATION), MAKECONST(SQL_ATTR_TXN_ISOLATION), MAKECONST(SQL_CURRENT_QUALIFIER), MAKECONST(SQL_ATTR_CURRENT_CATALOG), MAKECONST(SQL_ODBC_CURSORS), MAKECONST(SQL_ATTR_ODBC_CURSORS), MAKECONST(SQL_QUIET_MODE), MAKECONST(SQL_ATTR_QUIET_MODE), MAKECONST(SQL_PACKET_SIZE), MAKECONST(SQL_ATTR_ANSI_APP), // SQL_CONVERT_X MAKECONST(SQL_CONVERT_FUNCTIONS), 
MAKECONST(SQL_CONVERT_BIGINT), MAKECONST(SQL_CONVERT_BINARY), MAKECONST(SQL_CONVERT_BIT), MAKECONST(SQL_CONVERT_CHAR), MAKECONST(SQL_CONVERT_DATE), MAKECONST(SQL_CONVERT_DECIMAL), MAKECONST(SQL_CONVERT_DOUBLE), MAKECONST(SQL_CONVERT_FLOAT), MAKECONST(SQL_CONVERT_GUID), MAKECONST(SQL_CONVERT_INTEGER), MAKECONST(SQL_CONVERT_INTERVAL_DAY_TIME), MAKECONST(SQL_CONVERT_INTERVAL_YEAR_MONTH), MAKECONST(SQL_CONVERT_LONGVARBINARY), MAKECONST(SQL_CONVERT_LONGVARCHAR), MAKECONST(SQL_CONVERT_NUMERIC), MAKECONST(SQL_CONVERT_REAL), MAKECONST(SQL_CONVERT_SMALLINT), MAKECONST(SQL_CONVERT_TIME), MAKECONST(SQL_CONVERT_TIMESTAMP), MAKECONST(SQL_CONVERT_TINYINT), MAKECONST(SQL_CONVERT_VARBINARY), MAKECONST(SQL_CONVERT_VARCHAR), MAKECONST(SQL_CONVERT_WCHAR), MAKECONST(SQL_CONVERT_WLONGVARCHAR), MAKECONST(SQL_CONVERT_WVARCHAR), // SQLSetConnectAttr transaction isolation MAKECONST(SQL_ATTR_TXN_ISOLATION), MAKECONST(SQL_TXN_READ_UNCOMMITTED), MAKECONST(SQL_TXN_READ_COMMITTED), MAKECONST(SQL_TXN_REPEATABLE_READ), MAKECONST(SQL_TXN_SERIALIZABLE), // Outer Join Capabilities MAKECONST(SQL_OJ_LEFT), MAKECONST(SQL_OJ_RIGHT), MAKECONST(SQL_OJ_FULL), MAKECONST(SQL_OJ_NESTED), MAKECONST(SQL_OJ_NOT_ORDERED), MAKECONST(SQL_OJ_INNER), MAKECONST(SQL_OJ_ALL_COMPARISON_OPS), }; static bool CreateExceptions() { for (unsigned int i = 0; i < _countof(aExcInfos); i++) { ExcInfo& info = aExcInfos[i]; PyObject* classdict = PyDict_New(); if (!classdict) return false; PyObject* doc = PyString_FromString(info.szDoc); if (!doc) { Py_DECREF(classdict); return false; } PyDict_SetItemString(classdict, "__doc__", doc); Py_DECREF(doc); *info.ppexc = PyErr_NewException((char*)info.szFullName, *info.ppexcParent, classdict); if (*info.ppexc == 0) { Py_DECREF(classdict); return false; } // Keep a reference for our internal (C++) use. 
Py_INCREF(*info.ppexc); PyModule_AddObject(pModule, (char*)info.szName, *info.ppexc); } return true; } #if PY_MAJOR_VERSION >= 3 static struct PyModuleDef moduledef = { PyModuleDef_HEAD_INIT, "pyodbc", // m_name module_doc, -1, // m_size pyodbc_methods, // m_methods 0, // m_reload 0, // m_traverse 0, // m_clear 0, // m_free }; #define MODRETURN(v) v #else #define MODRETURN(v) #endif PyMODINIT_FUNC #if PY_MAJOR_VERSION >= 3 PyInit_pyodbc() #else initpyodbc(void) #endif { ErrorInit(); if (PyType_Ready(&ConnectionType) < 0 || PyType_Ready(&CursorType) < 0 || PyType_Ready(&RowType) < 0 || PyType_Ready(&CnxnInfoType) < 0) return MODRETURN(0); Object module; #if PY_MAJOR_VERSION >= 3 module.Attach(PyModule_Create(&moduledef)); #else module.Attach(Py_InitModule4("pyodbc", pyodbc_methods, module_doc, NULL, PYTHON_API_VERSION)); #endif pModule = module.Get(); if (!module || !import_types() || !CreateExceptions()) return MODRETURN(0); init_locale_info(); const char* szVersion = TOSTRING(PYODBC_VERSION); PyModule_AddStringConstant(module, "version", (char*)szVersion); PyModule_AddIntConstant(module, "threadsafety", 1); PyModule_AddStringConstant(module, "apilevel", "2.0"); PyModule_AddStringConstant(module, "paramstyle", "qmark"); PyModule_AddObject(module, "pooling", Py_True); Py_INCREF(Py_True); PyModule_AddObject(module, "lowercase", Py_False); Py_INCREF(Py_False); PyModule_AddObject(module, "native_uuid", Py_False); Py_INCREF(Py_False); PyModule_AddObject(module, "Connection", (PyObject*)&ConnectionType); Py_INCREF((PyObject*)&ConnectionType); PyModule_AddObject(module, "Cursor", (PyObject*)&CursorType); Py_INCREF((PyObject*)&CursorType); PyModule_AddObject(module, "Row", (PyObject*)&RowType); Py_INCREF((PyObject*)&RowType); // Add the SQL_XXX defines from ODBC. 
for (unsigned int i = 0; i < _countof(aConstants); i++) PyModule_AddIntConstant(module, (char*)aConstants[i].szName, aConstants[i].value); PyModule_AddObject(module, "Date", (PyObject*)PyDateTimeAPI->DateType); Py_INCREF((PyObject*)PyDateTimeAPI->DateType); PyModule_AddObject(module, "Time", (PyObject*)PyDateTimeAPI->TimeType); Py_INCREF((PyObject*)PyDateTimeAPI->TimeType); PyModule_AddObject(module, "Timestamp", (PyObject*)PyDateTimeAPI->DateTimeType); Py_INCREF((PyObject*)PyDateTimeAPI->DateTimeType); PyModule_AddObject(module, "DATETIME", (PyObject*)PyDateTimeAPI->DateTimeType); Py_INCREF((PyObject*)PyDateTimeAPI->DateTimeType); PyModule_AddObject(module, "STRING", (PyObject*)&PyString_Type); Py_INCREF((PyObject*)&PyString_Type); PyModule_AddObject(module, "NUMBER", (PyObject*)&PyFloat_Type); Py_INCREF((PyObject*)&PyFloat_Type); PyModule_AddObject(module, "ROWID", (PyObject*)&PyInt_Type); Py_INCREF((PyObject*)&PyInt_Type); PyObject* binary_type; #if PY_VERSION_HEX >= 0x02060000 binary_type = (PyObject*)&PyByteArray_Type; #else binary_type = (PyObject*)&PyBuffer_Type; #endif PyModule_AddObject(module, "BINARY", binary_type); Py_INCREF(binary_type); PyModule_AddObject(module, "Binary", binary_type); Py_INCREF(binary_type); I(null_binary != 0); // must be initialized first PyModule_AddObject(module, "BinaryNull", null_binary); PyModule_AddIntConstant(module, "UNICODE_SIZE", sizeof(Py_UNICODE)); PyModule_AddIntConstant(module, "SQLWCHAR_SIZE", sizeof(SQLWCHAR)); if (!PyErr_Occurred()) { module.Detach(); } else { ErrorCleanup(); } return MODRETURN(pModule); } #ifdef WINVER BOOL WINAPI DllMain( HINSTANCE hMod, DWORD fdwReason, LPVOID lpvReserved ) { UNUSED(hMod, fdwReason, lpvReserved); return TRUE; } #endif static PyObject* MakeConnectionString(PyObject* existing, PyObject* parts) { // Creates a connection string from an optional existing connection string plus a dictionary of keyword value // pairs. 
// // existing // Optional Unicode connection string we will be appending to. Used when a partial connection string is passed // in, followed by keyword parameters: // // connect("driver={x};database={y}", user='z') // // parts // A dictionary of text keywords and text values that will be appended. I(PyUnicode_Check(existing)); Py_ssize_t length = 0; // length in *characters* if (existing) length = Text_Size(existing) + 1; // + 1 to add a trailing semicolon Py_ssize_t pos = 0; PyObject* key = 0; PyObject* value = 0; while (PyDict_Next(parts, &pos, &key, &value)) { length += Text_Size(key) + 1 + Text_Size(value) + 1; // key=value; } PyObject* result = PyUnicode_FromUnicode(0, length); if (!result) return 0; Py_UNICODE* buffer = PyUnicode_AS_UNICODE(result); Py_ssize_t offset = 0; if (existing) { offset += TextCopyToUnicode(&buffer[offset], existing); buffer[offset++] = (Py_UNICODE)';'; } pos = 0; while (PyDict_Next(parts, &pos, &key, &value)) { offset += TextCopyToUnicode(&buffer[offset], key); buffer[offset++] = (Py_UNICODE)'='; offset += TextCopyToUnicode(&buffer[offset], value); buffer[offset++] = (Py_UNICODE)';'; } I(offset == length); return result; } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1619054768.0 pyodbc-4.0.32/src/pyodbcmodule.h0000664000175000017500000000455200000000000020057 0ustar00mkleehammermkleehammer/* * Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated * documentation files (the "Software"), to deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to * permit persons to whom the Software is furnished to do so. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE * WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS * OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #ifndef _PYPGMODULE_H #define _PYPGMODULE_H #define SQL_WMETADATA -888 // This is a custom constant that can be passed to Connection.setencoding. Pick a value that // is very different from SQL_CHAR and SQL_WCHAR and similar items. extern PyObject* Error; extern PyObject* Warning; extern PyObject* InterfaceError; extern PyObject* DatabaseError; extern PyObject* InternalError; extern PyObject* OperationalError; extern PyObject* ProgrammingError; extern PyObject* IntegrityError; extern PyObject* DataError; extern PyObject* NotSupportedError; /* Returns the given class, specific to the current thread's interpreter. For performance these are cached for each thread. This is for internal use only, so we'll cache using only the class name. Make sure they are unique. (That is, don't try to import classes with the same name from two different modules.) */ PyObject* GetClassForThread(const char* szModule, const char* szClass); bool IsInstanceForThread(PyObject* param, const char* szModule, const char* szClass, PyObject** pcls); extern PyObject* null_binary; extern HENV henv; extern PyTypeObject RowType; extern PyTypeObject CursorType; extern PyTypeObject ConnectionType; // Thd pyodbc module. extern PyObject* pModule; inline bool lowercase() { return PyObject_GetAttrString(pModule, "lowercase") == Py_True; } extern Py_UNICODE chDecimal; bool UseNativeUUID(); // Returns True if pyodbc.native_uuid is true, meaning uuid.UUID objects should be returned. 
#endif // _PYPGMODULE_H ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1619054768.0 pyodbc-4.0.32/src/resource.h0000664000175000017500000000060200000000000017210 0ustar00mkleehammermkleehammer//{{NO_DEPENDENCIES}} // Microsoft Visual C++ generated include file. // Used by pyodbc.rc // Next default values for new objects // #ifdef APSTUDIO_INVOKED #ifndef APSTUDIO_READONLY_SYMBOLS #define _APS_NEXT_RESOURCE_VALUE 101 #define _APS_NEXT_COMMAND_VALUE 40001 #define _APS_NEXT_CONTROL_VALUE 1001 #define _APS_NEXT_SYMED_VALUE 101 #endif #endif ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1619054768.0 pyodbc-4.0.32/src/row.cpp0000664000175000017500000004141600000000000016533 0ustar00mkleehammermkleehammer // Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated // documentation files (the "Software"), to deal in the Software without restriction, including without limitation the // rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to // permit persons to whom the Software is furnished to do so. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE // WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS // OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR // OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #include "pyodbc.h" #include "pyodbcmodule.h" #include "row.h" #include "wrapper.h" struct Row { // A Row must act like a sequence (a tuple of results) to meet the DB API specification, but we also allow values // to be accessed via lowercased column names. We also supply a `columns` attribute which returns the list of // column names. 
PyObject_HEAD // cursor.description, accessed as _description PyObject* description; // A Python dictionary mapping from column name to a PyInteger, used to access columns by name. PyObject* map_name_to_index; // The number of values in apValues. Py_ssize_t cValues; // The column values, stored as an array. PyObject** apValues; }; #define Row_Check(op) PyObject_TypeCheck(op, &RowType) #define Row_CheckExact(op) (Py_TYPE(op) == &RowType) void FreeRowValues(Py_ssize_t cValues, PyObject** apValues) { // Frees each pointer in the apValues buffer *and* the buffer itself. if (apValues) { for (Py_ssize_t i = 0; i < cValues; i++) Py_XDECREF(apValues[i]); pyodbc_free(apValues); } } static void Row_dealloc(PyObject* o) { // Note: Now that __newobj__ is available, our variables could be zero... Row* self = (Row*)o; Py_XDECREF(self->description); Py_XDECREF(self->map_name_to_index); FreeRowValues(self->cValues, self->apValues); PyObject_Del(self); } static PyObject* Row_getstate(PyObject* self) { // Returns a tuple containing the saved state. We don't really support empty rows, but unfortunately they can be // created now by the new constructor which was necessary for implementing pickling. In that case (everything is // zero), an empty tuple is returned. // Not exposed. Row* row = (Row*)self; if (row->description == 0) return PyTuple_New(0); Tuple state(PyTuple_New(2 + row->cValues)); if (!state.IsValid()) return 0; state[0] = row->description; state[1] = row->map_name_to_index; for (int i = 0; i < row->cValues; i++) state[i+2] = row->apValues[i]; for (int i = 0; i < 2 + row->cValues; i++) Py_XINCREF(state[i]); return state.Detach(); } static PyObject* new_check(PyObject* args) { // We don't support a normal constructor, so only allow this for unpickling. There should be a single arg that was // returned by Row_reduce. Make sure the sizes match. The desc and map should have one entry per column, which // should equal the number of remaining items. 
if (PyTuple_GET_SIZE(args) < 3) return 0; PyObject* desc = PyTuple_GET_ITEM(args, 0); PyObject* map = PyTuple_GET_ITEM(args, 1); if (!PyTuple_CheckExact(desc) || !PyDict_CheckExact(map)) return 0; Py_ssize_t cols = PyTuple_GET_SIZE(desc); if (PyDict_Size(map) != cols || PyTuple_GET_SIZE(args) - 2 != cols) return 0; PyObject** apValues = (PyObject**)pyodbc_malloc(sizeof(PyObject*) * cols); if (!apValues) return 0; for (int i = 0; i < cols; i++) { apValues[i] = PyTuple_GET_ITEM(args, i+2); Py_INCREF(apValues[i]); } // Row_Internal will incref desc and map. If something goes wrong, it will free apValues. return (PyObject*)Row_InternalNew(desc, map, cols, apValues); } static PyObject* Row_new(PyTypeObject* type, PyObject* args, PyObject* kwargs) { UNUSED(kwargs); PyObject* row = new_check(args); if (row == 0) PyErr_SetString(PyExc_TypeError, "cannot create 'pyodbc.Row' instances"); return row; } Row* Row_InternalNew(PyObject* description, PyObject* map_name_to_index, Py_ssize_t cValues, PyObject** apValues) { // Called by other modules to create rows. Takes ownership of apValues. #ifdef _MSC_VER #pragma warning(disable : 4365) #endif Row* row = PyObject_NEW(Row, &RowType); #ifdef _MSC_VER #pragma warning(default : 4365) #endif if (row) { Py_INCREF(description); row->description = description; Py_INCREF(map_name_to_index); row->map_name_to_index = map_name_to_index; row->apValues = apValues; row->cValues = cValues; } else { FreeRowValues(cValues, apValues); } return row; } static PyObject* Row_getattro(PyObject* o, PyObject* name) { // Called to handle 'row.colname'. Row* self = (Row*)o; PyObject* index = PyDict_GetItem(self->map_name_to_index, name); if (index) { Py_ssize_t i = PyNumber_AsSsize_t(index, 0); Py_INCREF(self->apValues[i]); return self->apValues[i]; } return PyObject_GenericGetAttr(o, name); } static Py_ssize_t Row_length(PyObject* self) { return ((Row*)self)->cValues; } static int Row_contains(PyObject* o, PyObject* el) { // Implementation of contains. 
The documentation is not good (non-existent?), so I copied the following from the // PySequence_Contains documentation: Return -1 if error; 1 if ob in seq; 0 if ob not in seq. Row* self = (Row*)o; int cmp = 0; for (Py_ssize_t i = 0, c = self->cValues ; cmp == 0 && i < c; ++i) cmp = PyObject_RichCompareBool(el, self->apValues[i], Py_EQ); return cmp; } PyObject* Row_item(PyObject* o, Py_ssize_t i) { // Apparently, negative indexes are handled by magic ;) -- they never make it here. Row* self = (Row*)o; if (i < 0 || i >= self->cValues) { PyErr_SetString(PyExc_IndexError, "tuple index out of range"); return NULL; } Py_INCREF(self->apValues[i]); return self->apValues[i]; } static int Row_ass_item(PyObject* o, Py_ssize_t i, PyObject* v) { // Implements row[i] = value. Row* self = (Row*)o; if (i < 0 || i >= self->cValues) { PyErr_SetString(PyExc_IndexError, "Row assignment index out of range"); return -1; } Py_XDECREF(self->apValues[i]); Py_INCREF(v); self->apValues[i] = v; return 0; } static int Row_setattro(PyObject* o, PyObject *name, PyObject* v) { Row* self = (Row*)o; PyObject* index = PyDict_GetItem(self->map_name_to_index, name); if (index) return Row_ass_item(o, PyNumber_AsSsize_t(index, 0), v); return PyObject_GenericSetAttr(o, name, v); } static PyObject* Row_repr(PyObject* o) { Row* self = (Row*)o; if (self->cValues == 0) return PyString_FromString("()"); Object pieces(PyTuple_New(self->cValues)); if (!pieces) return 0; Py_ssize_t length = 2 + (2 * (self->cValues-1)); // parens + ', ' separators for (Py_ssize_t i = 0; i < self->cValues; i++) { PyObject* piece = PyObject_Repr(self->apValues[i]); if (!piece) return 0; length += Text_Size(piece); PyTuple_SET_ITEM(pieces.Get(), i, piece); } if (self->cValues == 1) { // Need a trailing comma: (value,) length += 2; } PyObject* result = Text_New(length); if (!result) return 0; TEXT_T* buffer = Text_Buffer(result); Py_ssize_t offset = 0; buffer[offset++] = '('; for (Py_ssize_t i = 0; i < self->cValues; i++) { PyObject* 
item = PyTuple_GET_ITEM(pieces.Get(), i); memcpy(&buffer[offset], Text_Buffer(item), Text_Size(item) * sizeof(TEXT_T)); offset += Text_Size(item); if (i != self->cValues-1 || self->cValues == 1) { buffer[offset++] = ','; buffer[offset++] = ' '; } } buffer[offset++] = ')'; I(offset == length); return result; } static PyObject* Row_richcompare(PyObject* olhs, PyObject* orhs, int op) { if (!Row_Check(olhs) || !Row_Check(orhs)) { Py_INCREF(Py_NotImplemented); return Py_NotImplemented; } Row* lhs = (Row*)olhs; Row* rhs = (Row*)orhs; if (lhs->cValues != rhs->cValues) { // Different sizes, so use the same rules as the tuple class. bool result; switch (op) { case Py_EQ: result = (lhs->cValues == rhs->cValues); break; case Py_GE: result = (lhs->cValues >= rhs->cValues); break; case Py_GT: result = (lhs->cValues > rhs->cValues); break; case Py_LE: result = (lhs->cValues <= rhs->cValues); break; case Py_LT: result = (lhs->cValues < rhs->cValues); break; case Py_NE: result = (lhs->cValues != rhs->cValues); break; default: // Can't get here, but don't have a cross-compiler way to silence this. result = false; } PyObject* p = result ? Py_True : Py_False; Py_INCREF(p); return p; } for (Py_ssize_t i = 0, c = lhs->cValues; i < c; i++) if (!PyObject_RichCompareBool(lhs->apValues[i], rhs->apValues[i], Py_EQ)) return PyObject_RichCompare(lhs->apValues[i], rhs->apValues[i], op); // All items are equal. 
switch (op) { case Py_EQ: case Py_GE: case Py_LE: Py_RETURN_TRUE; case Py_GT: case Py_LT: case Py_NE: break; } Py_RETURN_FALSE; } static PyObject* Row_subscript(PyObject* o, PyObject* key) { Row* row = (Row*)o; if (PyIndex_Check(key)) { Py_ssize_t i = PyNumber_AsSsize_t(key, PyExc_IndexError); if (i == -1 && PyErr_Occurred()) return 0; if (i < 0) i += row->cValues; if (i < 0 || i >= row->cValues) return PyErr_Format(PyExc_IndexError, "row index out of range index=%d len=%d", (int)i, (int)row->cValues); Py_INCREF(row->apValues[i]); return row->apValues[i]; } if (PySlice_Check(key)) { Py_ssize_t start, stop, step, slicelength; #if PY_VERSION_HEX >= 0x03020000 if (PySlice_GetIndicesEx(key, row->cValues, &start, &stop, &step, &slicelength) < 0) return 0; #else if (PySlice_GetIndicesEx((PySliceObject*)key, row->cValues, &start, &stop, &step, &slicelength) < 0) return 0; #endif if (slicelength <= 0) return PyTuple_New(0); if (start == 0 && step == 1 && slicelength == row->cValues) { Py_INCREF(o); return o; } Object result(PyTuple_New(slicelength)); if (!result) return 0; for (Py_ssize_t i = 0, index = start; i < slicelength; i++, index += step) { PyTuple_SET_ITEM(result.Get(), i, row->apValues[index]); Py_INCREF(row->apValues[index]); } return result.Detach(); } return PyErr_Format(PyExc_TypeError, "row indices must be integers, not %.200s", Py_TYPE(key)->tp_name); } static PySequenceMethods row_as_sequence = { Row_length, // sq_length 0, // sq_concat 0, // sq_repeat Row_item, // sq_item 0, // was_sq_slice Row_ass_item, // sq_ass_item 0, // sq_ass_slice Row_contains, // sq_contains }; static PyMappingMethods row_as_mapping = { Row_length, // mp_length Row_subscript, // mp_subscript 0, // mp_ass_subscript }; static char description_doc[] = "The Cursor.description sequence from the Cursor that created this row."; static PyMemberDef Row_members[] = { { "cursor_description", T_OBJECT_EX, offsetof(Row, description), READONLY, description_doc }, { 0 } }; static PyObject* 
Row_reduce(PyObject* self, PyObject* args) { PyObject* state = Row_getstate(self); if (!state) return 0; return Py_BuildValue("ON", Py_TYPE(self), state); } static PyMethodDef Row_methods[] = { { "__reduce__", (PyCFunction)Row_reduce, METH_NOARGS, 0 }, { 0, 0, 0, 0 } }; static char row_doc[] = "Row objects are sequence objects that hold query results.\n" "\n" "They are similar to tuples in that they cannot be resized and new attributes\n" "cannot be added, but individual elements can be replaced. This allows data to\n" "be \"fixed up\" after being fetched. (For example, datetimes may be replaced by\n" "those with time zones attached.)\n" "\n" " row[0] = row[0].replace(tzinfo=timezone)\n" " print row[0]\n" "\n" "Additionally, individual values can be optionally be accessed or replaced by\n" "name. Non-alphanumeric characters are replaced with an underscore.\n" "\n" " cursor.execute(\"select customer_id, [Name With Spaces] from tmp\")\n" " row = cursor.fetchone()\n" " print row.customer_id, row.Name_With_Spaces\n" "\n" "If using this non-standard feature, it is often convenient to specify the name\n" "using the SQL 'as' keyword:\n" "\n" " cursor.execute(\"select count(*) as total from tmp\")\n" " row = cursor.fetchone()\n" " print row.total"; PyTypeObject RowType = { PyVarObject_HEAD_INIT(NULL, 0) "pyodbc.Row", // tp_name sizeof(Row), // tp_basicsize 0, // tp_itemsize Row_dealloc, // tp_dealloc 0, // tp_print 0, // tp_getattr 0, // tp_setattr 0, // tp_compare Row_repr, // tp_repr 0, // tp_as_number &row_as_sequence, // tp_as_sequence &row_as_mapping, // tp_as_mapping 0, // tp_hash 0, // tp_call 0, // tp_str Row_getattro, // tp_getattro Row_setattro, // tp_setattro 0, // tp_as_buffer Py_TPFLAGS_DEFAULT, // tp_flags row_doc, // tp_doc 0, // tp_traverse 0, // tp_clear Row_richcompare, // tp_richcompare 0, // tp_weaklistoffset 0, // tp_iter 0, // tp_iternext Row_methods, // tp_methods Row_members, // tp_members 0, // tp_getset 0, // tp_base 0, // tp_dict 0, // 
tp_descr_get 0, // tp_descr_set 0, // tp_dictoffset 0, // tp_init 0, // tp_alloc Row_new, // tp_new 0, // tp_free 0, // tp_is_gc 0, // tp_bases 0, // tp_mro 0, // tp_cache 0, // tp_subclasses 0, // tp_weaklist }; ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1619054768.0 pyodbc-4.0.32/src/row.h0000664000175000017500000000307200000000000016174 0ustar00mkleehammermkleehammer /* * Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated * documentation files (the "Software"), to deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to * permit persons to whom the Software is furnished to do so. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE * WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS * OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #ifndef ROW_H #define ROW_H struct Row; /* * Used to make a new row from an array of column values. */ Row* Row_InternalNew(PyObject* description, PyObject* map_name_to_index, Py_ssize_t cValues, PyObject** apValues); /* * Dereferences each object in apValues and frees apValue. This is the internal format used by rows. * * cValues: The number of items to free in apValues. * * apValues: The array of values. This can be NULL. 
*/ void FreeRowValues(Py_ssize_t cValues, PyObject** apValues); PyObject* Row_item(PyObject* o, Py_ssize_t i); extern PyTypeObject RowType; #define Row_Check(op) PyObject_TypeCheck(op, &RowType) #define Row_CheckExact(op) (Py_TYPE(op) == &RowType) #endif ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1629396312.0 pyodbc-4.0.32/src/textenc.cpp0000664000175000017500000001531500000000000017375 0ustar00mkleehammermkleehammer #include "pyodbc.h" #include "wrapper.h" #include "textenc.h" void SQLWChar::init(PyObject* src, const TextEnc& enc) { // Initialization code common to all of the constructors. // // Convert `src` to SQLWCHAR. static PyObject* nulls = NULL; if (src == 0 || src == Py_None) { psz = 0; isNone = true; return; } isNone = false; // If there are optimized encodings that don't require a temporary object, use them. #if PY_MAJOR_VERSION < 3 if (enc.optenc == OPTENC_RAW && PyString_Check(src)) { psz = (SQLWCHAR*)PyString_AS_STRING(src); return; } #endif #if PY_MAJOR_VERSION >= 3 if (enc.optenc == OPTENC_UTF8 && PyUnicode_Check(src)) { psz = (SQLWCHAR*)PyUnicode_AsUTF8(src); return; } #endif PyObject* pb = 0; #if PY_MAJOR_VERSION == 2 if (PyBytes_Check(src)) { // If this is Python 2, the string could already be encoded as bytes. If the encoding is // different than what we want, we have to decode to Unicode and then re-encode. PyObject* u = PyString_AsDecodedObject(src, 0, "strict"); if (u) src = u; } #endif if (!pb && PyUnicode_Check(src)) pb = PyUnicode_AsEncodedString(src, enc.name, "strict"); if (pb) { // Careful: Some encodings don't return bytes. if (!PyBytes_Check(pb)) { // REVIEW: Error or just return null? psz = 0; Py_DECREF(pb); return; } if(!nulls) nulls = PyBytes_FromStringAndSize("\0\0\0\0", 4); PyBytes_Concat(&pb, nulls); if (!pb) { psz = 0; return; } } else { // If the encoding failed (possibly due to "strict"), it will generate an exception, but // we're going to continue. 
PyErr_Clear(); psz = 0; } if (pb) { bytes.Attach(pb); psz = (SQLWCHAR*)PyBytes_AS_STRING(pb); } } PyObject* TextEnc::Encode(PyObject* obj) const { #if PY_MAJOR_VERSION < 3 if (optenc == OPTENC_RAW || PyBytes_Size(obj) == 0) { Py_INCREF(obj); return obj; } #endif PyObject* bytes = PyCodec_Encode(obj, name, "strict"); if (bytes && PyErr_Occurred()) { // REVIEW: Issue #206. I am not sure what is going on here, but PyCodec_Encode // sometimes returns bytes but *also* sets an exception saying "'ascii' codec can't // encode characters...". I assume the ascii is from my sys encoding, but it seems to // be a superflous error. Since Cursor.fetchall() looks for exceptions this extraneous // error causes us to throw an exception. // // I'm putting in a work around but we should track down the root cause and report it // to the Python project if it is not ours. PyErr_Clear(); } return bytes; } #if PY_MAJOR_VERSION < 3 PyObject* EncodeStr(PyObject* str, const TextEnc& enc) { if (enc.optenc == OPTENC_RAW || PyBytes_Size(str) == 0) { // No conversion. Py_INCREF(str); return str; } else { // Encode the text with the user's encoding. Object encoded(PyCodec_Encode(str, enc.name, "strict")); if (!encoded) return 0; if (!PyBytes_CheckExact(encoded)) { // Not all encodings return bytes. PyErr_Format(PyExc_TypeError, "Unicode read encoding '%s' returned unexpected data type: %s", enc.name, encoded.Get()->ob_type->tp_name); return 0; } return encoded.Detach(); } } #endif PyObject* TextBufferToObject(const TextEnc& enc, void* pbData, Py_ssize_t cbData) { // cbData // The length of data in bytes (cb == 'count of bytes'). // NB: In each branch we make a check for a zero length string and handle it specially // since PyUnicode_Decode may (will?) fail if we pass a zero-length string. Issue #172 // first pointed this out with shift_jis. I'm not sure if it is a fault in the // implementation of this codec or if others will have it also. 
PyObject* str; #if PY_MAJOR_VERSION < 3 // The Unicode paths use the same code. if (enc.to == TO_UNICODE) { #endif if (cbData == 0) { str = PyUnicode_FromStringAndSize("", 0); } else { int byteorder = 0; switch (enc.optenc) { case OPTENC_UTF8: str = PyUnicode_DecodeUTF8((char*)pbData, cbData, "strict"); break; case OPTENC_UTF16: byteorder = BYTEORDER_NATIVE; str = PyUnicode_DecodeUTF16((char*)pbData, cbData, "strict", &byteorder); break; case OPTENC_UTF16LE: byteorder = BYTEORDER_LE; str = PyUnicode_DecodeUTF16((char*)pbData, cbData, "strict", &byteorder); break; case OPTENC_UTF16BE: byteorder = BYTEORDER_BE; str = PyUnicode_DecodeUTF16((char*)pbData, cbData, "strict", &byteorder); break; case OPTENC_LATIN1: str = PyUnicode_DecodeLatin1((char*)pbData, cbData, "strict"); break; default: // The user set an encoding by name. str = PyUnicode_Decode((char*)pbData, cbData, enc.name, "strict"); break; } } #if PY_MAJOR_VERSION < 3 } else if (cbData == 0) { str = PyString_FromStringAndSize("", 0); } else if (enc.optenc == OPTENC_RAW) { // No conversion. str = PyString_FromStringAndSize((char*)pbData, cbData); } else { // The user has requested a string object. Unfortunately we don't have // str versions of all of the optimized functions. 
const char* encoding; switch (enc.optenc) { case OPTENC_UTF8: encoding = "utf-8"; break; case OPTENC_UTF16: encoding = "utf-16"; break; case OPTENC_UTF16LE: encoding = "utf-16-le"; break; case OPTENC_UTF16BE: encoding = "utf-16-be"; break; case OPTENC_LATIN1: encoding = "latin-1"; break; default: encoding = enc.name; } str = PyString_Decode((char*)pbData, cbData, encoding, "strict"); } #endif return str; } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1619054768.0 pyodbc-4.0.32/src/textenc.h0000664000175000017500000001126100000000000017036 0ustar00mkleehammermkleehammer#ifndef _TEXTENC_H #define _TEXTENC_H enum { BYTEORDER_LE = -1, BYTEORDER_NATIVE = 0, BYTEORDER_BE = 1, OPTENC_NONE = 0, // No optimized encoding - use the named encoding OPTENC_RAW = 1, // In Python 2, pass bytes directly to string - no decoder OPTENC_UTF8 = 2, OPTENC_UTF16 = 3, // "Native", so check for BOM and default to BE OPTENC_UTF16BE = 4, OPTENC_UTF16LE = 5, OPTENC_LATIN1 = 6, OPTENC_UTF32 = 7, OPTENC_UTF32LE = 8, OPTENC_UTF32BE = 9, #if PY_MAJOR_VERSION < 3 TO_UNICODE = 1, TO_STR = 2 #endif }; #ifdef WORDS_BIGENDIAN # define OPTENC_UTF16NE OPTENC_UTF16BE # define ENCSTR_UTF16NE "utf-16be" #else # define OPTENC_UTF16NE OPTENC_UTF16LE # define ENCSTR_UTF16NE "utf-16le" #endif typedef unsigned short ODBCCHAR; // I'm not sure why, but unixODBC seems to define SQLWCHAR as wchar_t even with // the size is incorrect. So we might get 4-byte SQLWCHAR on 64-bit Linux even // though it requires 2-byte characters. We have to define our own type to // operate on. enum { ODBCCHAR_SIZE = 2 }; struct TextEnc { // Holds encoding information for reading or writing text. 
Since some drivers / databases // are not easy to configure efficiently, a separate instance of this structure is // configured for: // // * reading SQL_CHAR // * reading SQL_WCHAR // * writing unicode strings // * writing non-unicode strings (Python 2.7 only) #if PY_MAJOR_VERSION < 3 int to; // The type of object to return if reading from the database: str or unicode. #endif int optenc; // Set to one of the OPTENC constants to indicate whether an optimized encoding is to be // used or a custom one. If OPTENC_NONE, no optimized encoding is set and `name` should be // used. const char* name; // The name of the encoding. This must be freed using `free`. SQLSMALLINT ctype; // The C type to use, SQL_C_CHAR or SQL_C_WCHAR. Normally this matches the SQL type of the // column (SQL_C_CHAR is used for SQL_CHAR, etc.). At least one database reports it has // SQL_WCHAR data even when configured for UTF-8 which is better suited for SQL_C_CHAR. PyObject* Encode(PyObject*) const; // Given a string (unicode or str for 2.7), return a bytes object encoded. This is used // for encoding a Python object for passing to a function expecting SQLCHAR* or SQLWCHAR*. }; struct SQLWChar { // Encodes a Python string to a SQLWCHAR pointer. This should eventually replace the // SQLWchar structure. // // Note: This does *not* increment the refcount! // IMPORTANT: I've made the conscious decision *not* to determine the character count. If // we only had to follow the ODBC specification, it would simply be the number of // characters in the string and would be the bytelen / 2. The problem is drivers that // don't follow the specification and expect things like UTF-8. What length do these // drivers expect? Very, very likely they want the number of *bytes*, not the actual // number of characters. I'm simply going to null terminate and pass SQL_NTS. 
// // This is a performance penalty when using utf16 since we have to copy the string just to // add the null terminator bytes, but we don't use it very often. If this becomes a // bottleneck, we'll have to revisit this design. SQLWCHAR* psz; bool isNone; Object bytes; // A temporary object holding the decoded bytes if we can't use a pointer into the original // object. SQLWChar(PyObject* src, const char* szEncoding) { TextEnc enc; enc.name = szEncoding; enc.ctype = SQL_C_WCHAR; enc.optenc = (strcmp(szEncoding, "raw") == 0) ? OPTENC_RAW : OPTENC_NONE; init(src, enc); } SQLWChar(PyObject* src, const TextEnc* penc) { init(src, *penc); } SQLWChar(PyObject* src, const TextEnc& enc) { init(src, enc); } bool isValidOrNone() { // Returns true if this object is a valid string *or* None. return isNone || (psz != 0); } bool isValid() { return psz != 0; } private: void init(PyObject* src, const TextEnc& enc); SQLWChar(const SQLWChar&) {} void operator=(const SQLWChar&) {} }; PyObject* TextBufferToObject(const TextEnc& enc, void* p, Py_ssize_t len); // Convert a text buffer to a Python object using the given encoding. // // The buffer can be a SQLCHAR array or SQLWCHAR array. The text encoding // should match it. #endif // _TEXTENC_H ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1619054768.0 pyodbc-4.0.32/src/wrapper.h0000664000175000017500000000376100000000000017052 0ustar00mkleehammermkleehammer#ifndef _WRAPPER_H_ #define _WRAPPER_H_ class Object { // This is a simple wrapper around PyObject pointers to release them when this object goes // out of scope. Note that it does *not* increment the reference count on acquisition but // it *does* decrement the count if you don't use Detach. // // It also does not have a copy constructor and doesn't try to manage passing pointers // around. This is simply used to simplify functions by allowing early exits. 
Object(const Object& illegal) { } void operator=(const Object& illegal) { } protected: PyObject* p; public: Object(PyObject* _p = 0) { p = _p; } ~Object() { Py_XDECREF(p); } Object& operator=(PyObject* pNew) { Py_XDECREF(p); p = pNew; return *this; } bool IsValid() const { return p != 0; } bool Attach(PyObject* _p) { // Returns true if the new pointer is non-zero. Py_XDECREF(p); p = _p; return (_p != 0); } PyObject* Detach() { PyObject* pT = p; p = 0; return pT; } operator PyObject*() { return p; } operator PyVarObject*() { return (PyVarObject*)p; } operator const bool() { return p != 0; } PyObject* Get() { return p; } }; class Tuple : public Object { private: Tuple(const Tuple& other) {} void operator=(const Tuple& other) {} public: Tuple(PyObject* _p = 0) : Object(_p) { } operator PyTupleObject*() { return (PyTupleObject*)p; } PyObject*& operator[](int i) { I(p != 0); return PyTuple_GET_ITEM(p, i); } Py_ssize_t size() { return p ? PyTuple_GET_SIZE(p) : 0; } }; #ifdef WINVER struct RegKey { HKEY hkey; RegKey() { hkey = 0; } ~RegKey() { if (hkey != 0) RegCloseKey(hkey); } operator HKEY() { return hkey; } }; #endif #endif // _WRAPPER_H_ ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1629404586.2999673 pyodbc-4.0.32/tests2/0000775000175000017500000000000000000000000015647 5ustar00mkleehammermkleehammer././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1629143953.0 pyodbc-4.0.32/tests2/accesstests.py0000775000175000017500000005517000000000000020560 0ustar00mkleehammermkleehammer#!/usr/bin/python usage="""\ usage: %prog [options] filename Unit tests for Microsoft Access These run using the version from the 'build' directory, not the version installed into the Python directories. You must run python setup.py build before running the tests. 
To run, pass the file EXTENSION of an Access database on the command line: accesstests accdb An empty Access 2000 database (empty.mdb) or an empty Access 2007 database (empty.accdb), are automatically created for the tests. To run a single test, use the -t option: accesstests -t unicode_null accdb If you want to report an error, it would be helpful to include the driver information by using the verbose flag and redirecting the output to a file: accesstests -v accdb >& results.txt You can pass the verbose flag twice for more verbose output: accesstests -vv accdb """ # Access SQL data types: http://msdn2.microsoft.com/en-us/library/bb208866.aspx import sys, os, re import unittest from decimal import Decimal from datetime import datetime, date, time from os.path import abspath, dirname, join import shutil from testutils import * CNXNSTRING = None _TESTSTR = '0123456789-abcdefghijklmnopqrstuvwxyz-' def _generate_test_string(length): """ Returns a string of composed of `seed` to make a string `length` characters long. To enhance performance, there are 3 ways data is read, based on the length of the value, so most data types are tested with 3 lengths. This function helps us generate the test data. We use a recognizable data set instead of a single character to make it less likely that "overlap" errors will be hidden and to help us manually identify where a break occurs. 
""" if length <= len(_TESTSTR): return _TESTSTR[:length] c = (length + len(_TESTSTR)-1) / len(_TESTSTR) v = _TESTSTR * c return v[:length] class AccessTestCase(unittest.TestCase): SMALL_FENCEPOST_SIZES = [ 0, 1, 254, 255 ] # text fields <= 255 LARGE_FENCEPOST_SIZES = [ 256, 270, 304, 508, 510, 511, 512, 1023, 1024, 2047, 2048, 4000, 4095, 4096, 4097, 10 * 1024, 20 * 1024 ] ANSI_FENCEPOSTS = [ _generate_test_string(size) for size in SMALL_FENCEPOST_SIZES ] UNICODE_FENCEPOSTS = [ unicode(s) for s in ANSI_FENCEPOSTS ] IMAGE_FENCEPOSTS = ANSI_FENCEPOSTS + [ _generate_test_string(size) for size in LARGE_FENCEPOST_SIZES ] def __init__(self, method_name): unittest.TestCase.__init__(self, method_name) def setUp(self): self.cnxn = pyodbc.connect(CNXNSTRING) self.cursor = self.cnxn.cursor() # https://docs.microsoft.com/en-us/sql/odbc/microsoft/desktop-database-driver-performance-issues?view=sql-server-2017 # # As of the 4.0 drivers, you have to send as Unicode? self.cnxn.setencoding(str, encoding='utf-16le') for i in range(3): try: self.cursor.execute("drop table t%d" % i) self.cnxn.commit() except: pass self.cnxn.rollback() def tearDown(self): try: self.cursor.close() self.cnxn.close() except: # If we've already closed the cursor or connection, exceptions are thrown. 
pass def test_multiple_bindings(self): "More than one bind and select on a cursor" self.cursor.execute("create table t1(n int)") self.cursor.execute("insert into t1 values (?)", 1) self.cursor.execute("insert into t1 values (?)", 2) self.cursor.execute("insert into t1 values (?)", 3) for i in range(3): self.cursor.execute("select n from t1 where n < ?", 10) self.cursor.execute("select n from t1 where n < 3") def test_different_bindings(self): self.cursor.execute("create table t1(n int)") self.cursor.execute("create table t2(d datetime)") self.cursor.execute("insert into t1 values (?)", 1) self.cursor.execute("insert into t2 values (?)", datetime.now()) def test_drivers(self): p = pyodbc.drivers() self.assertTrue(isinstance(p, list)) def test_datasources(self): p = pyodbc.dataSources() self.assertTrue(isinstance(p, dict)) def test_getinfo_string(self): value = self.cnxn.getinfo(pyodbc.SQL_CATALOG_NAME_SEPARATOR) self.assertTrue(isinstance(value, str)) def test_getinfo_bool(self): value = self.cnxn.getinfo(pyodbc.SQL_ACCESSIBLE_TABLES) self.assertTrue(isinstance(value, bool)) def test_getinfo_int(self): value = self.cnxn.getinfo(pyodbc.SQL_DEFAULT_TXN_ISOLATION) self.assertTrue(isinstance(value, (int, long))) def test_getinfo_smallint(self): value = self.cnxn.getinfo(pyodbc.SQL_CONCAT_NULL_BEHAVIOR) self.assertTrue(isinstance(value, int)) def _test_strtype(self, sqltype, value, resulttype=None, colsize=None): """ The implementation for string, Unicode, and binary tests. """ assert colsize is None or (value is None or colsize >= len(value)), 'colsize=%s value=%s' % (colsize, (value is None) and 'none' or len(value)) if colsize: sql = "create table t1(n1 int not null, s1 %s(%s), s2 %s(%s))" % (sqltype, colsize, sqltype, colsize) else: sql = "create table t1(n1 int not null, s1 %s, s2 %s)" % (sqltype, sqltype) if resulttype is None: # Access only uses Unicode, but strings might have been passed in to see if they can be written. 
When we # read them back, they'll be unicode, so compare our results to a Unicode version of `value`. if type(value) is str: resulttype = unicode else: resulttype = type(value) self.cursor.execute(sql) self.cursor.execute("insert into t1 values(1, ?, ?)", (value, value)) v = self.cursor.execute("select s1, s2 from t1").fetchone()[0] if type(value) is not resulttype: # To allow buffer --> db --> bytearray tests, always convert the input to the expected result type before # comparing. value = resulttype(value) self.assertEqual(type(v), resulttype) if value is not None: self.assertEqual(len(v), len(value)) self.assertEqual(v, value) # # unicode # def test_unicode_null(self): self._test_strtype('varchar', None, colsize=255) # Generate a test for each fencepost size: test_varchar_0, etc. def _maketest(value): def t(self): self._test_strtype('varchar', value, colsize=len(value)) t.__doc__ = 'unicode %s' % len(value) return t for value in UNICODE_FENCEPOSTS: locals()['test_unicode_%s' % len(value)] = _maketest(value) # # ansi -> varchar # # Access only stores Unicode text but it should accept ASCII text. # Generate a test for each fencepost size: test_varchar_0, etc. def _maketest(value): def t(self): self._test_strtype('varchar', value, colsize=len(value)) t.__doc__ = 'ansi %s' % len(value) return t for value in ANSI_FENCEPOSTS: locals()['test_ansivarchar_%s' % len(value)] = _maketest(value) # # binary # # Generate a test for each fencepost size: test_varchar_0, etc. def _maketest(value): def t(self): self._test_strtype('varbinary', buffer(value), colsize=len(value), resulttype=pyodbc.BINARY) t.__doc__ = 'binary %s' % len(value) return t for value in ANSI_FENCEPOSTS: locals()['test_binary_%s' % len(value)] = _maketest(value) # # image # def test_null_image(self): self._test_strtype('image', None) # Generate a test for each fencepost size: test_varchar_0, etc. 
def _maketest(value): def t(self): self._test_strtype('image', buffer(value), resulttype=pyodbc.BINARY) t.__doc__ = 'image %s' % len(value) return t for value in IMAGE_FENCEPOSTS: locals()['test_image_%s' % len(value)] = _maketest(value) # # memo # def test_null_memo(self): self._test_strtype('memo', None) # Generate a test for each fencepost size: test_varchar_0, etc. def _maketest(value): def t(self): self._test_strtype('memo', unicode(value)) t.__doc__ = 'Unicode to memo %s' % len(value) return t for value in IMAGE_FENCEPOSTS: locals()['test_memo_%s' % len(value)] = _maketest(value) # ansi -> memo def _maketest(value): def t(self): self._test_strtype('memo', value) t.__doc__ = 'ANSI to memo %s' % len(value) return t for value in IMAGE_FENCEPOSTS: locals()['test_ansimemo_%s' % len(value)] = _maketest(value) def test_subquery_params(self): """Ensure parameter markers work in a subquery""" self.cursor.execute("create table t1(id integer, s varchar(20))") self.cursor.execute("insert into t1 values (?,?)", 1, 'test') row = self.cursor.execute(""" select x.id from ( select id from t1 where s = ? and id between ? and ? ) x """, 'test', 1, 10).fetchone() self.assertNotEqual(row, None) self.assertEqual(row[0], 1) def _exec(self): self.cursor.execute(self.sql) def test_close_cnxn(self): """Make sure using a Cursor after closing its connection doesn't crash.""" self.cursor.execute("create table t1(id integer, s varchar(20))") self.cursor.execute("insert into t1 values (?,?)", 1, 'test') self.cursor.execute("select * from t1") self.cnxn.close() # Now that the connection is closed, we expect an exception. (If the code attempts to use # the HSTMT, we'll get an access violation instead.) 
self.sql = "select * from t1" self.assertRaises(pyodbc.ProgrammingError, self._exec) def test_unicode_query(self): self.cursor.execute(u"select 1") def test_negative_row_index(self): self.cursor.execute("create table t1(s varchar(20))") self.cursor.execute("insert into t1 values(?)", "1") row = self.cursor.execute("select * from t1").fetchone() self.assertEqual(row[0], "1") self.assertEqual(row[-1], "1") def test_version(self): self.assertEqual(3, len(pyodbc.version.split('.'))) # 1.3.1 etc. # # date, time, datetime # def test_datetime(self): value = datetime(2007, 1, 15, 3, 4, 5) self.cursor.execute("create table t1(dt datetime)") self.cursor.execute("insert into t1 values (?)", value) result = self.cursor.execute("select dt from t1").fetchone()[0] self.assertEqual(value, result) # # ints and floats # def test_int(self): value = 1234 self.cursor.execute("create table t1(n int)") self.cursor.execute("insert into t1 values (?)", value) result = self.cursor.execute("select n from t1").fetchone()[0] self.assertEqual(result, value) def test_negative_int(self): value = -1 self.cursor.execute("create table t1(n int)") self.cursor.execute("insert into t1 values (?)", value) result = self.cursor.execute("select n from t1").fetchone()[0] self.assertEqual(result, value) def test_smallint(self): value = 32767 self.cursor.execute("create table t1(n smallint)") self.cursor.execute("insert into t1 values (?)", value) result = self.cursor.execute("select n from t1").fetchone()[0] self.assertEqual(result, value) def test_real(self): value = 1234.5 self.cursor.execute("create table t1(n real)") self.cursor.execute("insert into t1 values (?)", value) result = self.cursor.execute("select n from t1").fetchone()[0] self.assertEqual(result, value) def test_negative_real(self): value = -200.5 self.cursor.execute("create table t1(n real)") self.cursor.execute("insert into t1 values (?)", value) result = self.cursor.execute("select n from t1").fetchone()[0] self.assertEqual(value, result) 
def test_float(self): value = 1234.567 self.cursor.execute("create table t1(n float)") self.cursor.execute("insert into t1 values (?)", value) result = self.cursor.execute("select n from t1").fetchone()[0] self.assertEqual(result, value) def test_negative_float(self): value = -200.5 self.cursor.execute("create table t1(n float)") self.cursor.execute("insert into t1 values (?)", value) result = self.cursor.execute("select n from t1").fetchone()[0] self.assertEqual(value, result) def test_tinyint(self): self.cursor.execute("create table t1(n tinyint)") value = 10 self.cursor.execute("insert into t1 values (?)", value) result = self.cursor.execute("select n from t1").fetchone()[0] self.assertEqual(type(result), type(value)) self.assertEqual(value, result) # # decimal & money # def test_decimal(self): value = Decimal('12345.6789') self.cursor.execute("create table t1(n numeric(10,4))") self.cursor.execute("insert into t1 values(?)", value) v = self.cursor.execute("select n from t1").fetchone()[0] self.assertEqual(type(v), Decimal) self.assertEqual(v, value) def test_money(self): self.cursor.execute("create table t1(n money)") value = Decimal('1234.45') self.cursor.execute("insert into t1 values (?)", value) result = self.cursor.execute("select n from t1").fetchone()[0] self.assertEqual(type(result), type(value)) self.assertEqual(value, result) def test_negative_decimal_scale(self): value = Decimal('-10.0010') self.cursor.execute("create table t1(d numeric(19,4))") self.cursor.execute("insert into t1 values(?)", value) v = self.cursor.execute("select * from t1").fetchone()[0] self.assertEqual(type(v), Decimal) self.assertEqual(v, value) # # bit # def test_bit(self): self.cursor.execute("create table t1(b bit)") value = True self.cursor.execute("insert into t1 values (?)", value) result = self.cursor.execute("select b from t1").fetchone()[0] self.assertEqual(type(result), bool) self.assertEqual(value, result) def test_bit_null(self): self.cursor.execute("create table 
t1(b bit)") value = None self.cursor.execute("insert into t1 values (?)", value) result = self.cursor.execute("select b from t1").fetchone()[0] self.assertEqual(type(result), bool) self.assertEqual(False, result) def test_guid(self): value = u"de2ac9c6-8676-4b0b-b8a6-217a8580cbee" self.cursor.execute("create table t1(g1 uniqueidentifier)") self.cursor.execute("insert into t1 values (?)", value) v = self.cursor.execute("select * from t1").fetchone()[0] self.assertEqual(type(v), type(value)) self.assertEqual(len(v), len(value)) # # rowcount # def test_rowcount_delete(self): self.assertEqual(self.cursor.rowcount, -1) self.cursor.execute("create table t1(i int)") count = 4 for i in range(count): self.cursor.execute("insert into t1 values (?)", i) self.cursor.execute("delete from t1") self.assertEqual(self.cursor.rowcount, count) def test_rowcount_nodata(self): """ This represents a different code path than a delete that deleted something. The return value is SQL_NO_DATA and code after it was causing an error. We could use SQL_NO_DATA to step over the code that errors out and drop down to the same SQLRowCount code. On the other hand, we could hardcode a zero return value. """ self.cursor.execute("create table t1(i int)") # This is a different code path internally. self.cursor.execute("delete from t1") self.assertEqual(self.cursor.rowcount, 0) def test_rowcount_select(self): """ Ensure Cursor.rowcount is set properly after a select statement. pyodbc calls SQLRowCount after each execute and sets Cursor.rowcount, but SQL Server 2005 returns -1 after a select statement, so we'll test for that behavior. This is valid behavior according to the DB API specification, but people don't seem to like it. 
""" self.cursor.execute("create table t1(i int)") count = 4 for i in range(count): self.cursor.execute("insert into t1 values (?)", i) self.cursor.execute("select * from t1") self.assertEqual(self.cursor.rowcount, -1) rows = self.cursor.fetchall() self.assertEqual(len(rows), count) self.assertEqual(self.cursor.rowcount, -1) def test_rowcount_reset(self): "Ensure rowcount is reset to -1" self.cursor.execute("create table t1(i int)") count = 4 for i in range(count): self.cursor.execute("insert into t1 values (?)", i) self.assertEqual(self.cursor.rowcount, 1) self.cursor.execute("create table t2(i int)") self.assertEqual(self.cursor.rowcount, -1) # # Misc # def test_lower_case(self): "Ensure pyodbc.lowercase forces returned column names to lowercase." # Has to be set before creating the cursor, so we must recreate self.cursor. pyodbc.lowercase = True self.cursor = self.cnxn.cursor() self.cursor.execute("create table t1(Abc int, dEf int)") self.cursor.execute("select * from t1") names = [ t[0] for t in self.cursor.description ] names.sort() self.assertEqual(names, [ "abc", "def" ]) # Put it back so other tests don't fail. pyodbc.lowercase = False def test_row_description(self): """ Ensure Cursor.description is accessible as Row.cursor_description. 
""" self.cursor = self.cnxn.cursor() self.cursor.execute("create table t1(a int, b char(3))") self.cnxn.commit() self.cursor.execute("insert into t1 values(1, 'abc')") row = self.cursor.execute("select * from t1").fetchone() self.assertEqual(self.cursor.description, row.cursor_description) def test_executemany(self): self.cursor.execute("create table t1(a int, b varchar(10))") params = [ (i, str(i)) for i in range(1, 6) ] self.cursor.executemany("insert into t1(a, b) values (?,?)", params) count = self.cursor.execute("select count(*) from t1").fetchone()[0] self.assertEqual(count, len(params)) self.cursor.execute("select a, b from t1 order by a") rows = self.cursor.fetchall() self.assertEqual(count, len(rows)) for param, row in zip(params, rows): self.assertEqual(param[0], row[0]) self.assertEqual(param[1], row[1]) def test_executemany_failure(self): """ Ensure that an exception is raised if one query in an executemany fails. """ self.cursor.execute("create table t1(a int, b varchar(10))") params = [ (1, 'good'), ('error', 'not an int'), (3, 'good') ] self.assertRaises(pyodbc.Error, self.cursor.executemany, "insert into t1(a, b) value (?, ?)", params) def test_row_slicing(self): self.cursor.execute("create table t1(a int, b int, c int, d int)"); self.cursor.execute("insert into t1 values(1,2,3,4)") row = self.cursor.execute("select * from t1").fetchone() result = row[:] self.assertTrue(result is row) result = row[:-1] self.assertEqual(result, (1,2,3)) result = row[0:4] self.assertTrue(result is row) def test_row_repr(self): self.cursor.execute("create table t1(a int, b int, c int, d int)"); self.cursor.execute("insert into t1 values(1,2,3,4)") row = self.cursor.execute("select * from t1").fetchone() result = str(row) self.assertEqual(result, "(1, 2, 3, 4)") result = str(row[:-1]) self.assertEqual(result, "(1, 2, 3)") result = str(row[:1]) self.assertEqual(result, "(1,)") def test_concatenation(self): v2 = u'0123456789' * 25 v3 = u'9876543210' * 25 value = v2 + 'x' 
+ v3 self.cursor.execute("create table t1(c2 varchar(250), c3 varchar(250))") self.cursor.execute("insert into t1(c2, c3) values (?,?)", v2, v3) row = self.cursor.execute("select c2 + 'x' + c3 from t1").fetchone() self.assertEqual(row[0], value) def test_autocommit(self): self.assertEqual(self.cnxn.autocommit, False) othercnxn = pyodbc.connect(CNXNSTRING, autocommit=True) self.assertEqual(othercnxn.autocommit, True) othercnxn.autocommit = False self.assertEqual(othercnxn.autocommit, False) def main(): from optparse import OptionParser parser = OptionParser(usage=usage) parser.add_option("-v", "--verbose", default=0, action="count", help="Increment test verbosity (can be used multiple times)") parser.add_option("-d", "--debug", action="store_true", default=False, help="Print debugging items") parser.add_option("-t", "--test", help="Run only the named test") (options, args) = parser.parse_args() if len(args) != 1: parser.error('dbfile argument required') if args[0].endswith('.accdb'): driver = 'Microsoft Access Driver (*.mdb, *.accdb)' drvext = 'accdb' else: driver = 'Microsoft Access Driver (*.mdb)' drvext = 'mdb' here = dirname(abspath(__file__)) src = join(here, 'empty.' + drvext) dest = join(here, 'test.' + drvext) shutil.copy(src, dest) global CNXNSTRING CNXNSTRING = 'DRIVER={%s};DBQ=%s;ExtendedAnsiSQL=1' % (driver, dest) print(CNXNSTRING) if options.verbose: cnxn = pyodbc.connect(CNXNSTRING) print_library_info(cnxn) cnxn.close() suite = load_tests(AccessTestCase, options.test) testRunner = unittest.TextTestRunner(verbosity=options.verbose) result = testRunner.run(suite) return result if __name__ == '__main__': # Add the build directory to the path so we're testing the latest build, not the installed version. 
add_to_path() import pyodbc sys.exit(0 if main().wasSuccessful() else 1) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1619054768.0 pyodbc-4.0.32/tests2/dbapi20.py0000775000175000017500000007530700000000000017461 0ustar00mkleehammermkleehammer#!/usr/bin/env python ''' Python DB API 2.0 driver compliance unit test suite. This software is Public Domain and may be used without restrictions. "Now we have booze and barflies entering the discussion, plus rumours of DBAs on drugs... and I won't tell you what flashes through my mind each time I read the subject line with 'Anal Compliance' in it. All around this is turning out to be a thoroughly unwholesome unit test." -- Ian Bicking ''' __rcs_id__ = '$Id: dbapi20.py,v 1.10 2003/10/09 03:14:14 zenzen Exp $' __version__ = '$Revision: 1.10 $'[11:-2] __author__ = 'Stuart Bishop ' import unittest import time # $Log: dbapi20.py,v $ # Revision 1.10 2003/10/09 03:14:14 zenzen # Add test for DB API 2.0 optional extension, where database exceptions # are exposed as attributes on the Connection object. # # Revision 1.9 2003/08/13 01:16:36 zenzen # Minor tweak from Stefan Fleiter # # Revision 1.8 2003/04/10 00:13:25 zenzen # Changes, as per suggestions by M.-A. Lemburg # - Add a table prefix, to ensure namespace collisions can always be avoided # # Revision 1.7 2003/02/26 23:33:37 zenzen # Break out DDL into helper functions, as per request by David Rushby # # Revision 1.6 2003/02/21 03:04:33 zenzen # Stuff from Henrik Ekelund: # added test_None # added test_nextset & hooks # # Revision 1.5 2003/02/17 22:08:43 zenzen # Implement suggestions and code from Henrik Eklund - test that cursor.arraysize # defaults to 1 & generic cursor.callproc test added # # Revision 1.4 2003/02/15 00:16:33 zenzen # Changes, as per suggestions and bug reports by M.-A. Lemburg, # Matthew T. 
Kromer, Federico Di Gregorio and Daniel Dittmar # - Class renamed # - Now a subclass of TestCase, to avoid requiring the driver stub # to use multiple inheritance # - Reversed the polarity of buggy test in test_description # - Test exception heirarchy correctly # - self.populate is now self._populate(), so if a driver stub # overrides self.ddl1 this change propogates # - VARCHAR columns now have a width, which will hopefully make the # DDL even more portible (this will be reversed if it causes more problems) # - cursor.rowcount being checked after various execute and fetchXXX methods # - Check for fetchall and fetchmany returning empty lists after results # are exhausted (already checking for empty lists if select retrieved # nothing # - Fix bugs in test_setoutputsize_basic and test_setinputsizes # class DatabaseAPI20Test(unittest.TestCase): ''' Test a database self.driver for DB API 2.0 compatibility. This implementation tests Gadfly, but the TestCase is structured so that other self.drivers can subclass this test case to ensure compiliance with the DB-API. It is expected that this TestCase may be expanded in the future if ambiguities or edge conditions are discovered. The 'Optional Extensions' are not yet being tested. self.drivers should subclass this test, overriding setUp, tearDown, self.driver, connect_args and connect_kw_args. Class specification should be as follows: import dbapi20 class mytest(dbapi20.DatabaseAPI20Test): [...] Don't 'import DatabaseAPI20Test from dbapi20', or you will confuse the unit tester - just 'import dbapi20'. ''' # The self.driver module. 
This should be the module where the 'connect' # method is to be found driver = None connect_args = () # List of arguments to pass to connect connect_kw_args = {} # Keyword arguments for connect table_prefix = 'dbapi20test_' # If you need to specify a prefix for tables ddl1 = 'create table %sbooze (name varchar(20))' % table_prefix ddl2 = 'create table %sbarflys (name varchar(20))' % table_prefix xddl1 = 'drop table %sbooze' % table_prefix xddl2 = 'drop table %sbarflys' % table_prefix lowerfunc = 'lower' # Name of stored procedure to convert string->lowercase # Some drivers may need to override these helpers, for example adding # a 'commit' after the execute. def executeDDL1(self,cursor): cursor.execute(self.ddl1) def executeDDL2(self,cursor): cursor.execute(self.ddl2) def setUp(self): ''' self.drivers should override this method to perform required setup if any is necessary, such as creating the database. ''' pass def tearDown(self): ''' self.drivers should override this method to perform required cleanup if any is necessary, such as deleting the test database. The default drops the tables that may be created. ''' con = self._connect() try: cur = con.cursor() for i, ddl in enumerate((self.xddl1,self.xddl2)): try: cur.execute(ddl) con.commit() except self.driver.Error: # Assume table didn't exist. Other tests will check if # execute is busted. 
pass finally: con.close() def _connect(self): try: return self.driver.connect( *self.connect_args,**self.connect_kw_args ) except AttributeError: self.fail("No connect method found in self.driver module") def test_connect(self): con = self._connect() con.close() def test_apilevel(self): try: # Must exist apilevel = self.driver.apilevel # Must equal 2.0 self.assertEqual(apilevel,'2.0') except AttributeError: self.fail("Driver doesn't define apilevel") def test_threadsafety(self): try: # Must exist threadsafety = self.driver.threadsafety # Must be a valid value self.assertTrue(threadsafety in (0,1,2,3)) except AttributeError: self.fail("Driver doesn't define threadsafety") def test_paramstyle(self): try: # Must exist paramstyle = self.driver.paramstyle # Must be a valid value self.assertTrue(paramstyle in ( 'qmark','numeric','named','format','pyformat' )) except AttributeError: self.fail("Driver doesn't define paramstyle") def test_Exceptions(self): # Make sure required exceptions exist, and are in the # defined heirarchy. 
self.assertTrue(issubclass(self.driver.Warning,StandardError)) self.assertTrue(issubclass(self.driver.Error,StandardError)) self.assertTrue( issubclass(self.driver.InterfaceError,self.driver.Error) ) self.assertTrue( issubclass(self.driver.DatabaseError,self.driver.Error) ) self.assertTrue( issubclass(self.driver.OperationalError,self.driver.Error) ) self.assertTrue( issubclass(self.driver.IntegrityError,self.driver.Error) ) self.assertTrue( issubclass(self.driver.InternalError,self.driver.Error) ) self.assertTrue( issubclass(self.driver.ProgrammingError,self.driver.Error) ) self.assertTrue( issubclass(self.driver.NotSupportedError,self.driver.Error) ) def test_ExceptionsAsConnectionAttributes(self): # OPTIONAL EXTENSION # Test for the optional DB API 2.0 extension, where the exceptions # are exposed as attributes on the Connection object # I figure this optional extension will be implemented by any # driver author who is using this test suite, so it is enabled # by default. con = self._connect() drv = self.driver self.assertTrue(con.Warning is drv.Warning) self.assertTrue(con.Error is drv.Error) self.assertTrue(con.InterfaceError is drv.InterfaceError) self.assertTrue(con.DatabaseError is drv.DatabaseError) self.assertTrue(con.OperationalError is drv.OperationalError) self.assertTrue(con.IntegrityError is drv.IntegrityError) self.assertTrue(con.InternalError is drv.InternalError) self.assertTrue(con.ProgrammingError is drv.ProgrammingError) self.assertTrue(con.NotSupportedError is drv.NotSupportedError) def test_commit(self): con = self._connect() try: # Commit must work, even if it doesn't do anything con.commit() finally: con.close() def test_rollback(self): con = self._connect() # If rollback is defined, it should either work or throw # the documented exception if hasattr(con,'rollback'): try: con.rollback() except self.driver.NotSupportedError: pass def test_cursor(self): con = self._connect() try: cur = con.cursor() finally: con.close() def 
test_cursor_isolation(self): con = self._connect() try: # Make sure cursors created from the same connection have # the documented transaction isolation level cur1 = con.cursor() cur2 = con.cursor() self.executeDDL1(cur1) cur1.execute("insert into %sbooze values ('Victoria Bitter')" % ( self.table_prefix )) cur2.execute("select name from %sbooze" % self.table_prefix) booze = cur2.fetchall() self.assertEqual(len(booze),1) self.assertEqual(len(booze[0]),1) self.assertEqual(booze[0][0],'Victoria Bitter') finally: con.close() def test_description(self): con = self._connect() try: cur = con.cursor() self.executeDDL1(cur) self.assertEqual(cur.description,None, 'cursor.description should be none after executing a ' 'statement that can return no rows (such as DDL)' ) cur.execute('select name from %sbooze' % self.table_prefix) self.assertEqual(len(cur.description),1, 'cursor.description describes too many columns' ) self.assertEqual(len(cur.description[0]),7, 'cursor.description[x] tuples must have 7 elements' ) self.assertEqual(cur.description[0][0].lower(),'name', 'cursor.description[x][0] must return column name' ) self.assertEqual(cur.description[0][1],self.driver.STRING, 'cursor.description[x][1] must return column type. Got %r' % cur.description[0][1] ) # Make sure self.description gets reset self.executeDDL2(cur) self.assertEqual(cur.description,None, 'cursor.description not being set to None when executing ' 'no-result statements (eg. 
DDL)' ) finally: con.close() def test_rowcount(self): con = self._connect() try: cur = con.cursor() self.executeDDL1(cur) self.assertEqual(cur.rowcount,-1, 'cursor.rowcount should be -1 after executing no-result ' 'statements' ) cur.execute("insert into %sbooze values ('Victoria Bitter')" % ( self.table_prefix )) self.assertTrue(cur.rowcount in (-1,1), 'cursor.rowcount should == number or rows inserted, or ' 'set to -1 after executing an insert statement' ) cur.execute("select name from %sbooze" % self.table_prefix) self.assertTrue(cur.rowcount in (-1,1), 'cursor.rowcount should == number of rows returned, or ' 'set to -1 after executing a select statement' ) self.executeDDL2(cur) self.assertEqual(cur.rowcount,-1, 'cursor.rowcount not being reset to -1 after executing ' 'no-result statements' ) finally: con.close() lower_func = 'lower' def test_callproc(self): con = self._connect() try: cur = con.cursor() if self.lower_func and hasattr(cur,'callproc'): r = cur.callproc(self.lower_func,('FOO',)) self.assertEqual(len(r),1) self.assertEqual(r[0],'FOO') r = cur.fetchall() self.assertEqual(len(r),1,'callproc produced no result set') self.assertEqual(len(r[0]),1, 'callproc produced invalid result set' ) self.assertEqual(r[0][0],'foo', 'callproc produced invalid results' ) finally: con.close() def test_close(self): con = self._connect() try: cur = con.cursor() finally: con.close() # cursor.execute should raise an Error if called after connection # closed self.assertRaises(self.driver.Error,self.executeDDL1,cur) # connection.commit should raise an Error if called after connection' # closed.' 
self.assertRaises(self.driver.Error,con.commit) # connection.close should raise an Error if called more than once self.assertRaises(self.driver.Error,con.close) def test_execute(self): con = self._connect() try: cur = con.cursor() self._paraminsert(cur) finally: con.close() def _paraminsert(self,cur): self.executeDDL1(cur) cur.execute("insert into %sbooze values ('Victoria Bitter')" % ( self.table_prefix )) self.assertTrue(cur.rowcount in (-1,1)) if self.driver.paramstyle == 'qmark': cur.execute( 'insert into %sbooze values (?)' % self.table_prefix, ("Cooper's",) ) elif self.driver.paramstyle == 'numeric': cur.execute( 'insert into %sbooze values (:1)' % self.table_prefix, ("Cooper's",) ) elif self.driver.paramstyle == 'named': cur.execute( 'insert into %sbooze values (:beer)' % self.table_prefix, {'beer':"Cooper's"} ) elif self.driver.paramstyle == 'format': cur.execute( 'insert into %sbooze values (%%s)' % self.table_prefix, ("Cooper's",) ) elif self.driver.paramstyle == 'pyformat': cur.execute( 'insert into %sbooze values (%%(beer)s)' % self.table_prefix, {'beer':"Cooper's"} ) else: self.fail('Invalid paramstyle') self.assertTrue(cur.rowcount in (-1,1)) cur.execute('select name from %sbooze' % self.table_prefix) res = cur.fetchall() self.assertEqual(len(res),2,'cursor.fetchall returned too few rows') beers = [res[0][0],res[1][0]] beers.sort() self.assertEqual(beers[0],"Cooper's", 'cursor.fetchall retrieved incorrect data, or data inserted ' 'incorrectly' ) self.assertEqual(beers[1],"Victoria Bitter", 'cursor.fetchall retrieved incorrect data, or data inserted ' 'incorrectly' ) def test_executemany(self): con = self._connect() try: cur = con.cursor() self.executeDDL1(cur) largs = [ ("Cooper's",) , ("Boag's",) ] margs = [ {'beer': "Cooper's"}, {'beer': "Boag's"} ] if self.driver.paramstyle == 'qmark': cur.executemany( 'insert into %sbooze values (?)' % self.table_prefix, largs ) elif self.driver.paramstyle == 'numeric': cur.executemany( 'insert into %sbooze values 
(:1)' % self.table_prefix, largs ) elif self.driver.paramstyle == 'named': cur.executemany( 'insert into %sbooze values (:beer)' % self.table_prefix, margs ) elif self.driver.paramstyle == 'format': cur.executemany( 'insert into %sbooze values (%%s)' % self.table_prefix, largs ) elif self.driver.paramstyle == 'pyformat': cur.executemany( 'insert into %sbooze values (%%(beer)s)' % ( self.table_prefix ), margs ) else: self.fail('Unknown paramstyle') self.assertTrue(cur.rowcount in (-1,2), 'insert using cursor.executemany set cursor.rowcount to ' 'incorrect value %r' % cur.rowcount ) cur.execute('select name from %sbooze' % self.table_prefix) res = cur.fetchall() self.assertEqual(len(res),2, 'cursor.fetchall retrieved incorrect number of rows' ) beers = [res[0][0],res[1][0]] beers.sort() self.assertEqual(beers[0],"Boag's",'incorrect data retrieved') self.assertEqual(beers[1],"Cooper's",'incorrect data retrieved') finally: con.close() def test_fetchone(self): con = self._connect() try: cur = con.cursor() # cursor.fetchone should raise an Error if called before # executing a select-type query self.assertRaises(self.driver.Error,cur.fetchone) # cursor.fetchone should raise an Error if called after # executing a query that cannnot return rows self.executeDDL1(cur) self.assertRaises(self.driver.Error,cur.fetchone) cur.execute('select name from %sbooze' % self.table_prefix) self.assertEqual(cur.fetchone(),None, 'cursor.fetchone should return None if a query retrieves ' 'no rows' ) self.assertTrue(cur.rowcount in (-1,0)) # cursor.fetchone should raise an Error if called after # executing a query that cannnot return rows cur.execute("insert into %sbooze values ('Victoria Bitter')" % ( self.table_prefix )) self.assertRaises(self.driver.Error,cur.fetchone) cur.execute('select name from %sbooze' % self.table_prefix) r = cur.fetchone() self.assertEqual(len(r),1, 'cursor.fetchone should have retrieved a single row' ) self.assertEqual(r[0],'Victoria Bitter', 'cursor.fetchone 
retrieved incorrect data' ) self.assertEqual(cur.fetchone(),None, 'cursor.fetchone should return None if no more rows available' ) self.assertTrue(cur.rowcount in (-1,1)) finally: con.close() samples = [ 'Carlton Cold', 'Carlton Draft', 'Mountain Goat', 'Redback', 'Victoria Bitter', 'XXXX' ] def _populate(self): ''' Return a list of sql commands to setup the DB for the fetch tests. ''' populate = [ "insert into %sbooze values ('%s')" % (self.table_prefix,s) for s in self.samples ] return populate def test_fetchmany(self): con = self._connect() try: cur = con.cursor() # cursor.fetchmany should raise an Error if called without #issuing a query self.assertRaises(self.driver.Error,cur.fetchmany,4) self.executeDDL1(cur) for sql in self._populate(): cur.execute(sql) cur.execute('select name from %sbooze' % self.table_prefix) r = cur.fetchmany() self.assertEqual(len(r),1, 'cursor.fetchmany retrieved incorrect number of rows, ' 'default of arraysize is one.' ) cur.arraysize=10 r = cur.fetchmany(3) # Should get 3 rows self.assertEqual(len(r),3, 'cursor.fetchmany retrieved incorrect number of rows' ) r = cur.fetchmany(4) # Should get 2 more self.assertEqual(len(r),2, 'cursor.fetchmany retrieved incorrect number of rows' ) r = cur.fetchmany(4) # Should be an empty sequence self.assertEqual(len(r),0, 'cursor.fetchmany should return an empty sequence after ' 'results are exhausted' ) self.assertTrue(cur.rowcount in (-1,6)) # Same as above, using cursor.arraysize cur.arraysize=4 cur.execute('select name from %sbooze' % self.table_prefix) r = cur.fetchmany() # Should get 4 rows self.assertEqual(len(r),4, 'cursor.arraysize not being honoured by fetchmany' ) r = cur.fetchmany() # Should get 2 more self.assertEqual(len(r),2) r = cur.fetchmany() # Should be an empty sequence self.assertEqual(len(r),0) self.assertTrue(cur.rowcount in (-1,6)) cur.arraysize=6 cur.execute('select name from %sbooze' % self.table_prefix) rows = cur.fetchmany() # Should get all rows 
self.assertTrue(cur.rowcount in (-1,6)) self.assertEqual(len(rows),6) self.assertEqual(len(rows),6) rows = [r[0] for r in rows] rows.sort() # Make sure we get the right data back out for i in range(0,6): self.assertEqual(rows[i],self.samples[i], 'incorrect data retrieved by cursor.fetchmany' ) rows = cur.fetchmany() # Should return an empty list self.assertEqual(len(rows),0, 'cursor.fetchmany should return an empty sequence if ' 'called after the whole result set has been fetched' ) self.assertTrue(cur.rowcount in (-1,6)) self.executeDDL2(cur) cur.execute('select name from %sbarflys' % self.table_prefix) r = cur.fetchmany() # Should get empty sequence self.assertEqual(len(r),0, 'cursor.fetchmany should return an empty sequence if ' 'query retrieved no rows' ) self.assertTrue(cur.rowcount in (-1,0)) finally: con.close() def test_fetchall(self): con = self._connect() try: cur = con.cursor() # cursor.fetchall should raise an Error if called # without executing a query that may return rows (such # as a select) self.assertRaises(self.driver.Error, cur.fetchall) self.executeDDL1(cur) for sql in self._populate(): cur.execute(sql) # cursor.fetchall should raise an Error if called # after executing a a statement that cannot return rows self.assertRaises(self.driver.Error,cur.fetchall) cur.execute('select name from %sbooze' % self.table_prefix) rows = cur.fetchall() self.assertTrue(cur.rowcount in (-1,len(self.samples))) self.assertEqual(len(rows),len(self.samples), 'cursor.fetchall did not retrieve all rows' ) rows = [r[0] for r in rows] rows.sort() for i in range(0,len(self.samples)): self.assertEqual(rows[i],self.samples[i], 'cursor.fetchall retrieved incorrect rows' ) rows = cur.fetchall() self.assertEqual( len(rows),0, 'cursor.fetchall should return an empty list if called ' 'after the whole result set has been fetched' ) self.assertTrue(cur.rowcount in (-1,len(self.samples))) self.executeDDL2(cur) cur.execute('select name from %sbarflys' % self.table_prefix) rows = 
cur.fetchall() self.assertTrue(cur.rowcount in (-1,0)) self.assertEqual(len(rows),0, 'cursor.fetchall should return an empty list if ' 'a select query returns no rows' ) finally: con.close() def test_mixedfetch(self): con = self._connect() try: cur = con.cursor() self.executeDDL1(cur) for sql in self._populate(): cur.execute(sql) cur.execute('select name from %sbooze' % self.table_prefix) rows1 = cur.fetchone() rows23 = cur.fetchmany(2) rows4 = cur.fetchone() rows56 = cur.fetchall() self.assertTrue(cur.rowcount in (-1,6)) self.assertEqual(len(rows23),2, 'fetchmany returned incorrect number of rows' ) self.assertEqual(len(rows56),2, 'fetchall returned incorrect number of rows' ) rows = [rows1[0]] rows.extend([rows23[0][0],rows23[1][0]]) rows.append(rows4[0]) rows.extend([rows56[0][0],rows56[1][0]]) rows.sort() for i in range(0,len(self.samples)): self.assertEqual(rows[i],self.samples[i], 'incorrect data retrieved or inserted' ) finally: con.close() def help_nextset_setUp(self,cur): ''' Should create a procedure called deleteme that returns two result sets, first the number of rows in booze then "name from booze" ''' raise NotImplementedError,'Helper not implemented' #sql=""" # create procedure deleteme as # begin # select count(*) from booze # select name from booze # end #""" #cur.execute(sql) def help_nextset_tearDown(self,cur): 'If cleaning up is needed after nextSetTest' raise NotImplementedError,'Helper not implemented' #cur.execute("drop procedure deleteme") def test_nextset(self): con = self._connect() try: cur = con.cursor() if not hasattr(cur,'nextset'): return try: self.executeDDL1(cur) sql=self._populate() for sql in self._populate(): cur.execute(sql) self.help_nextset_setUp(cur) cur.callproc('deleteme') numberofrows=cur.fetchone() assert numberofrows[0]== len(self.samples) assert cur.nextset() names=cur.fetchall() assert len(names) == len(self.samples) s=cur.nextset() assert s == None,'No more return sets, should return None' finally: 
self.help_nextset_tearDown(cur) finally: con.close() def test_nextset(self): raise NotImplementedError,'Drivers need to override this test' def test_arraysize(self): # Not much here - rest of the tests for this are in test_fetchmany con = self._connect() try: cur = con.cursor() self.assertTrue(hasattr(cur,'arraysize'), 'cursor.arraysize must be defined' ) finally: con.close() def test_setinputsizes(self): con = self._connect() try: cur = con.cursor() cur.setinputsizes( (25,) ) self._paraminsert(cur) # Make sure cursor still works finally: con.close() def test_setoutputsize_basic(self): # Basic test is to make sure setoutputsize doesn't blow up con = self._connect() try: cur = con.cursor() cur.setoutputsize(1000) cur.setoutputsize(2000,0) self._paraminsert(cur) # Make sure the cursor still works finally: con.close() def test_setoutputsize(self): # Real test for setoutputsize is driver dependant raise NotImplementedError,'Driver need to override this test' def test_None(self): con = self._connect() try: cur = con.cursor() self.executeDDL1(cur) cur.execute('insert into %sbooze values (NULL)' % self.table_prefix) cur.execute('select name from %sbooze' % self.table_prefix) r = cur.fetchall() self.assertEqual(len(r),1) self.assertEqual(len(r[0]),1) self.assertEqual(r[0][0],None,'NULL value not returned as None') finally: con.close() def test_Date(self): d1 = self.driver.Date(2002,12,25) d2 = self.driver.DateFromTicks(time.mktime((2002,12,25,0,0,0,0,0,0))) # Can we assume this? API doesn't specify, but it seems implied # self.assertEqual(str(d1),str(d2)) def test_Time(self): t1 = self.driver.Time(13,45,30) t2 = self.driver.TimeFromTicks(time.mktime((2001,1,1,13,45,30,0,0,0))) # Can we assume this? API doesn't specify, but it seems implied # self.assertEqual(str(t1),str(t2)) def test_Timestamp(self): t1 = self.driver.Timestamp(2002,12,25,13,45,30) t2 = self.driver.TimestampFromTicks( time.mktime((2002,12,25,13,45,30,0,0,0)) ) # Can we assume this? 
API doesn't specify, but it seems implied # self.assertEqual(str(t1),str(t2)) def test_Binary(self): b = self.driver.Binary('Something') b = self.driver.Binary('') def test_STRING(self): self.assertTrue(hasattr(self.driver,'STRING'), 'module.STRING must be defined' ) def test_BINARY(self): self.assertTrue(hasattr(self.driver,'BINARY'), 'module.BINARY must be defined.' ) def test_NUMBER(self): self.assertTrue(hasattr(self.driver,'NUMBER'), 'module.NUMBER must be defined.' ) def test_DATETIME(self): self.assertTrue(hasattr(self.driver,'DATETIME'), 'module.DATETIME must be defined.' ) def test_ROWID(self): self.assertTrue(hasattr(self.driver,'ROWID'), 'module.ROWID must be defined.' ) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1619054768.0 pyodbc-4.0.32/tests2/dbapitests.py0000775000175000017500000000267700000000000020402 0ustar00mkleehammermkleehammerimport sys import unittest from testutils import * import dbapi20 def main(): add_to_path() import pyodbc from optparse import OptionParser parser = OptionParser(usage="usage: %prog [options] connection_string") parser.add_option("-v", "--verbose", action="count", help="Increment test verbosity (can be used multiple times)") parser.add_option("-d", "--debug", action="store_true", default=False, help="Print debugging items") (options, args) = parser.parse_args() if len(args) > 1: parser.error('Only one argument is allowed. 
Do you need quotes around the connection string?') if not args: connection_string = load_setup_connection_string('dbapitests') if not connection_string: parser.print_help() raise SystemExit() else: connection_string = args[0] class test_pyodbc(dbapi20.DatabaseAPI20Test): driver = pyodbc connect_args = [ connection_string ] connect_kw_args = {} def test_nextset(self): pass def test_setoutputsize(self): pass def test_ExceptionsAsConnectionAttributes(self): pass suite = unittest.makeSuite(test_pyodbc, 'test') testRunner = unittest.TextTestRunner(verbosity=(options.verbose > 1) and 9 or 0) result = testRunner.run(suite) return result if __name__ == '__main__': sys.exit(0 if main().wasSuccessful() else 1) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1619054768.0 pyodbc-4.0.32/tests2/empty.accdb0000664000175000017500000114000000000000000017757 0ustar00mkleehammermkleehammerStandard ACE DBnb` Ugr@?~1y0̝cßFNa7:ޜ(t,`3{6߱nC53Sy[/|*|JQrf_Љ$g'DeFx -bT4.0=dv Y P SS  Y  l Y Y  Y Y  Y  Y  Y   Y  Y  Y  Y   Y 2Y  Y   Y   Y ConnectDatabaseDateCreateDateUpdate FlagsForeignNameIdLvLvExtraLvModule LvPropName OwnerParentIdRmtInfoLongRmtInfoShortType    YYIdParentIdName        OYabDCS  Y Y Y  Y 2ACMFInheritableObjectIdSIDX)_5 +_YObjectId YS  Y  Y bWY Y  Y 1 Y c8Y  Y ,AttributeExpressionFlagLvExtra Name1 Name2ObjectId Order,Cn,C ,nCY"ObjectIdAttribute   -YS  Y Y Y  Y  Y  Y  Y  Y ccolumn grbiticolumnszColumnszObject$szReferencedColumn$szReferencedObjectszRelationship Dede D YYYszObject$szReferencedObjectszRelationship v1b N  : k &     @   OJmJJMMQkkfJUQkOJmJLJkQkSdi`k`dOo^QkiQ^JmYdbkWYfkiQfdimkkMiYfmkkvkiQ^mJL^Qk`kvkJMMQkkkmdiJUQ`kvkJMQk`kvkMd`f^QuMd^o`bk`kvkMd`f^QumvfQ+JmmJMW`Qbm`kvkMd`f^QumvfQ+OQMY`J^`kvkMd`f^QumvfQ+UoYO`kvkMd`f^QumvfQ+YQQQOdoL^Q `kvkMd`f^QumvfQ+YQQQkYbU^Q `kvkMd`f^QumvfQ+^dbU `kvkMd`f^QumvfQ+kWdim `kvkMd`f^QumvfQ+mQum`kvkMd`f^QumvfQ+obkYUbQOLvmQ 
`kvkbJqfJbQUidofMJmQUdiYQk`kvkbJqfJbQUidofk`kvkbJqfJbQUidofmddL[QMmk`kvkbJqfJbQdL[QMmYOk`kvkdL[QMmk`kvkhoQiYQk`kvkiQ^JmYdbkWYfkobokQO+mJL^Q`kvkOLko``JivYbSdokQiOQSYbQO  @ @ @ @     !#%')049>Av1@P  @ @ @ @ @ @ @ @ @ @ @ @DDDDDDDDD D D D DDDDDDDDDDDDD$D%FFFD)D*FF F F F F D+D,D-DFDGDHD@DADBD=D>D?D:D;D<D7D8D9!D4!D5!D6#D1#D2#D3%DC%DD%DE'D.'D/'D0)D&)D')D(0DI0DJ0DK4DL4DM4DN9DO9F9F>F>F>FAFAFAFDDDD DD DDDD!D"D#v1 v1@    d _ Z  G q  0 =X5a&Aa&L@qL@unused_tablej@EFFF:::::::8 @>|L@|L@ MSysNavPaneObjectIDsjJJJJJJJJJJH 9ͬL@ͬL@ MSysNavPaneGroupToObjectsjTTTTTTTTTTR 4ͬL@ͬL@ MSysNavPaneGroupsjDDDDDDDDDDB 0ͬL@ͬL@ MSysNavPaneGroupCategoriesjVVVVVVVVVVT )dL@aIL@ MSysAccessStoragejDDDDDDDDDDB  ͬL@ͬL@UserDefinedj@EDDD88888886 @ ͬL@ͬL@SummaryInfoj@EDDD88888886 @ͬL@ͬL@SysRelj.........., ͬL@ͬL@Scriptsj0000000000. ͬL@ͬL@Reportsj0000000000. ͬL@ͬL@Modulesj0000000000. ͬL@ͬL@Formsj,,,,,,,,,,* ͬL@ͬL@DataAccessPagesj@@@@@@@@@@> 'L@L@MSysComplexType_AttachmentTTTTTTTTTTT %L@L@MSysComplexType_TextHHHHHHHHHHH #L@L@MSysComplexType_DecimalNNNNNNNNNNN !L@L@MSysComplexType_GUIDHHHHHHHHHHH L@L@MSysComplexType_IEEEDoubleTTTTTTTTTTT L@L@MSysComplexType_IEEESingleTTTTTTTTTTT L@L@MSysComplexType_LongHHHHHHHHHHH L@L@MSysComplexType_ShortJJJJJJJJJJJ L@L@MSysComplexType_UnsignedByteXXXXXXXXXXX L@L@MSysComplexColumnsDDDDDDDDDDD L@L@MSysRelationshipsk DDDDDDDDDDB L@L@MSysQueriesk 88888888886 L@L@MSysACEsk 22222222220 L@L@MSysObjectsk 88888888886 L@=EL@MSysDbj@E:::......., @L@L@Relationshipsk <<<<<<<<<<: L@L@Databasesk 44444444442 L@L@Tablesk .........., D YN Y Y  Y Y Y ColumnNameComplexID&ComplexTypeObjectID"ConceptualTableIDFlatTableIDYYY(IdxConceptualTableIDIdxFlatTableID IdxIDv1@    fYkNY  ValuedvfYkNY  ValuedvfYkNY  ValuedvfYkNY  ValuedvfYkN  Y  ValuedvfYkN""Y  ValuedvfYkN$$Yk ValuedvfYkN&& Y k ValuedvaYN(( Y aY  Y Y  Y  Y FileDataFileFlagsFileNameFileTimeStampFileTypeFileURL((((Hv1bV Y1N**  Y PY Y ` Y , Y ,Y CY 
CDateCreateDateUpdateIdLvNameParentIdType,C,C*-C,։,C,C*.C,ց,C,C*/C,ցYYYIdParentIdIdParentIdName**v1b@ )&@c$ o q ( =  ˥P ʼq(IHHeH&HJuL@uL@1PROJECT,($ wIuL@uL@0PROJECTwm0,( w@uL@uL@/dir$  wHuL@uL@._VBA_PROJECT62. wKuL@uL@-AcessVBAData62. wiL@iL@,@,PROJECT840" iL@iL@+PROJECTwm>:6$ iL@iL@*@,dir0,(" iL@iL@)@,_VBA_PROJECTB>:" iL@iL@( AcessVBADataNJF.  L@uL@VBA$  whL@uL@VBAProject2.* w L@ L@MSysDbDirData PLH8 ͬL@ͬL@ImExSpecs0,( wͬL@ͬL@CustomGroups62. wͬL@ͬL@ Blob &" wͬL@ͬL@ 0   wͬL@ L@ Databases0,( wͬL@ͬL@ DataAccessPages<84 wͬL@ͬL@ Cmdbars,($ wͬL@uL@VBA$  wͬL@ͬL@Scripts,($ wͬL@ͬL@Modules,($ wͬL@ͬL@Reports,($ wͬL@ͬL@Forms($  wͬL@bOL@ijPropDataPLH8 ͬL@ͬL@MSysAccessStorage_SCRATCHPLH wͬL@bOL@MSysAccessStorage_ROOTJFB wnLVAL aam  aID="{AB5155F2-F476-415A-B1FA-54F6FF3631D6}" Name="Database1" HelpContextID="0" VersionCompatible32="393222000" CMG="080AE3F1E7F1E7F1E7F1E7" DPB="1012FBFCFCFCFCFC" GC="181AF304F404F4FB" [Host Extender Info] &H00000001={3832D640-CF90-11CF-8E43-00A0C911005A};VBE;&H00000000 am  *\G{000204EF-0000-0000-C000-000000000046}#4.0#9#C:\PROGRA~1\COMMON~1\MICROS~1\VBA\VBA6\VBE6.DLL#Visual Basic For Applications*\G{4AFFC9A0-5F99-101B-AF4E-00AA003F0F07}#9.0#0#C:\Program Files\Microsoft Office\Office12\MSACC.OLB#Microsoft Access 12.0 Object Library*\G{00020430-0000-0000-C000-000000000046}#2.0#0#C:\Windows\system32\stdole2.tlb#OLE Automation:*\G{4AC9E1DA-5BAD-4AC7-86E3-24F4CDCECA28}#c.0#0#C:\PROGRA~1\COMMON~1\MICROS~1\OFFICE12\ACEDAO.DLL#Microsoft Office 12.0 Access database engine Object Library GbIzx $AccessVBAWin16~Win32MacVBA6# Database1Lstdole`DAO<   0* pHd DatabaseD10@ = p GbIJ J< rs@tdole>stdol@e h%^*\G{00020430-C 0046}#2.0#0#C:\Windows\system32\e2.tlb#OLE AutomatioFn`DAO>JDAjOA A4AC9E1DA- 5BAD-7-86E3-24F4CDCECA28}#cAPROGRA~1\COMMONMICROSOFFICE12\ACE9.DLL#Microsoft Office 1f Access d engine Object LibraryOz Q ) @ @ @++++++++ + +  +  +  + + ++++-+!.+"/+#0+$1+% )@@@@@@+++++++ + +  + + +++-+!  
+  +  + +0+$1+%.+"/+#5 )@@@@@M`OLJik+Mokmd`Uidofk+ OJmJJMMQkkfJUQk+ OJmJLJkQk+ Sdi`k+Y`QukfQMk+`dOo^Qk+`kvkJMMQkkkmdiJUQ+iddm+fidfOJmJ+iQfdimk+kMiYfmk+qLJ+`kvkJMMQkkkmdiJUQ+kMiJmMW+JMQkkqLJOJmJ+!qLJfid[QMm+ 6+  OYiOJmJ+ L^dL+ fid[QMm+%fid[QMms`+$qLJ++qLJ+fid[QMm+"OYi+#>YN11 Y Y PY  Y Y ,VY V`Y ` Filter FlagsIdNamePosition SelectedObjectIDType`V,`V,13,V`YIdv1m0Custom ^ V v0222 RY N55  Y Y Y  Y Y ,VY V`Y ` FlagsGroupCategoryIDIdName"Object Type GroupObjectIDPosition`V,`V,57,V``V,`V,58,V`YYGroupCategoryIDIdv1@@ 4 yLO"  A w  w Custom Group 162.*  w w w w w w w w 4 @66 666666666  4 @66666666 6 6  6  YN::Y Y Y Y  Y ,VY V`Y ` FlagsGroupIDIconIdNameObjectIDPositionV,`V,`V:<`,VV,`V,`V:=`,VYYGroupIDIdv1 9 A o ? o9 ; ;9;;RY eehaN??Y  Y  _Y _IdNameTypedv)uL@uL@/@ ,dir0,(" &YANBBY IDAABCAYPrimaryKeyv1 AfPvcP=*z @ -  } j C 0  m Z G p\I6"r_K8%taN:'9j 4k 4ThԖ_,GkSިtҀ5iOQH4&{OXR7\6·2Tz jFyܣŗWܨ$lOFּKweAm ٹB6TU*)p 4j 0k 0ThԖ_,GkSިtҀ5iOQH4&{OXR7\6·2Tz jFyܣŗWܨ$lOFּKweAm ٹB6TU*)p 0j k hԖ_,GkSިtҀ5iOQH4&{OXR7\6·2Tz jFyܣŗWܨ$lOFּKweAm ٹB6TU*)p j %k %hԖ_,GkSިtҀ5iOQH4&{OXR7\6·2Tz jFyܣŗWܨ$lOFּKweAm ٹB6TU*)p %j k hԖ_,GkSިtҀ5iOQH4&{OXR7\6·2Tz jFyܣŗWܨ$lOFּKweAm ٹB6TU*)p j k hԖ_,GkSިtҀ5iOQH4&{OXR7\6·2Tz jFyܣŗWܨ$lOFּKweAm ٹB6TU*)p j k hԖ_,GkSިtҀ5iOQH4&{OXR7\6·2Tz jFyܣŗWܨ$lOFּKweAm ٹB6TU*)p j k hԖ_,GkSިtҀ5iOQH4&{OXR7\6·2Tz jFyܣŗWܨ$lOFּKweAm ٹB6TU*)p j !k !hԖ_,GkSިtҀ5iOQH4&{OXR7\6·2Tz jFyܣŗWܨ$lOFּKweAm ٹB6TU*)p !j #k #hԖ_,GkSިtҀ5iOQH4&{OXR7\6·2Tz jFyܣŗWܨ$lOFּKweAm ٹB6TU*)p #j 'k 'hԖ_,GkSިtҀ5iOQH4&{OXR7\6·2Tz jFyܣŗWܨ$lOFּKweAm ٹB6TU*)p 'j k hԖ_,GkSިtҀ5iOQH4&{OXR7\6·2Tz jFyܣŗWܨ$lOFּKweAm ٹB6TU*)p j ThԖ_,GkSިtҀ5iOQH4&{OXR7\6·2Tz jFyܣŗWܨ$lOFּKweAm ٹB6TU*)p j )k )hԖ_,GkSިtҀ5iOQH4&{OXR7\6·2Tz jFyܣŗWܨ$lOFּKweAm ٹB6TU*)p )j k k hԖ_,GkSިtҀ5iOQH4&{OXR7\6·2Tz jFyܣŗWܨ$lOFּKweAm ٹB6TU*)p k j k hԖ_,GkSިtҀ5iOQH4&{OXR7\6·2Tz jFyܣŗWܨ$lOFּKweAm ٹB6TU*)p j k k j hԖ_,GkSިtҀ5iOQH4&{OXR7\6·2Tz jFyܣŗWܨ$lOFּKweAm ٹB6TU*)p k k j hԖ_,GkSިtҀ5iOQH4&{OXR7\6·2Tz jFyܣŗWܨ$lOFּKweAm ٹB6TU*)p k k j hԖ_,GkSިtҀ5iOQH4&{OXR7\6·2Tz 
jFyܣŗWܨ$lOFּKweAm ٹB6TU*)p k k hԖ_,GkSިtҀ5iOQH4&{OXR7\6·2Tz jFyܣŗWܨ$lOFּKweAm ٹB6TU*)p j k k j hԖ_,GkSިtҀ5iOQH4&{OXR7\6·2Tz jFyܣŗWܨ$lOFּKweAm ٹB6TU*)p k k j hԖ_,GkSިtҀ5iOQH4&{OXR7\6·2Tz jFyܣŗWܨ$lOFּKweAm ٹB6TU*)p k j k j hԖ_,GkSިtҀ5iOQH4&{OXR7\6·2Tz jFyܣŗWܨ$lOFּKweAm ٹB6TU*)p k LVAL` MR2ANSI Query Mode(Themed Form ControlsAccessVersion NavPane CategoryUseMDIMode ShowDocumentTabs BuildHasOfflineLists>Picture Property Storage Format.CheckTruncatedNumFieldsProjVerNavPane ClosedNavPane Width*NavPane Category NameNavPane View ByNavPane Sort By    09.50     F     W      MR2ANSI Query Mode(Themed Form ControlsAccessVersion NavPane CategoryUseMDIMode ShowDocumentTabs BuildHasOfflineLists>Picture Property Storage Format.CheckTruncatedNumFieldsProjVerNavPane ClosedNavPane Width*NavPane Category NameNavPane View ByNavPane Sort By    09.50     F     W      MR2 ValidationRuleValidationTextOrientation FilterOrderByOrderByOnNameMapDefaultViewGUID8DisplayViewsOnSharePointSiteTotalsRowFilterOnLoadOrderByOnLoadHideNewFieldColumnWidthColumnOrderColumnHiddenDescription FormatCaptionSmartTagsTextAlignAggregateType  6 . U   SHgCTt          F ID     MR2h ReplicateProject>DisplayAllViewsOnSharePointSite!  
MR20 Title AuthorCompanyl Database1, $Michael Kleehammer CheckFree5 vcO<)   x e Ak AhԖ_,GkSިtҀ5iOQH4&{OXR7\6·2Tz jFyܣŗWܨ$lOFּKweAm ٹB6TU*)p Aj k hԖ_,GkSިtҀ5iOQH4&{OXR7\6·2Tz jFyܣŗWܨ$lOFּKweAm ٹB6TU*)p j k hԖ_,GkSިtҀ5iOQH4&{OXR7\6·2Tz jFyܣŗWܨ$lOFּKweAm ٹB6TU*)p j k ThԖ_,GkSިtҀ5iOQH4&{OXR7\6·2Tz jFyܣŗWܨ$lOFּKweAm ٹB6TU*)p j >k >ThԖ_,GkSިtҀ5iOQH4&{OXR7\6·2Tz jFyܣŗWܨ$lOFּKweAm ٹB6TU*)p >j 9k 9ThԖ_,GkSިtҀ5iOQH4&{OXR7\6·2Tz jFyܣŗWܨ$lOFּKweAm ٹB6TU*)p 4 > h=}0 k " e  a @  j 3 Aunused_table">MSysNavPaneObjectIDs2.9MSysNavPaneGroupToObjects<84MSysNavPaneGroups,(0MSysNavPaneGroupCategories>:)MSysAccessStorage,(UserDefined SummaryInfo SysRelScriptsReportsModulesFormsDataAccessPages($'MSysComplexType_Attachment>:%MSysComplexType_Text2.#MSysComplexType_Decimal84!MSysComplexType_GUID2.MSysComplexType_IEEEDouble>:MSysComplexType_IEEESingle>:MSysComplexType_Long2.MSysComplexType_Short40MSysComplexType_UnsignedByteB>MSysComplexColumns.*MSysRelationships,(MSysQueries MSysACEsMSysObjects MSysDbRelationships$ DatabasesTables)uL@uL@.@ ,_VBA_PROJECTB>:" )uL@uL@0PROJECTwm>:6$ )uL@uL@1@ ,PROJECT840" )uL@uL@- AcessVBADataNJF. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1619054768.0 pyodbc-4.0.32/tests2/empty.mdb0000664000175000017500000056000000000000000017472 0ustar00mkleehammermkleehammerStandard Jet DBnb` Ugr@?~1y0̝cßFNa7:ޜ(t,`3{6߱nC53Sy[/|*| WL@WL@SysRelq(.........., WL@WL@Modulesq(0000000000. WL@WL@Scriptsq(0000000000. WL@WL@Reportsq(0000000000. 
WL@WL@Formsq(,,,,,,,,,,*  WL@WL@UserDefinedq(@DDD88888886 @ WL@WL@SummaryInfoq(@DDD88888886 @9L@9L@MSysAccessObjectsq(DDDDDDDDDDB sL@sL@MSysRelationshipsp*DDDDDDDDDDB sL@sL@MSysQueriesp*88888888886 sL@sL@MSysACEsp*22222222220 sL@sL@MSysObjectsp*88888888886 sL@T紐L@MSysDbq(H@:::......., @sL@sL@Relationshipsp*<<<<<<<<<<: sL@sL@Databasesp*44444444442 sL@sL@Tablesp*.........., C{hUB/A S @ ,  |  B /   l Y 2 o\I"r_K8+p( +TsXnTA!VXa\ΊOx53dY9Yߡ!ov_ sCWw'L KwPn|wboZ%%ɚoe*ltKep +q( !p( !TsXnTA!VXa\ΊOx53dY9Yߡ!ov_ sCWw'L KwPn|wboZ%%ɚoe*ltKep !q( p( TsXnTA!VXa\ΊOx53dY9Yߡ!ov_ sCWw'L KwPn|wboZ%%ɚoe*ltKep q( p( TsXnTA!VXa\ΊOx53dY9Yߡ!ov_ sCWw'L KwPn|wboZ%%ɚoe*ltKep q( p- q( sXnTA!VXa\ΊOx53dY9Yߡ!ov_ sCWw'L KwPn|wboZ%%ɚoe*ltKep p( p- q( sXnTA!VXa\ΊOx53dY9Yߡ!ov_ sCWw'L KwPn|wboZ%%ɚoe*ltKep p( p- q( sXnTA!VXa\ΊOx53dY9Yߡ!ov_ sCWw'L KwPn|wboZ%%ɚoe*ltKep p( p- q( sXnTA!VXa\ΊOx53dY9Yߡ!ov_ sCWw'L KwPn|wboZ%%ɚoe*ltKep p( p- q( sXnTA!VXa\ΊOx53dY9Yߡ!ov_ sCWw'L KwPn|wboZ%%ɚoe*ltKep p( p- q( sXnTA!VXa\ΊOx53dY9Yߡ!ov_ sCWw'L KwPn|wboZ%%ɚoe*ltKep p( p( p( p( sXnTA!VXa\ΊOx53dY9Yߡ!ov_ sCWw'L KwPn|wboZ%%ɚoe*ltKep q( p( sXnTA!VXa\ΊOx53dY9Yߡ!ov_ sCWw'L KwPn|wboZ%%ɚoe*ltKep p( sXnTA!VXa\ΊOx53dY9Yߡ!ov_ sCWw'L KwPn|wboZ%%ɚoe*ltKep p( p( sXnTA!VXa\ΊOx53dY9Yߡ!ov_ sCWw'L KwPn|wboZ%%ɚoe*ltKep p( sXnTA!VXa\ΊOx53dY9Yߡ!ov_ sCWw'L KwPn|wboZ%%ɚoe*ltKep TsXnTA!VXa\ΊOx53dY9Yߡ!ov_ sCWw'L KwPn|wboZ%%ɚoe*ltKep TsXnTA!VXa\ΊOx53dY9Yߡ!ov_ sCWw'L KwPn|wboZ%%ɚoe*ltKep sXnTA!VXa\ΊOx53dY9Yߡ!ov_ sCWw'L KwPn|wboZ%%ɚoe*ltKep p( q( q( q( p- q( p- q( q( q( q( a LVALhhΓ MR2ANSI Query Mode(Themed Form ControlsAccessVersion NavPane CategoryUseMDIMode ShowDocumentTabs Build.CheckTruncatedNumFieldsNavPane ClosedNavPane Width*NavPane Category NameNavPane View ByNavPane Sort By    08.50      MR2ANSI Query Mode(Themed Form ControlsAccessVersion NavPane CategoryUseMDIMode ShowDocumentTabs Build.CheckTruncatedNumFieldsNavPane ClosedNavPane Width*NavPane Category NameNavPane View ByNavPane Sort By    08.50            MR2h 
ReplicateProject>DisplayAllViewsOnSharePointSite!  MR20 Title AuthorCompanyd empty, $Michael Kleehammer CheckFree Y̯klNY Y kDataIDklklkl`kYAOIndexv1@K[fVwccessObjectsq(DDDDDDDDDDB K[ࡱ> Root Entry\8PropData Forms P/ꏊP/ꏊReportsP/ꏊP/ꏊiMSysDbModulesP/ꏊP/ꏊScriptsP/ꏊP/ꏊVBAP/ꏊP/ꏊCmdbarsP/ꏊP/ꏊDataAccessPages P/ꏊP/ꏊDatabases P/ꏊ\80 P/ꏊ\8Blob CustomGroups P/ꏊP/ꏊImExSpecsP/ꏊP/ꏊDirDataK[>YN Y Y fqY  Y Y @LY Y  Filter FlagsIdNamePosition SelectedObjectIDType,,,YIdv1 %%% RY N Y Y Y  Y Y ,Y Y  FlagsGroupCategoryIDIdName"Object Type GroupObjectIDPosition,,,,, ,YYGroupCategoryIDIdv1@@  @&&&&&&&& & &  &   @&&&&&&&&&& &  YN""Y Y Y Y @ Y ,Y Y  FlagsGroupIDIconIdNameObjectIDPosition,,"#,,,"$,YYGroupIDIdv1!*! *qCustomg e e  yLO"" ' }  } Custom Group 162.*&" } } } } } } } }&Y@{3N((Y ID(()YPrimaryKey '!'  wRYsL@N,,Y  Y LY IdNameTypedv  +h=T)  M  I +MSysNavPaneObjectIDs2.!MSysNavPaneGroupToObjects<8MSysNavPaneGroups,(MSysNavPaneGroupCategories>:DataAccessPages($SysRelModulesScriptsReportsFormsUserDefined SummaryInfo MSysAccessObjects,(MSysRelationships,(MSysQueries MSysACEsMSysObjects MSysDbRelationships$ DatabasesTables././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1619054768.0 pyodbc-4.0.32/tests2/exceltests.py0000775000175000017500000001112200000000000020404 0ustar00mkleehammermkleehammer#!/usr/bin/python # Tests for reading from Excel files. # # I have not been able to successfully create or modify Excel files. 
import sys, os, re import unittest from os.path import abspath from testutils import * CNXNSTRING = None class ExcelTestCase(unittest.TestCase): def __init__(self, method_name): unittest.TestCase.__init__(self, method_name) def setUp(self): self.cnxn = pyodbc.connect(CNXNSTRING, autocommit=True) self.cursor = self.cnxn.cursor() for i in range(3): try: self.cursor.execute("drop table t%d" % i) self.cnxn.commit() except: pass self.cnxn.rollback() def tearDown(self): try: self.cursor.close() self.cnxn.close() except: # If we've already closed the cursor or connection, exceptions are thrown. pass def test_getinfo_string(self): value = self.cnxn.getinfo(pyodbc.SQL_CATALOG_NAME_SEPARATOR) self.assertTrue(isinstance(value, str)) def test_getinfo_bool(self): value = self.cnxn.getinfo(pyodbc.SQL_ACCESSIBLE_TABLES) self.assertTrue(isinstance(value, bool)) def test_getinfo_int(self): value = self.cnxn.getinfo(pyodbc.SQL_DEFAULT_TXN_ISOLATION) self.assertTrue(isinstance(value, (int, long))) def test_getinfo_smallint(self): value = self.cnxn.getinfo(pyodbc.SQL_CONCAT_NULL_BEHAVIOR) self.assertTrue(isinstance(value, int)) def test_read_sheet(self): # The first method of reading data is to access worksheets by name in this format [name$]. # # Our second sheet is named Sheet2 and has two columns. The first has values 10, 20, 30, etc. rows = self.cursor.execute("select * from [Sheet2$]").fetchall() self.assertEqual(len(rows), 5) for index, row in enumerate(rows): self.assertEqual(row.s2num, float(index + 1) * 10) def test_read_range(self): # The second method of reading data is to assign a name to a range of cells and access that as a table. # # Our first worksheet has a section named Table1. The first column has values 1, 2, 3, etc. 
rows = self.cursor.execute("select * from Table1").fetchall() self.assertEqual(len(rows), 10) for index, row in enumerate(rows): self.assertEqual(row.num, float(index + 1)) self.assertEqual(row.val, chr(ord('a') + index)) def test_tables(self): # This is useful for figuring out what is available tables = [ row.table_name for row in self.cursor.tables() ] assert 'Sheet2$' in tables, 'tables: %s' % ' '.join(tables) # def test_append(self): # rows = self.cursor.execute("select s2num, s2val from [Sheet2$]").fetchall() # # print rows # # nextnum = max([ row.s2num for row in rows ]) + 10 # # self.cursor.execute("insert into [Sheet2$](s2num, s2val) values (?, 'z')", nextnum) # # row = self.cursor.execute("select s2num, s2val from [Sheet2$] where s2num=?", nextnum).fetchone() # self.assertTrue(row) # # print 'added:', nextnum, len(rows), 'rows' # # self.assertEqual(row.s2num, nextnum) # self.assertEqual(row.s2val, 'z') # # self.cnxn.commit() def main(): from optparse import OptionParser parser = OptionParser() #usage=usage) parser.add_option("-v", "--verbose", action="count", help="Increment test verbosity (can be used multiple times)") parser.add_option("-d", "--debug", action="store_true", default=False, help="Print debugging items") parser.add_option("-t", "--test", help="Run only the named test") (options, args) = parser.parse_args() if args: parser.error('no arguments expected') global CNXNSTRING path = dirname(abspath(__file__)) filename = join(path, 'test.xls') assert os.path.exists(filename) CNXNSTRING = 'Driver={Microsoft Excel Driver (*.xls)};DBQ=%s;READONLY=FALSE' % filename if options.verbose: cnxn = pyodbc.connect(CNXNSTRING, autocommit=True) print_library_info(cnxn) cnxn.close() suite = load_tests(ExcelTestCase, options.test) testRunner = unittest.TextTestRunner(verbosity=options.verbose) result = testRunner.run(suite) return result if __name__ == '__main__': # Add the build directory to the path so we're testing the latest build, not the installed version. 
add_to_path() import pyodbc sys.exit(0 if main().wasSuccessful() else 1) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1629143953.0 pyodbc-4.0.32/tests2/informixtests.py0000775000175000017500000013316000000000000021146 0ustar00mkleehammermkleehammer#!/usr/bin/python # -*- coding: latin-1 -*- usage = """\ usage: %prog [options] connection_string Unit tests for Informix DB. To use, pass a connection string as the parameter. The tests will create and drop tables t1 and t2 as necessary. These run using the version from the 'build' directory, not the version installed into the Python directories. You must run python setup.py build before running the tests. You can also put the connection string into a tmp/setup.cfg file like so: [informixtests] connection-string=DRIVER={IBM INFORMIX ODBC DRIVER (64-bit)};SERVER=localhost;UID=uid;PWD=pwd;DATABASE=db """ import sys, os, re import unittest from decimal import Decimal from datetime import datetime, date, time from os.path import join, getsize, dirname, abspath from testutils import * _TESTSTR = '0123456789-abcdefghijklmnopqrstuvwxyz-' def _generate_test_string(length): """ Returns a string of `length` characters, constructed by repeating _TESTSTR as necessary. To enhance performance, there are 3 ways data is read, based on the length of the value, so most data types are tested with 3 lengths. This function helps us generate the test data. We use a recognizable data set instead of a single character to make it less likely that "overlap" errors will be hidden and to help us manually identify where a break occurs. 
""" if length <= len(_TESTSTR): return _TESTSTR[:length] c = (length + len(_TESTSTR)-1) / len(_TESTSTR) v = _TESTSTR * c return v[:length] class InformixTestCase(unittest.TestCase): SMALL_FENCEPOST_SIZES = [ 0, 1, 255, 256, 510, 511, 512, 1023, 1024, 2047, 2048, 4000 ] LARGE_FENCEPOST_SIZES = [ 4095, 4096, 4097, 10 * 1024, 20 * 1024 ] ANSI_FENCEPOSTS = [ _generate_test_string(size) for size in SMALL_FENCEPOST_SIZES ] UNICODE_FENCEPOSTS = [ unicode(s) for s in ANSI_FENCEPOSTS ] IMAGE_FENCEPOSTS = ANSI_FENCEPOSTS + [ _generate_test_string(size) for size in LARGE_FENCEPOST_SIZES ] def __init__(self, method_name, connection_string): unittest.TestCase.__init__(self, method_name) self.connection_string = connection_string def setUp(self): self.cnxn = pyodbc.connect(self.connection_string) self.cursor = self.cnxn.cursor() for i in range(3): try: self.cursor.execute("drop table t%d" % i) self.cnxn.commit() except: pass for i in range(3): try: self.cursor.execute("drop procedure proc%d" % i) self.cnxn.commit() except: pass try: self.cursor.execute('drop function func1') self.cnxn.commit() except: pass self.cnxn.rollback() def tearDown(self): try: self.cursor.close() self.cnxn.close() except: # If we've already closed the cursor or connection, exceptions are thrown. 
pass def test_multiple_bindings(self): "More than one bind and select on a cursor" self.cursor.execute("create table t1(n int)") self.cursor.execute("insert into t1 values (?)", 1) self.cursor.execute("insert into t1 values (?)", 2) self.cursor.execute("insert into t1 values (?)", 3) for i in range(3): self.cursor.execute("select n from t1 where n < ?", 10) self.cursor.execute("select n from t1 where n < 3") def test_different_bindings(self): self.cursor.execute("create table t1(n int)") self.cursor.execute("create table t2(d datetime)") self.cursor.execute("insert into t1 values (?)", 1) self.cursor.execute("insert into t2 values (?)", datetime.now()) def test_drivers(self): p = pyodbc.drivers() self.assertTrue(isinstance(p, list)) def test_datasources(self): p = pyodbc.dataSources() self.assertTrue(isinstance(p, dict)) def test_getinfo_string(self): value = self.cnxn.getinfo(pyodbc.SQL_CATALOG_NAME_SEPARATOR) self.assertTrue(isinstance(value, str)) def test_getinfo_bool(self): value = self.cnxn.getinfo(pyodbc.SQL_ACCESSIBLE_TABLES) self.assertTrue(isinstance(value, bool)) def test_getinfo_int(self): value = self.cnxn.getinfo(pyodbc.SQL_DEFAULT_TXN_ISOLATION) self.assertTrue(isinstance(value, (int, long))) def test_getinfo_smallint(self): value = self.cnxn.getinfo(pyodbc.SQL_CONCAT_NULL_BEHAVIOR) self.assertTrue(isinstance(value, int)) def test_noscan(self): self.assertEqual(self.cursor.noscan, False) self.cursor.noscan = True self.assertEqual(self.cursor.noscan, True) def test_guid(self): self.cursor.execute("create table t1(g1 uniqueidentifier)") self.cursor.execute("insert into t1 values (newid())") v = self.cursor.execute("select * from t1").fetchone()[0] self.assertEqual(type(v), str) self.assertEqual(len(v), 36) def test_nextset(self): self.cursor.execute("create table t1(i int)") for i in range(4): self.cursor.execute("insert into t1(i) values(?)", i) self.cursor.execute("select i from t1 where i < 2 order by i; select i from t1 where i >= 2 order by i") 
for i, row in enumerate(self.cursor): self.assertEqual(i, row.i) self.assertEqual(self.cursor.nextset(), True) for i, row in enumerate(self.cursor): self.assertEqual(i + 2, row.i) def test_fixed_unicode(self): value = u"t\xebsting" self.cursor.execute("create table t1(s nchar(7))") self.cursor.execute("insert into t1 values(?)", u"t\xebsting") v = self.cursor.execute("select * from t1").fetchone()[0] self.assertEqual(type(v), unicode) self.assertEqual(len(v), len(value)) # If we alloc'd wrong, the test below might work because of an embedded NULL self.assertEqual(v, value) def _test_strtype(self, sqltype, value, colsize=None): """ The implementation for string, Unicode, and binary tests. """ assert colsize is None or (value is None or colsize >= len(value)) if colsize: sql = "create table t1(s %s(%s))" % (sqltype, colsize) else: sql = "create table t1(s %s)" % sqltype self.cursor.execute(sql) self.cursor.execute("insert into t1 values(?)", value) v = self.cursor.execute("select * from t1").fetchone()[0] self.assertEqual(type(v), type(value)) if value is not None: self.assertEqual(len(v), len(value)) self.assertEqual(v, value) # Reported by Andy Hochhaus in the pyodbc group: In 2.1.7 and earlier, a hardcoded length of 255 was used to # determine whether a parameter was bound as a SQL_VARCHAR or SQL_LONGVARCHAR. Apparently SQL Server chokes if # we bind as a SQL_LONGVARCHAR and the target column size is 8000 or less, which is considers just SQL_VARCHAR. # This means binding a 256 character value would cause problems if compared with a VARCHAR column under # 8001. We now use SQLGetTypeInfo to determine the time to switch. # # [42000] [Microsoft][SQL Server Native Client 10.0][SQL Server]The data types varchar and text are incompatible in the equal to operator. self.cursor.execute("select * from t1 where s=?", value) def _test_strliketype(self, sqltype, value, colsize=None): """ The implementation for text, image, ntext, and binary. 
These types do not support comparison operators. """ assert colsize is None or (value is None or colsize >= len(value)) if colsize: sql = "create table t1(s %s(%s))" % (sqltype, colsize) else: sql = "create table t1(s %s)" % sqltype self.cursor.execute(sql) self.cursor.execute("insert into t1 values(?)", value) v = self.cursor.execute("select * from t1").fetchone()[0] self.assertEqual(type(v), type(value)) if value is not None: self.assertEqual(len(v), len(value)) self.assertEqual(v, value) # # varchar # def test_varchar_null(self): self._test_strtype('varchar', None, 100) # Generate a test for each fencepost size: test_varchar_0, etc. def _maketest(value): def t(self): self._test_strtype('varchar', value, len(value)) return t for value in ANSI_FENCEPOSTS: locals()['test_varchar_%s' % len(value)] = _maketest(value) def test_varchar_many(self): self.cursor.execute("create table t1(c1 varchar(300), c2 varchar(300), c3 varchar(300))") v1 = 'ABCDEFGHIJ' * 30 v2 = '0123456789' * 30 v3 = '9876543210' * 30 self.cursor.execute("insert into t1(c1, c2, c3) values (?,?,?)", v1, v2, v3); row = self.cursor.execute("select c1, c2, c3, len(c1) as l1, len(c2) as l2, len(c3) as l3 from t1").fetchone() self.assertEqual(v1, row.c1) self.assertEqual(v2, row.c2) self.assertEqual(v3, row.c3) def test_varchar_upperlatin(self): self._test_strtype('varchar', '') # # unicode # def test_unicode_null(self): self._test_strtype('nvarchar', None, 100) # Generate a test for each fencepost size: test_unicode_0, etc. 
def _maketest(value): def t(self): self._test_strtype('nvarchar', value, len(value)) return t for value in UNICODE_FENCEPOSTS: locals()['test_unicode_%s' % len(value)] = _maketest(value) def test_unicode_upperlatin(self): self._test_strtype('varchar', '') # # binary # def test_null_binary(self): self._test_strtype('varbinary', None, 100) def test_large_null_binary(self): # Bug 1575064 self._test_strtype('varbinary', None, 4000) # Generate a test for each fencepost size: test_unicode_0, etc. def _maketest(value): def t(self): self._test_strtype('varbinary', buffer(value), len(value)) return t for value in ANSI_FENCEPOSTS: locals()['test_binary_%s' % len(value)] = _maketest(value) # # image # def test_image_null(self): self._test_strliketype('image', None) # Generate a test for each fencepost size: test_unicode_0, etc. def _maketest(value): def t(self): self._test_strliketype('image', buffer(value)) return t for value in IMAGE_FENCEPOSTS: locals()['test_image_%s' % len(value)] = _maketest(value) def test_image_upperlatin(self): self._test_strliketype('image', buffer('')) # # text # # def test_empty_text(self): # self._test_strliketype('text', buffer('')) def test_null_text(self): self._test_strliketype('text', None) # Generate a test for each fencepost size: test_unicode_0, etc. def _maketest(value): def t(self): self._test_strliketype('text', value) return t for value in ANSI_FENCEPOSTS: locals()['test_text_%s' % len(value)] = _maketest(value) def test_text_upperlatin(self): self._test_strliketype('text', '') # # bit # def test_bit(self): value = True self.cursor.execute("create table t1(b bit)") self.cursor.execute("insert into t1 values (?)", value) v = self.cursor.execute("select b from t1").fetchone()[0] self.assertEqual(type(v), bool) self.assertEqual(v, value) # # decimal # def _decimal(self, precision, scale, negative): # From test provided by planders (thanks!) 
in Issue 91 self.cursor.execute("create table t1(d decimal(%s, %s))" % (precision, scale)) # Construct a decimal that uses the maximum precision and scale. decStr = '9' * (precision - scale) if scale: decStr = decStr + "." + '9' * scale if negative: decStr = "-" + decStr value = Decimal(decStr) self.cursor.execute("insert into t1 values(?)", value) v = self.cursor.execute("select d from t1").fetchone()[0] self.assertEqual(v, value) def _maketest(p, s, n): def t(self): self._decimal(p, s, n) return t for (p, s, n) in [ (1, 0, False), (1, 0, True), (6, 0, False), (6, 2, False), (6, 4, True), (6, 6, True), (38, 0, False), (38, 10, False), (38, 38, False), (38, 0, True), (38, 10, True), (38, 38, True) ]: locals()['test_decimal_%s_%s_%s' % (p, s, n and 'n' or 'p')] = _maketest(p, s, n) def test_decimal_e(self): """Ensure exponential notation decimals are properly handled""" value = Decimal((0, (1, 2, 3), 5)) # prints as 1.23E+7 self.cursor.execute("create table t1(d decimal(10, 2))") self.cursor.execute("insert into t1 values (?)", value) result = self.cursor.execute("select * from t1").fetchone()[0] self.assertEqual(result, value) def test_subquery_params(self): """Ensure parameter markers work in a subquery""" self.cursor.execute("create table t1(id integer, s varchar(20))") self.cursor.execute("insert into t1 values (?,?)", 1, 'test') row = self.cursor.execute(""" select x.id from ( select id from t1 where s = ? and id between ? and ? ) x """, 'test', 1, 10).fetchone() self.assertNotEqual(row, None) self.assertEqual(row[0], 1) def _exec(self): self.cursor.execute(self.sql) def test_close_cnxn(self): """Make sure using a Cursor after closing its connection doesn't crash.""" self.cursor.execute("create table t1(id integer, s varchar(20))") self.cursor.execute("insert into t1 values (?,?)", 1, 'test') self.cursor.execute("select * from t1") self.cnxn.close() # Now that the connection is closed, we expect an exception. 
(If the code attempts to use # the HSTMT, we'll get an access violation instead.) self.sql = "select * from t1" self.assertRaises(pyodbc.ProgrammingError, self._exec) def test_empty_string(self): self.cursor.execute("create table t1(s varchar(20))") self.cursor.execute("insert into t1 values(?)", "") def test_fixed_str(self): value = "testing" self.cursor.execute("create table t1(s char(7))") self.cursor.execute("insert into t1 values(?)", "testing") v = self.cursor.execute("select * from t1").fetchone()[0] self.assertEqual(type(v), str) self.assertEqual(len(v), len(value)) # If we alloc'd wrong, the test below might work because of an embedded NULL self.assertEqual(v, value) def test_empty_unicode(self): self.cursor.execute("create table t1(s nvarchar(20))") self.cursor.execute("insert into t1 values(?)", u"") def test_unicode_query(self): self.cursor.execute(u"select 1") def test_negative_row_index(self): self.cursor.execute("create table t1(s varchar(20))") self.cursor.execute("insert into t1 values(?)", "1") row = self.cursor.execute("select * from t1").fetchone() self.assertEqual(row[0], "1") self.assertEqual(row[-1], "1") def test_version(self): self.assertEqual(3, len(pyodbc.version.split('.'))) # 1.3.1 etc. # # date, time, datetime # def test_datetime(self): value = datetime(2007, 1, 15, 3, 4, 5) self.cursor.execute("create table t1(dt datetime)") self.cursor.execute("insert into t1 values (?)", value) result = self.cursor.execute("select dt from t1").fetchone()[0] self.assertEqual(type(value), datetime) self.assertEqual(value, result) def test_datetime_fraction(self): # SQL Server supports milliseconds, but Python's datetime supports nanoseconds, so the most granular datetime # supported is xxx000. 
value = datetime(2007, 1, 15, 3, 4, 5, 123000) self.cursor.execute("create table t1(dt datetime)") self.cursor.execute("insert into t1 values (?)", value) result = self.cursor.execute("select dt from t1").fetchone()[0] self.assertEqual(type(value), datetime) self.assertEqual(result, value) def test_datetime_fraction_rounded(self): # SQL Server supports milliseconds, but Python's datetime supports nanoseconds. pyodbc rounds down to what the # database supports. full = datetime(2007, 1, 15, 3, 4, 5, 123456) rounded = datetime(2007, 1, 15, 3, 4, 5, 123000) self.cursor.execute("create table t1(dt datetime)") self.cursor.execute("insert into t1 values (?)", full) result = self.cursor.execute("select dt from t1").fetchone()[0] self.assertEqual(type(result), datetime) self.assertEqual(result, rounded) def test_date(self): value = date.today() self.cursor.execute("create table t1(d date)") self.cursor.execute("insert into t1 values (?)", value) result = self.cursor.execute("select d from t1").fetchone()[0] self.assertEqual(type(value), date) self.assertEqual(value, result) def test_time(self): value = datetime.now().time() # We aren't yet writing values using the new extended time type so the value written to the database is only # down to the second. 
value = value.replace(microsecond=0) self.cursor.execute("create table t1(t time)") self.cursor.execute("insert into t1 values (?)", value) result = self.cursor.execute("select t from t1").fetchone()[0] self.assertEqual(type(value), time) self.assertEqual(value, result) def test_datetime2(self): value = datetime(2007, 1, 15, 3, 4, 5) self.cursor.execute("create table t1(dt datetime2)") self.cursor.execute("insert into t1 values (?)", value) result = self.cursor.execute("select dt from t1").fetchone()[0] self.assertEqual(type(value), datetime) self.assertEqual(value, result) # # ints and floats # def test_int(self): value = 1234 self.cursor.execute("create table t1(n int)") self.cursor.execute("insert into t1 values (?)", value) result = self.cursor.execute("select n from t1").fetchone()[0] self.assertEqual(result, value) def test_negative_int(self): value = -1 self.cursor.execute("create table t1(n int)") self.cursor.execute("insert into t1 values (?)", value) result = self.cursor.execute("select n from t1").fetchone()[0] self.assertEqual(result, value) def test_bigint(self): input = 3000000000 self.cursor.execute("create table t1(d bigint)") self.cursor.execute("insert into t1 values (?)", input) result = self.cursor.execute("select d from t1").fetchone()[0] self.assertEqual(result, input) def test_float(self): value = 1234.567 self.cursor.execute("create table t1(n float)") self.cursor.execute("insert into t1 values (?)", value) result = self.cursor.execute("select n from t1").fetchone()[0] self.assertEqual(result, value) def test_negative_float(self): value = -200 self.cursor.execute("create table t1(n float)") self.cursor.execute("insert into t1 values (?)", value) result = self.cursor.execute("select n from t1").fetchone()[0] self.assertEqual(value, result) # # stored procedures # # def test_callproc(self): # "callproc with a simple input-only stored procedure" # pass def test_sp_results(self): self.cursor.execute( """ Create procedure proc1 AS select top 10 
name, id, xtype, refdate from sysobjects """) rows = self.cursor.execute("exec proc1").fetchall() self.assertEqual(type(rows), list) self.assertEqual(len(rows), 10) # there has to be at least 10 items in sysobjects self.assertEqual(type(rows[0].refdate), datetime) def test_sp_results_from_temp(self): # Note: I've used "set nocount on" so that we don't get the number of rows deleted from #tmptable. # If you don't do this, you'd need to call nextset() once to skip it. self.cursor.execute( """ Create procedure proc1 AS set nocount on select top 10 name, id, xtype, refdate into #tmptable from sysobjects select * from #tmptable """) self.cursor.execute("exec proc1") self.assertTrue(self.cursor.description is not None) self.assertTrue(len(self.cursor.description) == 4) rows = self.cursor.fetchall() self.assertEqual(type(rows), list) self.assertEqual(len(rows), 10) # there has to be at least 10 items in sysobjects self.assertEqual(type(rows[0].refdate), datetime) def test_sp_results_from_vartbl(self): self.cursor.execute( """ Create procedure proc1 AS set nocount on declare @tmptbl table(name varchar(100), id int, xtype varchar(4), refdate datetime) insert into @tmptbl select top 10 name, id, xtype, refdate from sysobjects select * from @tmptbl """) self.cursor.execute("exec proc1") rows = self.cursor.fetchall() self.assertEqual(type(rows), list) self.assertEqual(len(rows), 10) # there has to be at least 10 items in sysobjects self.assertEqual(type(rows[0].refdate), datetime) def test_sp_with_dates(self): # Reported in the forums that passing two datetimes to a stored procedure doesn't work. 
self.cursor.execute( """ if exists (select * from dbo.sysobjects where id = object_id(N'[test_sp]') and OBJECTPROPERTY(id, N'IsProcedure') = 1) drop procedure [dbo].[test_sp] """) self.cursor.execute( """ create procedure test_sp(@d1 datetime, @d2 datetime) AS declare @d as int set @d = datediff(year, @d1, @d2) select @d """) self.cursor.execute("exec test_sp ?, ?", datetime.now(), datetime.now()) rows = self.cursor.fetchall() self.assertTrue(rows is not None) self.assertTrue(rows[0][0] == 0) # 0 years apart def test_sp_with_none(self): # Reported in the forums that passing None caused an error. self.cursor.execute( """ if exists (select * from dbo.sysobjects where id = object_id(N'[test_sp]') and OBJECTPROPERTY(id, N'IsProcedure') = 1) drop procedure [dbo].[test_sp] """) self.cursor.execute( """ create procedure test_sp(@x varchar(20)) AS declare @y varchar(20) set @y = @x select @y """) self.cursor.execute("exec test_sp ?", None) rows = self.cursor.fetchall() self.assertTrue(rows is not None) self.assertTrue(rows[0][0] == None) # 0 years apart # # rowcount # def test_rowcount_delete(self): self.assertEqual(self.cursor.rowcount, -1) self.cursor.execute("create table t1(i int)") count = 4 for i in range(count): self.cursor.execute("insert into t1 values (?)", i) self.cursor.execute("delete from t1") self.assertEqual(self.cursor.rowcount, count) def test_rowcount_nodata(self): """ This represents a different code path than a delete that deleted something. The return value is SQL_NO_DATA and code after it was causing an error. We could use SQL_NO_DATA to step over the code that errors out and drop down to the same SQLRowCount code. On the other hand, we could hardcode a zero return value. """ self.cursor.execute("create table t1(i int)") # This is a different code path internally. self.cursor.execute("delete from t1") self.assertEqual(self.cursor.rowcount, 0) def test_rowcount_select(self): """ Ensure Cursor.rowcount is set properly after a select statement. 
pyodbc calls SQLRowCount after each execute and sets Cursor.rowcount, but SQL Server 2005 returns -1 after a select statement, so we'll test for that behavior. This is valid behavior according to the DB API specification, but people don't seem to like it. """ self.cursor.execute("create table t1(i int)") count = 4 for i in range(count): self.cursor.execute("insert into t1 values (?)", i) self.cursor.execute("select * from t1") self.assertEqual(self.cursor.rowcount, -1) rows = self.cursor.fetchall() self.assertEqual(len(rows), count) self.assertEqual(self.cursor.rowcount, -1) def test_rowcount_reset(self): "Ensure rowcount is reset to -1" self.cursor.execute("create table t1(i int)") count = 4 for i in range(count): self.cursor.execute("insert into t1 values (?)", i) self.assertEqual(self.cursor.rowcount, 1) self.cursor.execute("create table t2(i int)") self.assertEqual(self.cursor.rowcount, -1) # # always return Cursor # # In the 2.0.x branch, Cursor.execute sometimes returned the cursor and sometimes the rowcount. This proved very # confusing when things went wrong and added very little value even when things went right since users could always # use: cursor.execute("...").rowcount def test_retcursor_delete(self): self.cursor.execute("create table t1(i int)") self.cursor.execute("insert into t1 values (1)") v = self.cursor.execute("delete from t1") self.assertEqual(v, self.cursor) def test_retcursor_nodata(self): """ This represents a different code path than a delete that deleted something. The return value is SQL_NO_DATA and code after it was causing an error. We could use SQL_NO_DATA to step over the code that errors out and drop down to the same SQLRowCount code. """ self.cursor.execute("create table t1(i int)") # This is a different code path internally. 
v = self.cursor.execute("delete from t1") self.assertEqual(v, self.cursor) def test_retcursor_select(self): self.cursor.execute("create table t1(i int)") self.cursor.execute("insert into t1 values (1)") v = self.cursor.execute("select * from t1") self.assertEqual(v, self.cursor) # # misc # def test_lower_case(self): "Ensure pyodbc.lowercase forces returned column names to lowercase." # Has to be set before creating the cursor, so we must recreate self.cursor. pyodbc.lowercase = True self.cursor = self.cnxn.cursor() self.cursor.execute("create table t1(Abc int, dEf int)") self.cursor.execute("select * from t1") names = [ t[0] for t in self.cursor.description ] names.sort() self.assertEqual(names, [ "abc", "def" ]) # Put it back so other tests don't fail. pyodbc.lowercase = False def test_row_description(self): """ Ensure Cursor.description is accessible as Row.cursor_description. """ self.cursor = self.cnxn.cursor() self.cursor.execute("create table t1(a int, b char(3))") self.cnxn.commit() self.cursor.execute("insert into t1 values(1, 'abc')") row = self.cursor.execute("select * from t1").fetchone() self.assertEqual(self.cursor.description, row.cursor_description) def test_temp_select(self): # A project was failing to create temporary tables via select into. 
self.cursor.execute("create table t1(s char(7))") self.cursor.execute("insert into t1 values(?)", "testing") v = self.cursor.execute("select * from t1").fetchone()[0] self.assertEqual(type(v), str) self.assertEqual(v, "testing") self.cursor.execute("select s into t2 from t1") v = self.cursor.execute("select * from t1").fetchone()[0] self.assertEqual(type(v), str) self.assertEqual(v, "testing") def test_money(self): d = Decimal('123456.78') self.cursor.execute("create table t1(i int identity(1,1), m money)") self.cursor.execute("insert into t1(m) values (?)", d) v = self.cursor.execute("select m from t1").fetchone()[0] self.assertEqual(v, d) def test_executemany(self): self.cursor.execute("create table t1(a int, b varchar(10))") params = [ (i, str(i)) for i in range(1, 6) ] self.cursor.executemany("insert into t1(a, b) values (?,?)", params) count = self.cursor.execute("select count(*) from t1").fetchone()[0] self.assertEqual(count, len(params)) self.cursor.execute("select a, b from t1 order by a") rows = self.cursor.fetchall() self.assertEqual(count, len(rows)) for param, row in zip(params, rows): self.assertEqual(param[0], row[0]) self.assertEqual(param[1], row[1]) def test_executemany_one(self): "Pass executemany a single sequence" self.cursor.execute("create table t1(a int, b varchar(10))") params = [ (1, "test") ] self.cursor.executemany("insert into t1(a, b) values (?,?)", params) count = self.cursor.execute("select count(*) from t1").fetchone()[0] self.assertEqual(count, len(params)) self.cursor.execute("select a, b from t1 order by a") rows = self.cursor.fetchall() self.assertEqual(count, len(rows)) for param, row in zip(params, rows): self.assertEqual(param[0], row[0]) self.assertEqual(param[1], row[1]) def test_executemany_failure(self): """ Ensure that an exception is raised if one query in an executemany fails. 
""" self.cursor.execute("create table t1(a int, b varchar(10))") params = [ (1, 'good'), ('error', 'not an int'), (3, 'good') ] self.assertRaises(pyodbc.Error, self.cursor.executemany, "insert into t1(a, b) value (?, ?)", params) def test_row_slicing(self): self.cursor.execute("create table t1(a int, b int, c int, d int)"); self.cursor.execute("insert into t1 values(1,2,3,4)") row = self.cursor.execute("select * from t1").fetchone() result = row[:] self.assertTrue(result is row) result = row[:-1] self.assertEqual(result, (1,2,3)) result = row[0:4] self.assertTrue(result is row) def test_row_repr(self): self.cursor.execute("create table t1(a int, b int, c int, d int)"); self.cursor.execute("insert into t1 values(1,2,3,4)") row = self.cursor.execute("select * from t1").fetchone() result = str(row) self.assertEqual(result, "(1, 2, 3, 4)") result = str(row[:-1]) self.assertEqual(result, "(1, 2, 3)") result = str(row[:1]) self.assertEqual(result, "(1,)") def test_concatenation(self): v2 = '0123456789' * 30 v3 = '9876543210' * 30 self.cursor.execute("create table t1(c1 int identity(1, 1), c2 varchar(300), c3 varchar(300))") self.cursor.execute("insert into t1(c2, c3) values (?,?)", v2, v3) row = self.cursor.execute("select c2, c3, c2 + c3 as both from t1").fetchone() self.assertEqual(row.both, v2 + v3) def test_view_select(self): # Reported in forum: Can't select from a view? I think I do this a lot, but another test never hurts. # Create a table (t1) with 3 rows and a view (t2) into it. 
self.cursor.execute("create table t1(c1 int identity(1, 1), c2 varchar(50))") for i in range(3): self.cursor.execute("insert into t1(c2) values (?)", "string%s" % i) self.cursor.execute("create view t2 as select * from t1") # Select from the view self.cursor.execute("select * from t2") rows = self.cursor.fetchall() self.assertTrue(rows is not None) self.assertTrue(len(rows) == 3) def test_autocommit(self): self.assertEqual(self.cnxn.autocommit, False) othercnxn = pyodbc.connect(self.connection_string, autocommit=True) self.assertEqual(othercnxn.autocommit, True) othercnxn.autocommit = False self.assertEqual(othercnxn.autocommit, False) def test_unicode_results(self): "Ensure unicode_results forces Unicode" othercnxn = pyodbc.connect(self.connection_string, unicode_results=True) othercursor = othercnxn.cursor() # ANSI data in an ANSI column ... othercursor.execute("create table t1(s varchar(20))") othercursor.execute("insert into t1 values(?)", 'test') # ... should be returned as Unicode value = othercursor.execute("select s from t1").fetchone()[0] self.assertEqual(value, u'test') def test_informix_callproc(self): try: self.cursor.execute("drop procedure pyodbctest") self.cnxn.commit() except: pass self.cursor.execute("create table t1(s varchar(10))") self.cursor.execute("insert into t1 values(?)", "testing") self.cursor.execute(""" create procedure pyodbctest @var1 varchar(32) as begin select s from t1 return end """) self.cnxn.commit() # for row in self.cursor.procedureColumns('pyodbctest'): # print row.procedure_name, row.column_name, row.column_type, row.type_name self.cursor.execute("exec pyodbctest 'hi'") # print self.cursor.description # for row in self.cursor: # print row.s def test_skip(self): # Insert 1, 2, and 3. Fetch 1, skip 2, fetch 3. 
self.cursor.execute("create table t1(id int)"); for i in range(1, 5): self.cursor.execute("insert into t1 values(?)", i) self.cursor.execute("select id from t1 order by id") self.assertEqual(self.cursor.fetchone()[0], 1) self.cursor.skip(2) self.assertEqual(self.cursor.fetchone()[0], 4) def test_timeout(self): self.assertEqual(self.cnxn.timeout, 0) # defaults to zero (off) self.cnxn.timeout = 30 self.assertEqual(self.cnxn.timeout, 30) self.cnxn.timeout = 0 self.assertEqual(self.cnxn.timeout, 0) def test_sets_execute(self): # Only lists and tuples are allowed. def f(): self.cursor.execute("create table t1 (word varchar (100))") words = set (['a']) self.cursor.execute("insert into t1 (word) VALUES (?)", [words]) self.assertRaises(pyodbc.ProgrammingError, f) def test_sets_executemany(self): # Only lists and tuples are allowed. def f(): self.cursor.execute("create table t1 (word varchar (100))") words = set (['a']) self.cursor.executemany("insert into t1 (word) values (?)", [words]) self.assertRaises(TypeError, f) def test_row_execute(self): "Ensure we can use a Row object as a parameter to execute" self.cursor.execute("create table t1(n int, s varchar(10))") self.cursor.execute("insert into t1 values (1, 'a')") row = self.cursor.execute("select n, s from t1").fetchone() self.assertNotEqual(row, None) self.cursor.execute("create table t2(n int, s varchar(10))") self.cursor.execute("insert into t2 values (?, ?)", row) def test_row_executemany(self): "Ensure we can use a Row object as a parameter to executemany" self.cursor.execute("create table t1(n int, s varchar(10))") for i in range(3): self.cursor.execute("insert into t1 values (?, ?)", i, chr(ord('a')+i)) rows = self.cursor.execute("select n, s from t1").fetchall() self.assertNotEqual(len(rows), 0) self.cursor.execute("create table t2(n int, s varchar(10))") self.cursor.executemany("insert into t2 values (?, ?)", rows) def test_description(self): "Ensure cursor.description is correct" self.cursor.execute("create 
table t1(n int, s varchar(8), d decimal(5,2))") self.cursor.execute("insert into t1 values (1, 'abc', '1.23')") self.cursor.execute("select * from t1") # (I'm not sure the precision of an int is constant across different versions, bits, so I'm hand checking the # items I do know. # int t = self.cursor.description[0] self.assertEqual(t[0], 'n') self.assertEqual(t[1], int) self.assertEqual(t[5], 0) # scale self.assertEqual(t[6], True) # nullable # varchar(8) t = self.cursor.description[1] self.assertEqual(t[0], 's') self.assertEqual(t[1], str) self.assertEqual(t[4], 8) # precision self.assertEqual(t[5], 0) # scale self.assertEqual(t[6], True) # nullable # decimal(5, 2) t = self.cursor.description[2] self.assertEqual(t[0], 'd') self.assertEqual(t[1], Decimal) self.assertEqual(t[4], 5) # precision self.assertEqual(t[5], 2) # scale self.assertEqual(t[6], True) # nullable def test_none_param(self): "Ensure None can be used for params other than the first" # Some driver/db versions would fail if NULL was not the first parameter because SQLDescribeParam (only used # with NULL) could not be used after the first call to SQLBindParameter. This means None always worked for the # first column, but did not work for later columns. # # If SQLDescribeParam doesn't work, pyodbc would use VARCHAR which almost always worked. However, # binary/varbinary won't allow an implicit conversion. self.cursor.execute("create table t1(n int, blob varbinary(max))") self.cursor.execute("insert into t1 values (1, newid())") row = self.cursor.execute("select * from t1").fetchone() self.assertEqual(row.n, 1) self.assertEqual(type(row.blob), buffer) self.cursor.execute("update t1 set n=?, blob=?", 2, None) row = self.cursor.execute("select * from t1").fetchone() self.assertEqual(row.n, 2) self.assertEqual(row.blob, None) def test_output_conversion(self): def convert(value): # `value` will be a string. We'll simply add an X at the beginning at the end. 
return 'X' + value + 'X' self.cnxn.add_output_converter(pyodbc.SQL_VARCHAR, convert) self.cursor.execute("create table t1(n int, v varchar(10))") self.cursor.execute("insert into t1 values (1, '123.45')") value = self.cursor.execute("select v from t1").fetchone()[0] self.assertEqual(value, 'X123.45X') # Now clear the conversions and try again. There should be no Xs this time. self.cnxn.clear_output_converters() value = self.cursor.execute("select v from t1").fetchone()[0] self.assertEqual(value, '123.45') def test_too_large(self): """Ensure error raised if insert fails due to truncation""" value = 'x' * 1000 self.cursor.execute("create table t1(s varchar(800))") def test(): self.cursor.execute("insert into t1 values (?)", value) self.assertRaises(pyodbc.DataError, test) def test_geometry_null_insert(self): def convert(value): return value self.cnxn.add_output_converter(-151, convert) # -151 is SQL Server's geometry self.cursor.execute("create table t1(n int, v geometry)") self.cursor.execute("insert into t1 values (?, ?)", 1, None) value = self.cursor.execute("select v from t1").fetchone()[0] self.assertEqual(value, None) self.cnxn.clear_output_converters() def test_login_timeout(self): # This can only test setting since there isn't a way to cause it to block on the server side. 
cnxns = pyodbc.connect(self.connection_string, timeout=2) def test_row_equal(self): self.cursor.execute("create table t1(n int, s varchar(20))") self.cursor.execute("insert into t1 values (1, 'test')") row1 = self.cursor.execute("select n, s from t1").fetchone() row2 = self.cursor.execute("select n, s from t1").fetchone() b = (row1 == row2) self.assertEqual(b, True) def test_row_gtlt(self): self.cursor.execute("create table t1(n int, s varchar(20))") self.cursor.execute("insert into t1 values (1, 'test1')") self.cursor.execute("insert into t1 values (1, 'test2')") rows = self.cursor.execute("select n, s from t1 order by s").fetchall() self.assertTrue(rows[0] < rows[1]) self.assertTrue(rows[0] <= rows[1]) self.assertTrue(rows[1] > rows[0]) self.assertTrue(rows[1] >= rows[0]) self.assertTrue(rows[0] != rows[1]) rows = list(rows) rows.sort() # uses < def test_context_manager(self): with pyodbc.connect(self.connection_string) as cnxn: cnxn.getinfo(pyodbc.SQL_DEFAULT_TXN_ISOLATION) # The connection should be closed now. def test(): cnxn.getinfo(pyodbc.SQL_DEFAULT_TXN_ISOLATION) self.assertRaises(pyodbc.ProgrammingError, test) def test_untyped_none(self): # From issue 129 value = self.cursor.execute("select ?", None).fetchone()[0] self.assertEqual(value, None) def test_large_update_nodata(self): self.cursor.execute('create table t1(a varbinary(max))') hundredkb = buffer('x'*100*1024) self.cursor.execute('update t1 set a=? where 1=0', (hundredkb,)) def test_func_param(self): self.cursor.execute(''' create function func1 (@testparam varchar(4)) returns @rettest table (param varchar(4)) as begin insert @rettest select @testparam return end ''') self.cnxn.commit() value = self.cursor.execute("select * from func1(?)", 'test').fetchone()[0] self.assertEqual(value, 'test') def test_no_fetch(self): # Issue 89 with FreeTDS: Multiple selects (or catalog functions that issue selects) without fetches seem to # confuse the driver. 
self.cursor.execute('select 1') self.cursor.execute('select 1') self.cursor.execute('select 1') def test_drivers(self): drivers = pyodbc.drivers() self.assertEqual(list, type(drivers)) self.assertTrue(len(drivers) > 1) m = re.search('DRIVER={?([^}]+?)}?;', self.connection_string, re.IGNORECASE) current = m.group(1) self.assertTrue(current in drivers) def test_prepare_cleanup(self): # When statement is prepared, it is kept in case the next execute uses the same statement. This must be # removed when a non-execute statement is used that returns results, such as SQLTables. self.cursor.execute("select top 1 name from sysobjects where name = ?", "bogus") self.cursor.fetchone() self.cursor.tables("bogus") self.cursor.execute("select top 1 name from sysobjects where name = ?", "bogus") self.cursor.fetchone() def main(): from optparse import OptionParser parser = OptionParser(usage=usage) parser.add_option("-v", "--verbose", action="count", help="Increment test verbosity (can be used multiple times)") parser.add_option("-d", "--debug", action="store_true", default=False, help="Print debugging items") parser.add_option("-t", "--test", help="Run only the named test") (options, args) = parser.parse_args() if len(args) > 1: parser.error('Only one argument is allowed. Do you need quotes around the connection string?') if not args: connection_string = load_setup_connection_string('informixtests') if not connection_string: parser.print_help() raise SystemExit() else: connection_string = args[0] if options.verbose: cnxn = pyodbc.connect(connection_string) print_library_info(cnxn) cnxn.close() suite = load_tests(InformixTestCase, options.test, connection_string) testRunner = unittest.TextTestRunner(verbosity=options.verbose) result = testRunner.run(suite) return result if __name__ == '__main__': # Add the build directory to the path so we're testing the latest build, not the installed version. 
add_to_path() import pyodbc sys.exit(0 if main().wasSuccessful() else 1) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1629143953.0 pyodbc-4.0.32/tests2/mysqltests.py0000775000175000017500000006516400000000000020470 0ustar00mkleehammermkleehammer#!/usr/bin/python # -*- coding: latin-1 -*- usage = """\ usage: %prog [options] connection_string Unit tests for MySQL. To use, pass a connection string as the parameter. The tests will create and drop tables t1 and t2 as necessary. These tests use the pyodbc library from the build directory, not the version installed in your Python directories. You must run `python setup.py build` before running these tests. You can also put the connection string into a tmp/setup.cfg file like so: [mysqltests] connection-string=DRIVER=MySQL ODBC 8.0 ANSI Driver;charset=utf8mb4;SERVER=localhost;DATABASE=pyodbc;UID=root;PWD=rootpw Note: Use the "ANSI" (not the "Unicode") driver and include charset=utf8mb4 in the connection string so the high-Unicode tests won't fail. """ import sys, os, re import unittest from decimal import Decimal from datetime import datetime, date, time from os.path import join, getsize, dirname, abspath, basename from testutils import * _TESTSTR = '0123456789-abcdefghijklmnopqrstuvwxyz-' def _generate_test_string(length): """ Returns a string of composed of `seed` to make a string `length` characters long. To enhance performance, there are 3 ways data is read, based on the length of the value, so most data types are tested with 3 lengths. This function helps us generate the test data. We use a recognizable data set instead of a single character to make it less likely that "overlap" errors will be hidden and to help us manually identify where a break occurs. 
""" if length <= len(_TESTSTR): return _TESTSTR[:length] c = (length + len(_TESTSTR)-1) / len(_TESTSTR) v = _TESTSTR * c return v[:length] class MySqlTestCase(unittest.TestCase): SMALL_FENCEPOST_SIZES = [ 0, 1, 255, 256, 510, 511, 512, 1023, 1024, 2047, 2048, 4000 ] LARGE_FENCEPOST_SIZES = [ 4095, 4096, 4097, 10 * 1024, 20 * 1024 ] ANSI_FENCEPOSTS = [ _generate_test_string(size) for size in SMALL_FENCEPOST_SIZES ] UNICODE_FENCEPOSTS = [ unicode(s) for s in ANSI_FENCEPOSTS ] BLOB_FENCEPOSTS = ANSI_FENCEPOSTS + [ _generate_test_string(size) for size in LARGE_FENCEPOST_SIZES ] def __init__(self, method_name, connection_string): unittest.TestCase.__init__(self, method_name) self.connection_string = connection_string def setUp(self): self.cnxn = pyodbc.connect(self.connection_string) self.cursor = self.cnxn.cursor() self.cnxn.setdecoding(pyodbc.SQL_CHAR, encoding='utf-8') self.cnxn.setdecoding(pyodbc.SQL_WCHAR, encoding='utf-8') self.cnxn.setencoding(str, encoding='utf-8') self.cnxn.setencoding(unicode, encoding='utf-8', ctype=pyodbc.SQL_CHAR) # As of libmyodbc5w 5.3 SQLGetTypeInfo returns absurdly small sizes # leading to slow writes. Override them: self.cnxn.maxwrite = 1024 * 1024 * 1024 for i in range(3): try: self.cursor.execute("drop table t%d" % i) self.cnxn.commit() except: pass for i in range(3): try: self.cursor.execute("drop procedure proc%d" % i) self.cnxn.commit() except: pass self.cnxn.rollback() def tearDown(self): try: self.cursor.close() self.cnxn.close() except: # If we've already closed the cursor or connection, exceptions are thrown. 
pass def test_multiple_bindings(self): "More than one bind and select on a cursor" self.cursor.execute("create table t1(n int)") self.cursor.execute("insert into t1 values (?)", 1) self.cursor.execute("insert into t1 values (?)", 2) self.cursor.execute("insert into t1 values (?)", 3) for i in range(3): self.cursor.execute("select n from t1 where n < ?", 10) self.cursor.execute("select n from t1 where n < 3") def test_different_bindings(self): self.cursor.execute("create table t1(n int)") self.cursor.execute("create table t2(d datetime)") self.cursor.execute("insert into t1 values (?)", 1) self.cursor.execute("insert into t2 values (?)", datetime.now()) def test_drivers(self): p = pyodbc.drivers() self.assertTrue(isinstance(p, list)) def test_datasources(self): p = pyodbc.dataSources() self.assertTrue(isinstance(p, dict)) def test_getinfo_string(self): value = self.cnxn.getinfo(pyodbc.SQL_CATALOG_NAME_SEPARATOR) self.assertTrue(isinstance(value, str)) def test_getinfo_bool(self): value = self.cnxn.getinfo(pyodbc.SQL_ACCESSIBLE_TABLES) self.assertTrue(isinstance(value, bool)) def test_getinfo_int(self): value = self.cnxn.getinfo(pyodbc.SQL_DEFAULT_TXN_ISOLATION) self.assertTrue(isinstance(value, (int, long))) def test_getinfo_smallint(self): value = self.cnxn.getinfo(pyodbc.SQL_CONCAT_NULL_BEHAVIOR) self.assertTrue(isinstance(value, int)) def _test_strtype(self, sqltype, value, colsize=None): """ The implementation for string, Unicode, and binary tests. """ assert colsize is None or (value is None or colsize >= len(value)) if colsize: sql = "create table t1(s %s(%s))" % (sqltype, colsize) else: sql = "create table t1(s %s)" % sqltype try: self.cursor.execute(sql) except: print '>>>>', sql self.cursor.execute("insert into t1 values(?)", value) v = self.cursor.execute("select * from t1").fetchone()[0] # Removing this check for now until I get the charset working properly. # If we use latin1, results are 'str' instead of 'unicode', which would be # correct. 
Setting charset to ucs-2 causes a crash in SQLGetTypeInfo(SQL_DATETIME). # self.assertEqual(type(v), type(value)) if value is not None: self.assertEqual(len(v), len(value)) self.assertEqual(v, value) def test_raw_encoding(self): # Read something that is valid ANSI and make sure it comes through. # The database is actually going to send us UTF-8 so don't use extended # characters. # # REVIEW: Is there a good way to write UTF-8 into the database and read # it out? self.cnxn.setencoding(str, encoding='raw') expected = "testing" self.cursor.execute("create table t1(s varchar(20))") self.cursor.execute("insert into t1 values (?)", expected) result = self.cursor.execute("select * from t1").fetchone()[0] self.assertEqual(result, expected) def test_raw_decoding(self): # Read something that is valid ANSI and make sure it comes through. # The database is actually going to send us UTF-8 so don't use extended # characters. # # REVIEW: Is there a good way to write UTF-8 into the database and read # it out? self.cnxn.setdecoding(pyodbc.SQL_CHAR, encoding='raw') self._test_strtype('varchar', _TESTSTR, 100) # # varchar # def test_varchar_null(self): self._test_strtype('varchar', None, 100) # Generate a test for each fencepost size: test_varchar_0, etc. def _maketest(value): def t(self): self._test_strtype('varchar', value, max(1, len(value))) return t for value in ANSI_FENCEPOSTS: locals()['test_varchar_%s' % len(value)] = _maketest(value) # Generate a test using Unicode. 
for value in UNICODE_FENCEPOSTS: locals()['test_wvarchar_%s' % len(value)] = _maketest(value) def test_varchar_many(self): self.cursor.execute("create table t1(c1 varchar(300), c2 varchar(300), c3 varchar(300))") v1 = 'ABCDEFGHIJ' * 30 v2 = '0123456789' * 30 v3 = '9876543210' * 30 self.cursor.execute("insert into t1(c1, c2, c3) values (?,?,?)", v1, v2, v3); row = self.cursor.execute("select c1, c2, c3 from t1").fetchone() self.assertEqual(v1, row.c1) self.assertEqual(v2, row.c2) self.assertEqual(v3, row.c3) def test_varchar_upperlatin(self): self._test_strtype('varchar', u'', colsize=3) # # binary # def test_null_binary(self): self._test_strtype('varbinary', None, 100) def test_large_null_binary(self): # Bug 1575064 self._test_strtype('varbinary', None, 4000) # Generate a test for each fencepost size: test_binary_0, etc. def _maketest(value): def t(self): self._test_strtype('varbinary', bytearray(value), max(1, len(value))) return t for value in ANSI_FENCEPOSTS: locals()['test_binary_%s' % len(value)] = _maketest(value) # # blob # def test_blob_null(self): self._test_strtype('blob', None) # Generate a test for each fencepost size: test_blob_0, etc. def _maketest(value): def t(self): self._test_strtype('blob', bytearray(value)) return t for value in BLOB_FENCEPOSTS: locals()['test_blob_%s' % len(value)] = _maketest(value) def test_blob_upperlatin(self): self._test_strtype('blob', bytearray('')) # # text # def test_null_text(self): self._test_strtype('text', None) # Generate a test for each fencepost size: test_text_0, etc. 
def _maketest(value): def t(self): self._test_strtype('text', value) return t for value in ANSI_FENCEPOSTS: locals()['test_text_%s' % len(value)] = _maketest(value) def test_text_upperlatin(self): self._test_strtype('text', u'') # # unicode # def test_unicode_query(self): self.cursor.execute(u"select 1") # # bit # # The MySQL driver maps BIT colums to the ODBC bit data type, but they aren't behaving quite like a Boolean value # (which is what the ODBC bit data type really represents). The MySQL BOOL data type is just an alias for a small # integer, so pyodbc can't recognize it and map it back to True/False. # # You can use both BIT and BOOL and they will act as you expect if you treat them as integers. You can write 0 and # 1 to them and they will work. # def test_bit(self): # value = True # self.cursor.execute("create table t1(b bit)") # self.cursor.execute("insert into t1 values (?)", value) # v = self.cursor.execute("select b from t1").fetchone()[0] # self.assertEqual(type(v), bool) # self.assertEqual(v, value) # # def test_bit_string_true(self): # self.cursor.execute("create table t1(b bit)") # self.cursor.execute("insert into t1 values (?)", "xyzzy") # v = self.cursor.execute("select b from t1").fetchone()[0] # self.assertEqual(type(v), bool) # self.assertEqual(v, True) # # def test_bit_string_false(self): # self.cursor.execute("create table t1(b bit)") # self.cursor.execute("insert into t1 values (?)", "") # v = self.cursor.execute("select b from t1").fetchone()[0] # self.assertEqual(type(v), bool) # self.assertEqual(v, False) # # decimal # def test_small_decimal(self): # value = Decimal('1234567890987654321') value = Decimal('100010') # (I use this because the ODBC docs tell us how the bytes should look in the C struct) self.cursor.execute("create table t1(d numeric(19))") self.cursor.execute("insert into t1 values(?)", value) v = self.cursor.execute("select * from t1").fetchone()[0] self.assertEqual(type(v), Decimal) self.assertEqual(v, value) def 
test_small_decimal_scale(self): # The same as small_decimal, except with a different scale. This value exactly matches the ODBC documentation # example in the C Data Types appendix. value = '1000.10' value = Decimal(value) self.cursor.execute("create table t1(d numeric(20,6))") self.cursor.execute("insert into t1 values(?)", value) v = self.cursor.execute("select * from t1").fetchone()[0] self.assertEqual(type(v), Decimal) self.assertEqual(v, value) def test_negative_decimal_scale(self): value = Decimal('-10.0010') self.cursor.execute("create table t1(d numeric(19,4))") self.cursor.execute("insert into t1 values(?)", value) v = self.cursor.execute("select * from t1").fetchone()[0] self.assertEqual(type(v), Decimal) self.assertEqual(v, value) def test_subquery_params(self): """Ensure parameter markers work in a subquery""" self.cursor.execute("create table t1(id integer, s varchar(20))") self.cursor.execute("insert into t1 values (?,?)", 1, 'test') row = self.cursor.execute(""" select x.id from ( select id from t1 where s = ? and id between ? and ? ) x """, 'test', 1, 10).fetchone() self.assertNotEqual(row, None) self.assertEqual(row[0], 1) def _exec(self): self.cursor.execute(self.sql) def test_close_cnxn(self): """Make sure using a Cursor after closing its connection doesn't crash.""" self.cursor.execute("create table t1(id integer, s varchar(20))") self.cursor.execute("insert into t1 values (?,?)", 1, 'test') self.cursor.execute("select * from t1") self.cnxn.close() # Now that the connection is closed, we expect an exception. (If the code attempts to use # the HSTMT, we'll get an access violation instead.) 
self.sql = "select * from t1" self.assertRaises(pyodbc.ProgrammingError, self._exec) def test_empty_string(self): self.cursor.execute("create table t1(s varchar(20))") self.cursor.execute("insert into t1 values(?)", "") def test_fixed_str(self): value = u"testing" self.cursor.execute("create table t1(s char(7))") self.cursor.execute("insert into t1 values(?)", "testing") v = self.cursor.execute("select * from t1").fetchone()[0] self.assertEqual(v, value) def test_negative_row_index(self): self.cursor.execute("create table t1(s varchar(20))") self.cursor.execute("insert into t1 values(?)", "1") row = self.cursor.execute("select * from t1").fetchone() self.assertEqual(row[0], "1") self.assertEqual(row[-1], "1") def test_version(self): self.assertEqual(3, len(pyodbc.version.split('.'))) # 1.3.1 etc. # # date, time, datetime # def test_datetime(self): value = datetime(2007, 1, 15, 3, 4, 5) self.cursor.execute("create table t1(dt datetime)") self.cursor.execute("insert into t1 values (?)", value) result = self.cursor.execute("select dt from t1").fetchone()[0] self.assertEqual(value, result) def test_date(self): value = date(2001, 1, 1) self.cursor.execute("create table t1(dt date)") self.cursor.execute("insert into t1 values (?)", value) result = self.cursor.execute("select dt from t1").fetchone()[0] self.assertEqual(type(result), type(value)) self.assertEqual(result, value) # # ints and floats # def test_int(self): value = 1234 self.cursor.execute("create table t1(n int)") self.cursor.execute("insert into t1 values (?)", value) result = self.cursor.execute("select n from t1").fetchone()[0] self.assertEqual(result, value) def test_negative_int(self): value = -1 self.cursor.execute("create table t1(n int)") self.cursor.execute("insert into t1 values (?)", value) result = self.cursor.execute("select n from t1").fetchone()[0] self.assertEqual(result, value) def test_bigint(self): # This fails on 64-bit Fedora with 5.1. 
# Should return 0x0123456789 # Does return 0x0000000000 # # Top 4 bytes are returned as 0x00 00 00 00. If the input is high enough, they are returned as 0xFF FF FF FF. input = 0x123456789 self.cursor.execute("create table t1(d bigint)") self.cursor.execute("insert into t1 values (?)", input) result = self.cursor.execute("select d from t1").fetchone()[0] self.assertEqual(result, input) def test_float(self): value = 1234.5 self.cursor.execute("create table t1(n float)") self.cursor.execute("insert into t1 values (?)", value) result = self.cursor.execute("select n from t1").fetchone()[0] self.assertEqual(result, value) def test_negative_float(self): value = -200 self.cursor.execute("create table t1(n float)") self.cursor.execute("insert into t1 values (?)", value) result = self.cursor.execute("select n from t1").fetchone()[0] self.assertEqual(value, result) def test_date(self): value = date.today() self.cursor.execute("create table t1(d date)") self.cursor.execute("insert into t1 values (?)", value) result = self.cursor.execute("select d from t1").fetchone()[0] self.assertEqual(value, result) def test_time(self): value = datetime.now().time() # We aren't yet writing values using the new extended time type so the value written to the database is only # down to the second. value = value.replace(microsecond=0) self.cursor.execute("create table t1(t time)") self.cursor.execute("insert into t1 values (?)", value) result = self.cursor.execute("select t from t1").fetchone()[0] self.assertEqual(value, result) # # misc # def test_rowcount_delete(self): self.assertEqual(self.cursor.rowcount, -1) self.cursor.execute("create table t1(i int)") count = 4 for i in range(count): self.cursor.execute("insert into t1 values (?)", i) self.cursor.execute("delete from t1") self.assertEqual(self.cursor.rowcount, count) def test_rowcount_nodata(self): """ This represents a different code path than a delete that deleted something. 
The return value is SQL_NO_DATA and code after it was causing an error. We could use SQL_NO_DATA to step over the code that errors out and drop down to the same SQLRowCount code. On the other hand, we could hardcode a zero return value. """ self.cursor.execute("create table t1(i int)") # This is a different code path internally. self.cursor.execute("delete from t1") self.assertEqual(self.cursor.rowcount, 0) def test_rowcount_select(self): """ Ensure Cursor.rowcount is set properly after a select statement. pyodbc calls SQLRowCount after each execute and sets Cursor.rowcount. Databases can return the actual rowcount or they can return -1 if it would help performance. MySQL seems to always return the correct rowcount. """ self.cursor.execute("create table t1(i int)") count = 4 for i in range(count): self.cursor.execute("insert into t1 values (?)", i) self.cursor.execute("select * from t1") self.assertEqual(self.cursor.rowcount, count) rows = self.cursor.fetchall() self.assertEqual(len(rows), count) self.assertEqual(self.cursor.rowcount, count) def test_rowcount_reset(self): "Ensure rowcount is reset to -1" # The Python DB API says that rowcount should be set to -1 and most ODBC drivers let us know there are no # records. MySQL always returns 0, however. Without parsing the SQL (which we are not going to do), I'm not # sure how we can tell the difference and set the value to -1. For now, I'll have this test check for 0. self.cursor.execute("create table t1(i int)") count = 4 for i in range(count): self.cursor.execute("insert into t1 values (?)", i) self.assertEqual(self.cursor.rowcount, 1) self.cursor.execute("create table t2(i int)") self.assertEqual(self.cursor.rowcount, 0) def test_lower_case(self): "Ensure pyodbc.lowercase forces returned column names to lowercase." # Has to be set before creating the cursor, so we must recreate self.cursor. 
pyodbc.lowercase = True self.cursor = self.cnxn.cursor() self.cursor.execute("create table t1(Abc int, dEf int)") self.cursor.execute("select * from t1") names = [ t[0] for t in self.cursor.description ] names.sort() self.assertEqual(names, [ "abc", "def" ]) # Put it back so other tests don't fail. pyodbc.lowercase = False def test_row_description(self): """ Ensure Cursor.description is accessible as Row.cursor_description. """ self.cursor = self.cnxn.cursor() self.cursor.execute("create table t1(a int, b char(3))") self.cnxn.commit() self.cursor.execute("insert into t1 values(1, 'abc')") row = self.cursor.execute("select * from t1").fetchone() self.assertEqual(self.cursor.description, row.cursor_description) def test_executemany(self): self.cursor.execute("create table t1(a int, b varchar(10))") params = [ (i, str(i)) for i in range(1, 6) ] self.cursor.executemany("insert into t1(a, b) values (?,?)", params) count = self.cursor.execute("select count(*) from t1").fetchone()[0] self.assertEqual(count, len(params)) self.cursor.execute("select a, b from t1 order by a") rows = self.cursor.fetchall() self.assertEqual(count, len(rows)) for param, row in zip(params, rows): self.assertEqual(param[0], row[0]) self.assertEqual(param[1], row[1]) def test_executemany_one(self): "Pass executemany a single sequence" self.cursor.execute("create table t1(a int, b varchar(10))") params = [ (1, "test") ] self.cursor.executemany("insert into t1(a, b) values (?,?)", params) count = self.cursor.execute("select count(*) from t1").fetchone()[0] self.assertEqual(count, len(params)) self.cursor.execute("select a, b from t1 order by a") rows = self.cursor.fetchall() self.assertEqual(count, len(rows)) for param, row in zip(params, rows): self.assertEqual(param[0], row[0]) self.assertEqual(param[1], row[1]) # REVIEW: The following fails. Research. # def test_executemany_failure(self): # """ # Ensure that an exception is raised if one query in an executemany fails. 
# """ # self.cursor.execute("create table t1(a int, b varchar(10))") # # params = [ (1, 'good'), # ('error', 'not an int'), # (3, 'good') ] # # self.assertRaises(pyodbc.Error, self.cursor.executemany, "insert into t1(a, b) value (?, ?)", params) def test_row_slicing(self): self.cursor.execute("create table t1(a int, b int, c int, d int)"); self.cursor.execute("insert into t1 values(1,2,3,4)") row = self.cursor.execute("select * from t1").fetchone() result = row[:] self.assertTrue(result is row) result = row[:-1] self.assertEqual(result, (1,2,3)) result = row[0:4] self.assertTrue(result is row) def test_row_repr(self): self.cursor.execute("create table t1(a int, b int, c int, d int)"); self.cursor.execute("insert into t1 values(1,2,3,4)") row = self.cursor.execute("select * from t1").fetchone() result = str(row) self.assertEqual(result, "(1, 2, 3, 4)") result = str(row[:-1]) self.assertEqual(result, "(1, 2, 3)") result = str(row[:1]) self.assertEqual(result, "(1,)") def test_autocommit(self): self.assertEqual(self.cnxn.autocommit, False) othercnxn = pyodbc.connect(self.connection_string, autocommit=True) self.assertEqual(othercnxn.autocommit, True) othercnxn.autocommit = False self.assertEqual(othercnxn.autocommit, False) def test_emoticons_as_parameter(self): # https://github.com/mkleehammer/pyodbc/issues/423 # # When sending a varchar parameter, pyodbc is supposed to set ColumnSize to the number # of characters. Ensure it works even with 4-byte characters. 
# # http://www.fileformat.info/info/unicode/char/1f31c/index.htm v = u"x \U0001F31C z" self.cursor.execute("CREATE TABLE t1(s varchar(100)) DEFAULT CHARSET=utf8mb4") self.cursor.execute("insert into t1 values (?)", v) result = self.cursor.execute("select s from t1").fetchone()[0] self.assertEqual(result, v) def test_emoticons_as_literal(self): # https://github.com/mkleehammer/pyodbc/issues/630 v = u"x \U0001F31C z" self.cursor.execute("CREATE TABLE t1(s varchar(100)) DEFAULT CHARSET=utf8mb4") self.cursor.execute("insert into t1 values ('%s')" % v) result = self.cursor.execute("select s from t1").fetchone()[0] self.assertEqual(result, v) def main(): from optparse import OptionParser parser = OptionParser(usage=usage) parser.add_option("-v", "--verbose", action="count", help="Increment test verbosity (can be used multiple times)") parser.add_option("-d", "--debug", action="store_true", default=False, help="Print debugging items") parser.add_option("-t", "--test", help="Run only the named test") (options, args) = parser.parse_args() if len(args) > 1: parser.error('Only one argument is allowed. Do you need quotes around the connection string?') if not args: filename = basename(sys.argv[0]) assert filename.endswith('.py') connection_string = load_setup_connection_string(filename[:-3]) if not connection_string: parser.print_help() raise SystemExit() else: connection_string = args[0] if options.verbose: cnxn = pyodbc.connect(connection_string) print_library_info(cnxn) cnxn.close() suite = load_tests(MySqlTestCase, options.test, connection_string) testRunner = unittest.TextTestRunner(verbosity=options.verbose) result = testRunner.run(suite) return result if __name__ == '__main__': # Add the build directory to the path so we're testing the latest build, not the installed version. 
add_to_path() import pyodbc sys.exit(0 if main().wasSuccessful() else 1) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1629143953.0 pyodbc-4.0.32/tests2/pgtests.py0000775000175000017500000005374300000000000017731 0ustar00mkleehammermkleehammer#!/usr/bin/python # -*- coding: utf-8 -*- usage = """\ usage: %prog [options] connection_string Unit tests for PostgreSQL. To use, pass a connection string as the parameter. The tests will create and drop tables t1 and t2 as necessary. These run using the version from the 'build' directory, not the version installed into the Python directories. You must run python setup.py build before running the tests. You can also put the connection string into a tmp/setup.cfg file like so: [pgtests] connection-string=DSN=PostgreSQL35W Note: Be sure to use the "Unicode" (not the "ANSI") version of the PostgreSQL ODBC driver. """ import sys, os, re import unittest from decimal import Decimal from testutils import * _TESTSTR = '0123456789-abcdefghijklmnopqrstuvwxyz-' def _generate_test_string(length): """ Returns a string of composed of `seed` to make a string `length` characters long. To enhance performance, there are 3 ways data is read, based on the length of the value, so most data types are tested with 3 lengths. This function helps us generate the test data. We use a recognizable data set instead of a single character to make it less likely that "overlap" errors will be hidden and to help us manually identify where a break occurs. """ if length <= len(_TESTSTR): return _TESTSTR[:length] c = (length + len(_TESTSTR)-1) / len(_TESTSTR) v = _TESTSTR * c return v[:length] class PGTestCase(unittest.TestCase): # These are from the C++ code. Keep them up to date. # If we are reading a binary, string, or unicode value and do not know how large it is, we'll try reading 2K into a # buffer on the stack. We then copy into a new Python object. 
SMALL_READ = 100 # A read guaranteed not to fit in the MAX_STACK_STACK stack buffer, but small enough to be used for varchar (4K max). LARGE_READ = 4000 SMALL_STRING = _generate_test_string(SMALL_READ) LARGE_STRING = _generate_test_string(LARGE_READ) def __init__(self, connection_string, ansi, unicode_results, method_name): unittest.TestCase.__init__(self, method_name) self.connection_string = connection_string self.ansi = ansi self.unicode = unicode_results def setUp(self): self.cnxn = pyodbc.connect(self.connection_string, ansi=self.ansi) self.cursor = self.cnxn.cursor() # I've set my test database to use UTF-8 which seems most popular. self.cnxn.setdecoding(pyodbc.SQL_WCHAR, encoding='utf-8') self.cnxn.setencoding(str, encoding='utf-8') self.cnxn.setencoding(unicode, encoding='utf-8') # As of psql 9.5.04 SQLGetTypeInfo returns absurdly small sizes leading # to slow writes. Override them: self.cnxn.maxwrite = 1024 * 1024 * 1024 for i in range(3): try: self.cursor.execute("drop table t%d" % i) self.cnxn.commit() except: pass self.cnxn.rollback() def tearDown(self): try: self.cursor.close() self.cnxn.close() except: # If we've already closed the cursor or connection, exceptions are thrown. 
pass def test_drivers(self): p = pyodbc.drivers() self.assertTrue(isinstance(p, list)) def test_datasources(self): p = pyodbc.dataSources() self.assertTrue(isinstance(p, dict)) def test_getinfo_string(self): value = self.cnxn.getinfo(pyodbc.SQL_CATALOG_NAME_SEPARATOR) self.assertTrue(isinstance(value, str)) def test_getinfo_bool(self): value = self.cnxn.getinfo(pyodbc.SQL_ACCESSIBLE_TABLES) self.assertTrue(isinstance(value, bool)) def test_getinfo_int(self): value = self.cnxn.getinfo(pyodbc.SQL_DEFAULT_TXN_ISOLATION) self.assertTrue(isinstance(value, (int, long))) def test_getinfo_smallint(self): value = self.cnxn.getinfo(pyodbc.SQL_CONCAT_NULL_BEHAVIOR) self.assertTrue(isinstance(value, int)) def test_negative_float(self): value = -200 self.cursor.execute("create table t1(n float)") self.cursor.execute("insert into t1 values (?)", value) result = self.cursor.execute("select n from t1").fetchone()[0] self.assertEqual(value, result) def _test_strtype(self, sqltype, value, colsize=None, resulttype=None): """ The implementation for string, Unicode, and binary tests. """ assert colsize is None or (value is None or colsize >= len(value)) if colsize: sql = "create table t1(s %s(%s))" % (sqltype, colsize) else: sql = "create table t1(s %s)" % sqltype self.cursor.execute(sql) self.cursor.execute("insert into t1 values(?)", value) self.cursor.execute("select * from t1") row = self.cursor.fetchone() result = row[0] if resulttype and type(value) is not resulttype: value = resulttype(value) self.assertEqual(result, value) def test_maxwrite(self): # If we write more than `maxwrite` bytes, pyodbc will switch from # binding the data all at once to providing it at execute time with # SQLPutData. The default maxwrite is 1GB so this is rarely needed in # PostgreSQL but I need to test the functionality somewhere. 
self.cnxn.maxwrite = 300 self._test_strtype('varchar', unicode(_generate_test_string(400), 'utf-8')) # # varchar # def test_empty_varchar(self): self._test_strtype('varchar', u'', self.SMALL_READ) def test_null_varchar(self): self._test_strtype('varchar', None, self.SMALL_READ) def test_large_null_varchar(self): # There should not be a difference, but why not find out? self._test_strtype('varchar', None, self.LARGE_READ) def test_small_varchar(self): self._test_strtype('varchar', unicode(self.SMALL_STRING), self.SMALL_READ) def test_large_varchar(self): self._test_strtype('varchar', unicode(self.LARGE_STRING), self.LARGE_READ) def test_varchar_many(self): self.cursor.execute("create table t1(c1 varchar(300), c2 varchar(300), c3 varchar(300))") v1 = 'ABCDEFGHIJ' * 30 v2 = '0123456789' * 30 v3 = '9876543210' * 30 self.cursor.execute("insert into t1(c1, c2, c3) values (?,?,?)", v1, v2, v3); row = self.cursor.execute("select c1, c2, c3 from t1").fetchone() self.assertEqual(v1, row.c1) self.assertEqual(v2, row.c2) self.assertEqual(v3, row.c3) def test_varchar_bytes(self): # Write non-unicode data to a varchar field. self._test_strtype('varchar', self.SMALL_STRING, self.SMALL_READ) def test_small_decimal(self): # value = Decimal('1234567890987654321') value = Decimal('100010') # (I use this because the ODBC docs tell us how the bytes should look in the C struct) self.cursor.execute("create table t1(d numeric(19))") self.cursor.execute("insert into t1 values(?)", value) v = self.cursor.execute("select * from t1").fetchone()[0] self.assertEqual(type(v), Decimal) self.assertEqual(v, value) def test_small_decimal_scale(self): # The same as small_decimal, except with a different scale. This value exactly matches the ODBC documentation # example in the C Data Types appendix. 
value = '1000.10' value = Decimal(value) self.cursor.execute("create table t1(d numeric(20,6))") self.cursor.execute("insert into t1 values(?)", value) v = self.cursor.execute("select * from t1").fetchone()[0] self.assertEqual(type(v), Decimal) self.assertEqual(v, value) def test_negative_decimal_scale(self): value = Decimal('-10.0010') self.cursor.execute("create table t1(d numeric(19,4))") self.cursor.execute("insert into t1 values(?)", value) v = self.cursor.execute("select * from t1").fetchone()[0] self.assertEqual(type(v), Decimal) self.assertEqual(v, value) def _exec(self): self.cursor.execute(self.sql) def test_close_cnxn(self): """Make sure using a Cursor after closing its connection doesn't crash.""" self.cursor.execute("create table t1(id integer, s varchar(20))") self.cursor.execute("insert into t1 values (?,?)", 1, 'test') self.cursor.execute("select * from t1") self.cnxn.close() # Now that the connection is closed, we expect an exception. (If the code attempts to use # the HSTMT, we'll get an access violation instead.) self.sql = "select * from t1" self.assertRaises(pyodbc.ProgrammingError, self._exec) def test_empty_string(self): self.cursor.execute("create table t1(s varchar(20))") self.cursor.execute("insert into t1 values(?)", "") def test_fixed_str(self): value = "testing" self.cursor.execute("create table t1(s char(7))") self.cursor.execute("insert into t1 values(?)", "testing") v = self.cursor.execute("select * from t1").fetchone()[0] self.assertEqual(v, value) def test_raw_encoding(self): # Read something that is valid ANSI and make sure it comes through. # The database is actually going to send us UTF-8 so don't use extended # characters. # # REVIEW: Is there a good way to write UTF-8 into the database and read # it out? 
self.cnxn.setencoding(str, encoding='raw') expected = "testing" self.cursor.execute("create table t1(s varchar(20))") self.cursor.execute("insert into t1 values (?)", expected) result = self.cursor.execute("select * from t1").fetchone()[0] self.assertEqual(result, expected) def test_raw_decoding(self): # Read something that is valid ANSI and make sure it comes through. # The database is actually going to send us UTF-8 so don't use extended # characters. # # REVIEW: Is there a good way to write UTF-8 into the database and read # it out? self.cnxn.setdecoding(pyodbc.SQL_CHAR, encoding='raw') self._test_strtype('varchar', self.SMALL_STRING) def test_setdecoding(self): # Force the result to be a string instead of unicode object. I'm not # sure how to change the encoding for a single column. (Though I'm # glad you can't - the communications encoding should not depend on # per-column encoding like MySQL uses.) self.cnxn.setdecoding(pyodbc.SQL_CHAR, encoding='utf8', to=str) self.cnxn.setdecoding(pyodbc.SQL_WCHAR, encoding='utf8', to=str) self._test_strtype('varchar', 'test', self.SMALL_READ) def test_unicode_latin(self): value = u"x-\u00C2-y" # A hat : Â self.cursor.execute("create table t1(s varchar(20))") self.cursor.execute("insert into t1 values(?)", value) result = self.cursor.execute("select * from t1").fetchone()[0] self.assertEqual(result, value) def test_negative_row_index(self): self.cursor.execute("create table t1(s varchar(20))") self.cursor.execute("insert into t1 values(?)", "1") row = self.cursor.execute("select * from t1").fetchone() self.assertEqual(row[0], "1") self.assertEqual(row[-1], "1") def test_version(self): self.assertEqual(3, len(pyodbc.version.split('.'))) # 1.3.1 etc. 
def test_rowcount_delete(self): self.assertEqual(self.cursor.rowcount, -1) self.cursor.execute("create table t1(i int)") count = 4 for i in range(count): self.cursor.execute("insert into t1 values (?)", i) self.cursor.execute("delete from t1") self.assertEqual(self.cursor.rowcount, count) def test_rowcount_nodata(self): """ This represents a different code path than a delete that deleted something. The return value is SQL_NO_DATA and code after it was causing an error. We could use SQL_NO_DATA to step over the code that errors out and drop down to the same SQLRowCount code. On the other hand, we could hardcode a zero return value. """ self.cursor.execute("create table t1(i int)") # This is a different code path internally. self.cursor.execute("delete from t1") self.assertEqual(self.cursor.rowcount, 0) def test_rowcount_select(self): self.cursor.execute("create table t1(i int)") count = 4 for i in range(count): self.cursor.execute("insert into t1 values (?)", i) self.cursor.execute("select * from t1") self.assertEqual(self.cursor.rowcount, 4) # PostgreSQL driver fails here? # def test_rowcount_reset(self): # "Ensure rowcount is reset to -1" # # self.cursor.execute("create table t1(i int)") # count = 4 # for i in range(count): # self.cursor.execute("insert into t1 values (?)", i) # self.assertEqual(self.cursor.rowcount, 1) # # self.cursor.execute("create table t2(i int)") # self.assertEqual(self.cursor.rowcount, -1) def test_lower_case(self): "Ensure pyodbc.lowercase forces returned column names to lowercase." # Has to be set before creating the cursor, so we must recreate self.cursor. pyodbc.lowercase = True self.cursor = self.cnxn.cursor() self.cursor.execute("create table t1(Abc int, dEf int)") self.cursor.execute("select * from t1") names = [ t[0] for t in self.cursor.description ] names.sort() self.assertEqual(names, [ "abc", "def" ]) # Put it back so other tests don't fail. 
pyodbc.lowercase = False def test_row_description(self): """ Ensure Cursor.description is accessible as Row.cursor_description. """ self.cursor = self.cnxn.cursor() self.cursor.execute("create table t1(a int, b char(3))") self.cnxn.commit() self.cursor.execute("insert into t1 values(1, 'abc')") row = self.cursor.execute("select * from t1").fetchone() self.assertEqual(self.cursor.description, row.cursor_description) def test_executemany(self): self.cursor.execute("create table t1(a int, b varchar(10))") params = [ (i, str(i)) for i in range(1, 6) ] self.cursor.executemany("insert into t1(a, b) values (?,?)", params) # REVIEW: Without the cast, we get the following error: # [07006] [unixODBC]Received an unsupported type from Postgres.;\nERROR: table "t2" does not exist (14) count = self.cursor.execute("select cast(count(*) as int) from t1").fetchone()[0] self.assertEqual(count, len(params)) self.cursor.execute("select a, b from t1 order by a") rows = self.cursor.fetchall() self.assertEqual(count, len(rows)) for param, row in zip(params, rows): self.assertEqual(param[0], row[0]) self.assertEqual(param[1], row[1]) def test_executemany_failure(self): """ Ensure that an exception is raised if one query in an executemany fails. 
""" self.cursor.execute("create table t1(a int, b varchar(10))") params = [ (1, 'good'), ('error', 'not an int'), (3, 'good') ] self.assertRaises(pyodbc.Error, self.cursor.executemany, "insert into t1(a, b) value (?, ?)", params) def test_executemany_generator(self): self.cursor.execute("create table t1(a int)") self.cursor.executemany("insert into t1(a) values (?)", ((i,) for i in range(4))) row = self.cursor.execute("select min(a) mina, max(a) maxa from t1").fetchone() self.assertEqual(row.mina, 0) self.assertEqual(row.maxa, 3) def test_executemany_iterator(self): self.cursor.execute("create table t1(a int)") values = [ (i,) for i in range(4) ] self.cursor.executemany("insert into t1(a) values (?)", iter(values)) row = self.cursor.execute("select min(a) mina, max(a) maxa from t1").fetchone() self.assertEqual(row.mina, 0) self.assertEqual(row.maxa, 3) def test_row_slicing(self): self.cursor.execute("create table t1(a int, b int, c int, d int)"); self.cursor.execute("insert into t1 values(1,2,3,4)") row = self.cursor.execute("select * from t1").fetchone() result = row[:] self.assertTrue(result is row) result = row[:-1] self.assertEqual(result, (1,2,3)) result = row[0:4] self.assertTrue(result is row) def test_row_repr(self): self.cursor.execute("create table t1(a int, b int, c int, d int)"); self.cursor.execute("insert into t1 values(1,2,3,4)") row = self.cursor.execute("select * from t1").fetchone() result = str(row) self.assertEqual(result, "(1, 2, 3, 4)") result = str(row[:-1]) self.assertEqual(result, "(1, 2, 3)") result = str(row[:1]) self.assertEqual(result, "(1,)") def test_pickling(self): row = self.cursor.execute("select 1 a, 'two' b").fetchone() import pickle s = pickle.dumps(row) other = pickle.loads(s) self.assertEqual(row, other) def test_int_limits(self): values = [ (-sys.maxint - 1), -1, 0, 1, 3230392212, sys.maxint ] self.cursor.execute("create table t1(a bigint)") for value in values: self.cursor.execute("delete from t1") 
self.cursor.execute("insert into t1 values(?)", value) v = self.cursor.execute("select a from t1").fetchone()[0] self.assertEqual(v, value) def test_emoticons_as_parameter(self): # https://github.com/mkleehammer/pyodbc/issues/423 # # When sending a varchar parameter, pyodbc is supposed to set ColumnSize to the number # of characters. Ensure it works even with 4-byte characters. # # http://www.fileformat.info/info/unicode/char/1f31c/index.htm v = "x \U0001F31C z" self.cursor.execute("CREATE TABLE t1(s varchar(100))") self.cursor.execute("insert into t1 values (?)", v) result = self.cursor.execute("select s from t1").fetchone()[0] self.assertEqual(result, v) def test_emoticons_as_literal(self): # https://github.com/mkleehammer/pyodbc/issues/630 v = "x \U0001F31C z" self.cursor.execute("CREATE TABLE t1(s varchar(100))") self.cursor.execute("insert into t1 values ('%s')" % v) result = self.cursor.execute("select s from t1").fetchone()[0] self.assertEqual(result, v) def test_cursor_messages(self): """ Test the Cursor.messages attribute. 
""" # self.cursor is used in setUp, hence is not brand new at this point brand_new_cursor = self.cnxn.cursor() self.assertIsNone(brand_new_cursor.messages) # using INFO message level because they are always sent to the client regardless of # client_min_messages: https://www.postgresql.org/docs/11/runtime-config-client.html for msg in ('hello world', 'ABCDEFGHIJ' * 400): self.cursor.execute(""" CREATE OR REPLACE PROCEDURE test_cursor_messages() LANGUAGE plpgsql AS $$ BEGIN RAISE INFO '{}' USING ERRCODE = '01000'; END; $$; """.format(msg)) self.cursor.execute("CALL test_cursor_messages();") messages = self.cursor.messages self.assertTrue(type(messages) is list) self.assertTrue(len(messages) > 0) self.assertTrue(all(type(m) is tuple for m in messages)) self.assertTrue(all(len(m) == 2 for m in messages)) self.assertTrue(all(type(m[0]) is unicode for m in messages)) self.assertTrue(all(type(m[1]) is unicode for m in messages)) self.assertTrue(all(m[0] == '[01000] (-1)' for m in messages)) self.assertTrue(''.join(m[1] for m in messages).endswith(msg)) def main(): from optparse import OptionParser parser = OptionParser(usage="usage: %prog [options] connection_string") parser.add_option("-v", "--verbose", action="count", help="Increment test verbosity (can be used multiple times)") parser.add_option("-d", "--debug", action="store_true", default=False, help="Print debugging items") parser.add_option("-t", "--test", help="Run only the named test") parser.add_option('-a', '--ansi', help='ANSI only', default=False, action='store_true') parser.add_option('-u', '--unicode', help='Expect results in Unicode', default=False, action='store_true') (options, args) = parser.parse_args() if len(args) > 1: parser.error('Only one argument is allowed. 
Do you need quotes around the connection string?') if not args: connection_string = load_setup_connection_string('pgtests') if not connection_string: parser.print_help() raise SystemExit() else: connection_string = args[0] if options.verbose: cnxn = pyodbc.connect(connection_string, ansi=options.ansi) print_library_info(cnxn) cnxn.close() if options.test: # Run a single test if not options.test.startswith('test_'): options.test = 'test_%s' % (options.test) s = unittest.TestSuite([ PGTestCase(connection_string, options.ansi, options.unicode, options.test) ]) else: # Run all tests in the class methods = [ m for m in dir(PGTestCase) if m.startswith('test_') ] methods.sort() s = unittest.TestSuite([ PGTestCase(connection_string, options.ansi, options.unicode, m) for m in methods ]) testRunner = unittest.TextTestRunner(verbosity=options.verbose) result = testRunner.run(s) return result if __name__ == '__main__': # Add the build directory to the path so we're testing the latest build, not the installed version. add_to_path() import pyodbc sys.exit(0 if main().wasSuccessful() else 1) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1629143953.0 pyodbc-4.0.32/tests2/sqldwtests.py0000664000175000017500000015410600000000000020445 0ustar00mkleehammermkleehammer#!/usr/bin/python # -*- coding: utf-8 -*- from __future__ import print_function usage = """\ usage: %prog [options] connection_string Unit tests for Azure SQL DW. To use, pass a connection string as the parameter. The tests will create and drop tables t1 and t2 as necessary. These run using the version from the 'build' directory, not the version installed into the Python directories. You must run python setup.py build before running the tests. 
You can also put the connection string into a tmp/setup.cfg file like so: [sqldwtests] connection-string=DRIVER={SQL Server};SERVER=localhost;UID=uid;PWD=pwd;DATABASE=db The connection string above will use the 2000/2005 driver, even if SQL Server 2008 is installed: 2000: DRIVER={SQL Server} 2005: DRIVER={SQL Server} 2008: DRIVER={SQL Server Native Client 10.0} If using FreeTDS ODBC, be sure to use version 1.00.97 or newer. """ import sys, os, re, uuid import unittest from decimal import Decimal from datetime import datetime, date, time from os.path import join, getsize, dirname, abspath from warnings import warn from testutils import * _TESTSTR = '0123456789-abcdefghijklmnopqrstuvwxyz-' def _generate_test_string(length): """ Returns a string of `length` characters, constructed by repeating _TESTSTR as necessary. To enhance performance, there are 3 ways data is read, based on the length of the value, so most data types are tested with 3 lengths. This function helps us generate the test data. We use a recognizable data set instead of a single character to make it less likely that "overlap" errors will be hidden and to help us manually identify where a break occurs. 
""" if length <= len(_TESTSTR): return _TESTSTR[:length] c = (length + len(_TESTSTR)-1) / len(_TESTSTR) v = _TESTSTR * c return v[:length] class SqlServerTestCase(unittest.TestCase): SMALL_FENCEPOST_SIZES = [ 0, 1, 255, 256, 510, 511, 512, 1023, 1024, 2047, 2048, 4000 ] LARGE_FENCEPOST_SIZES = [ 4095, 4096, 4097, 10 * 1024, 20 * 1024 ] MAX_FENCEPOST_SIZES = [ 5 * 1024 * 1024 ] #, 50 * 1024 * 1024 ] ANSI_SMALL_FENCEPOSTS = [ _generate_test_string(size) for size in SMALL_FENCEPOST_SIZES ] UNICODE_SMALL_FENCEPOSTS = [ unicode(s) for s in ANSI_SMALL_FENCEPOSTS ] ANSI_LARGE_FENCEPOSTS = ANSI_SMALL_FENCEPOSTS + [ _generate_test_string(size) for size in LARGE_FENCEPOST_SIZES ] UNICODE_LARGE_FENCEPOSTS = UNICODE_SMALL_FENCEPOSTS + [ unicode(s) for s in [_generate_test_string(size) for size in LARGE_FENCEPOST_SIZES ]] ANSI_MAX_FENCEPOSTS = ANSI_LARGE_FENCEPOSTS + [ _generate_test_string(size) for size in MAX_FENCEPOST_SIZES ] UNICODE_MAX_FENCEPOSTS = UNICODE_LARGE_FENCEPOSTS + [ unicode(s) for s in [_generate_test_string(size) for size in MAX_FENCEPOST_SIZES ]] def __init__(self, method_name, connection_string): unittest.TestCase.__init__(self, method_name) self.connection_string = connection_string def driver_type_is(self, type_name): recognized_types = { 'msodbcsql': '(Microsoft) ODBC Driver xx for SQL Server', 'freetds': 'FreeTDS ODBC', } if not type_name in recognized_types.keys(): raise KeyError('"{0}" is not a recognized driver type: {1}'.format(type_name, list(recognized_types.keys()))) driver_name = self.cnxn.getinfo(pyodbc.SQL_DRIVER_NAME).lower() if type_name == 'msodbcsql': return ('msodbcsql' in driver_name) or ('sqlncli' in driver_name) or ('sqlsrv32.dll' == driver_name) elif type_name == 'freetds': return ('tdsodbc' in driver_name) def get_sqlserver_version(self): """ Returns the major version: 8-->2000, 9-->2005, 10-->2008 """ self.cursor.execute("SELECT CAST(SERVERPROPERTY('ProductVersion') AS VARCHAR(255))") row = self.cursor.fetchone() return 
int(row[0].split('.', 1)[0]) def setUp(self): self.cnxn = pyodbc.connect(self.connection_string) self.cursor = self.cnxn.cursor() for i in range(3): try: self.cursor.execute("drop table t%d" % i) except: pass for i in range(3): try: self.cursor.execute("drop procedure proc%d" % i) except: pass try: self.cursor.execute('drop function func1') except: pass def tearDown(self): try: self.cursor.close() self.cnxn.close() except: # If we've already closed the cursor or connection, exceptions are thrown. pass def test_binary_type(self): if sys.hexversion >= 0x02060000: self.assertTrue(pyodbc.BINARY is bytearray) else: self.assertTrue(pyodbc.BINARY is buffer) def test_multiple_bindings(self): "More than one bind and select on a cursor" self.cursor.execute("create table t1(n int)") self.cursor.execute("insert into t1 values (?)", 1) self.cursor.execute("insert into t1 values (?)", 2) self.cursor.execute("insert into t1 values (?)", 3) for i in range(3): self.cursor.execute("select n from t1 where n < ?", 10) self.cursor.execute("select n from t1 where n < 3") def test_different_bindings(self): self.cursor.execute("create table t1(n int)") self.cursor.execute("create table t2(d datetime)") self.cursor.execute("insert into t1 values (?)", 1) self.cursor.execute("insert into t2 values (?)", datetime.now()) def test_drivers(self): p = pyodbc.drivers() self.assertTrue(isinstance(p, list)) def test_datasources(self): p = pyodbc.dataSources() self.assertTrue(isinstance(p, dict)) def test_getinfo_string(self): value = self.cnxn.getinfo(pyodbc.SQL_CATALOG_NAME_SEPARATOR) self.assertTrue(isinstance(value, str)) def test_getinfo_bool(self): value = self.cnxn.getinfo(pyodbc.SQL_ACCESSIBLE_TABLES) self.assertTrue(isinstance(value, bool)) def test_getinfo_int(self): value = self.cnxn.getinfo(pyodbc.SQL_DEFAULT_TXN_ISOLATION) self.assertTrue(isinstance(value, (int, long))) def test_getinfo_smallint(self): value = self.cnxn.getinfo(pyodbc.SQL_CONCAT_NULL_BEHAVIOR) 
self.assertTrue(isinstance(value, int)) def test_noscan(self): self.assertEqual(self.cursor.noscan, False) self.cursor.noscan = True self.assertEqual(self.cursor.noscan, True) def test_nextset(self): self.cursor.execute("create table t1(i int)") for i in range(4): self.cursor.execute("insert into t1(i) values(?)", i) self.cursor.execute("select i from t1 where i < 2 order by i; select i from t1 where i >= 2 order by i") for i, row in enumerate(self.cursor): self.assertEqual(i, row.i) self.assertEqual(self.cursor.nextset(), True) for i, row in enumerate(self.cursor): self.assertEqual(i + 2, row.i) def test_nextset_with_raiserror(self): self.cursor.execute("select i = 1; RAISERROR('c', 16, 1);") row = next(self.cursor) self.assertEqual(1, row.i) if self.driver_type_is('freetds'): warn('FREETDS_KNOWN_ISSUE - test_nextset_with_raiserror: test cancelled.') # AssertionError: ProgrammingError not raised by nextset # https://github.com/FreeTDS/freetds/issues/230 return # for now self.assertRaises(pyodbc.ProgrammingError, self.cursor.nextset) def test_fixed_unicode(self): value = u"t\xebsting" self.cursor.execute("create table t1(s nchar(7))") self.cursor.execute("insert into t1 values(?)", u"t\xebsting") v = self.cursor.execute("select * from t1").fetchone()[0] self.assertEqual(type(v), unicode) self.assertEqual(len(v), len(value)) # If we alloc'd wrong, the test below might work because of an embedded NULL self.assertEqual(v, value) def _test_strtype(self, sqltype, value, resulttype=None, colsize=None): """ The implementation for string, Unicode, and binary tests. 
""" assert colsize in (None, 'max') or isinstance(colsize, int), colsize assert colsize in (None, 'max') or (value is None or colsize >= len(value)) if colsize: sql = "create table t1(s %s(%s)) with (heap)" % (sqltype, colsize) else: sql = "create table t1(s %s) with (heap)" % sqltype self.cursor.execute(sql) if resulttype is None: resulttype = type(value) sql = "insert into t1 values(?)" try: if colsize == 'max': if sqltype == 'varbinary': sqlbind = pyodbc.SQL_VARBINARY elif sqltype == 'varchar': sqlbind = pyodbc.SQL_VARCHAR else: sqlbind = pyodbc.SQL_WVARCHAR self.cursor.setinputsizes([(sqlbind, 0, 0)]) elif (sqltype == 'nvarchar' or sqltype == 'varchar') and colsize != 'max' and colsize > 2000: self.cursor.setinputsizes([(pyodbc.SQL_WVARCHAR, 0, 0)]) else: self.cursor.setinputsizes(None) self.cursor.execute(sql, value) except pyodbc.DataError: if self.driver_type_is('freetds'): # FREETDS_KNOWN_ISSUE # # cnxn.getinfo(pyodbc.SQL_DESCRIBE_PARAMETER) returns False for FreeTDS, so # pyodbc can't call SQLDescribeParam to get the correct parameter type. # This can lead to errors being returned from SQL Server when sp_prepexec is called, # e.g., "Implicit conversion from data type varchar to varbinary is not allowed." # for test_binary_null # # So at least verify that the user can manually specify the parameter type if sqltype == 'varbinary': sql_param_type = pyodbc.SQL_VARBINARY # (add elif blocks for other cases as required) self.cursor.setinputsizes([(sql_param_type, colsize, 0)]) self.cursor.execute(sql, value) else: raise v = self.cursor.execute("select * from t1").fetchone()[0] # To allow buffer --> db --> bytearray tests, always convert the input to the expected result type before # comparing. if type(value) is not resulttype: value = resulttype(value) self.assertEqual(v, value) def _test_strliketype(self, sqltype, value, resulttype=None, colsize=None): """ The implementation for text, image, ntext, and binary. These types do not support comparison operators. 
""" assert colsize is None or isinstance(colsize, int), colsize assert colsize is None or (value is None or colsize >= len(value)) if colsize: sql = "create table t1(s %s(%s))" % (sqltype, colsize) else: sql = "create table t1(s %s)" % sqltype if resulttype is None: resulttype = type(value) self.cursor.execute(sql) self.cursor.execute("insert into t1 values(?)", value) result = self.cursor.execute("select * from t1").fetchone()[0] self.assertEqual(type(result), resulttype) # To allow buffer --> db --> bytearray tests, always convert the input to the expected result type before # comparing. if type(value) is not resulttype: value = resulttype(value) self.assertEqual(result, value) # # varchar # def test_varchar_null(self): self._test_strtype('varchar', None, colsize=100) # Generate a test for each fencepost size: test_varchar_0, etc. def _maketest(value): def t(self): self._test_strtype('varchar', value, colsize=len(value)) return t for value in UNICODE_SMALL_FENCEPOSTS: locals()['test_varchar_%s' % len(value)] = _maketest(value) # Also test varchar(max) def _maketest(value): def t(self): self._test_strtype('varchar', value, colsize='max') return t for value in UNICODE_MAX_FENCEPOSTS: locals()['test_varcharmax_%s' % len(value)] = _maketest(value) def test_varchar_many(self): self.cursor.execute("create table t1(c1 varchar(300), c2 varchar(300), c3 varchar(300))") v1 = 'ABCDEFGHIJ' * 30 v2 = '0123456789' * 30 v3 = '9876543210' * 30 self.cursor.execute("insert into t1(c1, c2, c3) values (?,?,?)", v1, v2, v3); row = self.cursor.execute("select c1, c2, c3, len(c1) as l1, len(c2) as l2, len(c3) as l3 from t1").fetchone() self.assertEqual(v1, row.c1) self.assertEqual(v2, row.c2) self.assertEqual(v3, row.c3) def test_varchar_upperlatin(self): self._test_strtype('varchar', u'\u00e5', colsize=1) # # nvarchar # def test_nvarchar_null(self): self._test_strtype('nvarchar', None, colsize=100) # Generate a test for each fencepost size: test_unicode_0, etc. 
def _maketest(value): def t(self): self._test_strtype('nvarchar', value, colsize=len(value)) return t for value in UNICODE_SMALL_FENCEPOSTS: locals()['test_nvarchar_%s' % len(value)] = _maketest(value) # Also test nvarchar(max) def _maketest(value): def t(self): self._test_strtype('nvarchar', value, colsize='max') return t for value in UNICODE_MAX_FENCEPOSTS: locals()['test_nvarcharmax_%s' % len(value)] = _maketest(value) def test_unicode_upperlatin(self): self._test_strtype('nvarchar', u'\u00e5', colsize=1) def test_unicode_longmax(self): # Issue 188: Segfault when fetching NVARCHAR(MAX) data over 511 bytes ver = self.get_sqlserver_version() if ver < 9: # 2005+ return # so pass / ignore self.cursor.execute("select cast(replicate(N'x', 512) as nvarchar(max))") def test_fast_executemany_to_local_temp_table(self): if self.driver_type_is('freetds'): warn('FREETDS_KNOWN_ISSUE - test_fast_executemany_to_local_temp_table: test cancelled.') return v = u'Ώπα' self.cursor.execute("CREATE TABLE #issue295 (id INT, txt NVARCHAR(50))") sql = "INSERT INTO #issue295 (txt) VALUES (?)" params = [(v,)] self.cursor.setinputsizes([(pyodbc.SQL_WVARCHAR, 50, 0)]) self.cursor.fast_executemany = True self.cursor.executemany(sql, params) self.assertEqual(self.cursor.execute("SELECT txt FROM #issue295").fetchval(), v) # # binary # def test_binaryNull_object(self): self.cursor.execute("create table t1(n varbinary(10))") self.cursor.execute("insert into t1 values (?)", pyodbc.BinaryNull); # buffer def _maketest(value): def t(self): self._test_strtype('varbinary', buffer(value), resulttype=pyodbc.BINARY, colsize=len(value)) return t for value in ANSI_SMALL_FENCEPOSTS: locals()['test_binary_buffer_%s' % len(value)] = _maketest(value) # bytearray if sys.hexversion >= 0x02060000: def _maketest(value): def t(self): self._test_strtype('varbinary', bytearray(value), colsize=len(value)) return t for value in ANSI_SMALL_FENCEPOSTS: locals()['test_binary_bytearray_%s' % len(value)] = _maketest(value) # 
varbinary(max) def _maketest(value): def t(self): self._test_strtype('varbinary', buffer(value), resulttype=pyodbc.BINARY, colsize='max') return t for value in ANSI_MAX_FENCEPOSTS: locals()['test_binarymax_buffer_%s' % len(value)] = _maketest(value) # bytearray if sys.hexversion >= 0x02060000: def _maketest(value): def t(self): self._test_strtype('varbinary', bytearray(value), colsize='max') return t for value in ANSI_MAX_FENCEPOSTS: locals()['test_binarymax_bytearray_%s' % len(value)] = _maketest(value) # # image # # # text # # def test_empty_text(self): # self._test_strliketype('text', bytearray('')) # # xml # # def test_empty_xml(self): # self._test_strliketype('xml', bytearray('')) # # bit # def test_bit(self): value = True self.cursor.execute("create table t1(b bit)") self.cursor.execute("insert into t1 values (?)", value) v = self.cursor.execute("select b from t1").fetchone()[0] self.assertEqual(type(v), bool) self.assertEqual(v, value) # # decimal # def _decimal(self, precision, scale, negative): # From test provided by planders (thanks!) in Issue 91 self.cursor.execute("create table t1(d decimal(%s, %s))" % (precision, scale)) # Construct a decimal that uses the maximum precision and scale. decStr = '9' * (precision - scale) if scale: decStr = decStr + "." 
+ '9' * scale if negative: decStr = "-" + decStr value = Decimal(decStr) self.cursor.execute("insert into t1 values(?)", value) v = self.cursor.execute("select d from t1").fetchone()[0] self.assertEqual(v, value) def _maketest(p, s, n): def t(self): self._decimal(p, s, n) return t for (p, s, n) in [ (1, 0, False), (1, 0, True), (6, 0, False), (6, 2, False), (6, 4, True), (6, 6, True), (38, 0, False), (38, 10, False), (38, 38, False), (38, 0, True), (38, 10, True), (38, 38, True) ]: locals()['test_decimal_%s_%s_%s' % (p, s, n and 'n' or 'p')] = _maketest(p, s, n) def test_decimal_e(self): """Ensure exponential notation decimals are properly handled""" value = Decimal((0, (1, 2, 3), 5)) # prints as 1.23E+7 self.cursor.execute("create table t1(d decimal(10, 2))") self.cursor.execute("insert into t1 values (?)", value) result = self.cursor.execute("select * from t1").fetchone()[0] self.assertEqual(result, value) def test_subquery_params(self): """Ensure parameter markers work in a subquery""" self.cursor.execute("create table t1(id integer, s varchar(20))") self.cursor.execute("insert into t1 values (?,?)", 1, 'test') row = self.cursor.execute(""" select x.id from ( select id from t1 where s = ? and id between ? and ? ) x """, 'test', 1, 10).fetchone() self.assertNotEqual(row, None) self.assertEqual(row[0], 1) def _exec(self): self.cursor.execute(self.sql) def test_close_cnxn(self): """Make sure using a Cursor after closing its connection doesn't crash.""" self.cursor.execute("create table t1(id integer, s varchar(20))") self.cursor.execute("insert into t1 values (?,?)", 1, 'test') self.cursor.execute("select * from t1") self.cnxn.close() # Now that the connection is closed, we expect an exception. (If the code attempts to use # the HSTMT, we'll get an access violation instead.) 
self.sql = "select * from t1" self.assertRaises(pyodbc.ProgrammingError, self._exec) def test_empty_string(self): self.cursor.execute("create table t1(s varchar(20))") self.cursor.execute("insert into t1 values(?)", "") def test_empty_string_encoding(self): self.cnxn.setdecoding(pyodbc.SQL_CHAR, encoding='shift_jis') value = "" self.cursor.execute("create table t1(s varchar(20))") self.cursor.execute("insert into t1 values(?)", value) v = self.cursor.execute("select * from t1").fetchone()[0] self.assertEqual(v, value) def test_fixed_char(self): value = "testing" self.cursor.execute("create table t1(s char(7))") self.cursor.execute("insert into t1 values(?)", "testing") v = self.cursor.execute("select * from t1").fetchone()[0] self.assertEqual(v, value) def test_empty_unicode(self): self.cursor.execute("create table t1(s nvarchar(20))") self.cursor.execute("insert into t1 values(?)", u"") def test_empty_unicode_encoding(self): self.cnxn.setdecoding(pyodbc.SQL_CHAR, encoding='shift_jis') value = "" self.cursor.execute("create table t1(s nvarchar(20))") self.cursor.execute("insert into t1 values(?)", value) v = self.cursor.execute("select * from t1").fetchone()[0] self.assertEqual(v, value) def test_unicode_query(self): self.cursor.execute(u"select 1") # From issue #206 def _maketest(value): def t(self): self._test_strtype('nvarchar', value, colsize=len(value)) return t locals()['test_chinese_param'] = _maketest(u'我的') def test_chinese(self): v = u'我的' self.cursor.execute(u"SELECT N'我的' AS [Name]") row = self.cursor.fetchone() self.assertEqual(row[0], v) self.cursor.execute(u"SELECT N'我的' AS [Name]") rows = self.cursor.fetchall() self.assertEqual(rows[0][0], v) def test_negative_row_index(self): self.cursor.execute("create table t1(s varchar(20))") self.cursor.execute("insert into t1 values(?)", "1") row = self.cursor.execute("select * from t1").fetchone() self.assertEqual(row[0], "1") self.assertEqual(row[-1], "1") def test_version(self): self.assertEqual(3, 
len(pyodbc.version.split('.'))) # 1.3.1 etc. # # date, time, datetime # def test_datetime(self): value = datetime(2007, 1, 15, 3, 4, 5) self.cursor.execute("create table t1(dt datetime)") self.cursor.execute("insert into t1 values (?)", value) result = self.cursor.execute("select dt from t1").fetchone()[0] self.assertEqual(type(result), datetime) self.assertEqual(value, result) def test_datetime_fraction(self): # SQL Server supports milliseconds, but Python's datetime supports nanoseconds, so the most granular datetime # supported is xxx000. value = datetime(2007, 1, 15, 3, 4, 5, 123000) self.cursor.execute("create table t1(dt datetime)") self.cursor.execute("insert into t1 values (?)", value) result = self.cursor.execute("select dt from t1").fetchone()[0] self.assertEqual(type(result), datetime) self.assertEqual(value, result) def test_datetime_fraction_rounded(self): # SQL Server supports milliseconds, but Python's datetime supports nanoseconds. pyodbc rounds down to what the # database supports. full = datetime(2007, 1, 15, 3, 4, 5, 123456) rounded = datetime(2007, 1, 15, 3, 4, 5, 123000) self.cursor.execute("create table t1(dt datetime)") self.cursor.execute("insert into t1 values (?)", full) result = self.cursor.execute("select dt from t1").fetchone()[0] self.assertEqual(type(result), datetime) self.assertEqual(rounded, result) def test_date(self): ver = self.get_sqlserver_version() if ver < 10: # 2008 only return # so pass / ignore value = date.today() self.cursor.execute("create table t1(d date)") self.cursor.execute("insert into t1 values (?)", value) result = self.cursor.execute("select d from t1").fetchone()[0] self.assertEqual(type(result), date) self.assertEqual(value, result) def test_time(self): ver = self.get_sqlserver_version() if ver < 10: # 2008 only return # so pass / ignore value = datetime.now().time() # We aren't yet writing values using the new extended time type so the value written to the database is only # down to the second. 
value = value.replace(microsecond=0) self.cursor.execute("create table t1(t time)") self.cursor.execute("insert into t1 values (?)", value) result = self.cursor.execute("select t from t1").fetchone()[0] self.assertEqual(type(result), time) self.assertEqual(value, result) def test_datetime2(self): value = datetime(2007, 1, 15, 3, 4, 5) self.cursor.execute("create table t1(dt datetime2)") self.cursor.execute("insert into t1 values (?)", value) result = self.cursor.execute("select dt from t1").fetchone()[0] self.assertEqual(type(result), datetime) self.assertEqual(value, result) # # ints and floats # def test_int(self): value = 1234 self.cursor.execute("create table t1(n int)") self.cursor.execute("insert into t1 values (?)", value) result = self.cursor.execute("select n from t1").fetchone()[0] self.assertEqual(result, value) def test_negative_int(self): value = -1 self.cursor.execute("create table t1(n int)") self.cursor.execute("insert into t1 values (?)", value) result = self.cursor.execute("select n from t1").fetchone()[0] self.assertEqual(result, value) def test_bigint(self): input = 3000000000 self.cursor.execute("create table t1(d bigint)") self.cursor.execute("insert into t1 values (?)", input) result = self.cursor.execute("select d from t1").fetchone()[0] self.assertEqual(result, input) def test_float(self): value = 1234.567 self.cursor.execute("create table t1(n float)") self.cursor.execute("insert into t1 values (?)", value) result = self.cursor.execute("select n from t1").fetchone()[0] self.assertEqual(result, value) def test_negative_float(self): value = -200 self.cursor.execute("create table t1(n float)") self.cursor.execute("insert into t1 values (?)", value) result = self.cursor.execute("select n from t1").fetchone()[0] self.assertEqual(value, result) # # stored procedures # # def test_callproc(self): # "callproc with a simple input-only stored procedure" # pass def test_sp_results(self): self.cursor.execute( """ Create procedure proc1 AS select top 10 
name, id, xtype, refdate from sysobjects """) rows = self.cursor.execute("exec proc1").fetchall() self.assertEqual(type(rows), list) self.assertEqual(len(rows), 10) # there has to be at least 10 items in sysobjects self.assertEqual(type(rows[0].refdate), datetime) def test_sp_results_from_temp(self): # Note: I've used "set nocount on" so that we don't get the number of rows deleted from #tmptable. # If you don't do this, you'd need to call nextset() once to skip it. self.cursor.execute( """ Create procedure proc1 AS set nocount on select top 10 name, id, xtype, refdate into #tmptable from sysobjects select * from #tmptable """) self.cursor.execute("exec proc1") self.assertTrue(self.cursor.description is not None) self.assertTrue(len(self.cursor.description) == 4) rows = self.cursor.fetchall() self.assertEqual(type(rows), list) self.assertEqual(len(rows), 10) # there has to be at least 10 items in sysobjects self.assertEqual(type(rows[0].refdate), datetime) def test_sp_with_dates(self): # Reported in the forums that passing two datetimes to a stored procedure doesn't work. self.cursor.execute( """ if exists (select * from dbo.sysobjects where id = object_id(N'[test_sp]') and OBJECTPROPERTY(id, N'IsProcedure') = 1) drop procedure [dbo].[test_sp] """) self.cursor.execute( """ create procedure test_sp(@d1 datetime, @d2 datetime) AS declare @d as int set @d = datediff(year, @d1, @d2) select @d """) self.cursor.execute("exec test_sp ?, ?", datetime.now(), datetime.now()) rows = self.cursor.fetchall() self.assertTrue(rows is not None) self.assertTrue(rows[0][0] == 0) # 0 years apart def test_sp_with_none(self): # Reported in the forums that passing None caused an error. 
self.cursor.execute( """ if exists (select * from dbo.sysobjects where id = object_id(N'[test_sp]') and OBJECTPROPERTY(id, N'IsProcedure') = 1) drop procedure [dbo].[test_sp] """) self.cursor.execute( """ create procedure test_sp(@x varchar(20)) AS declare @y varchar(20) set @y = @x select @y """) self.cursor.execute("exec test_sp ?", None) rows = self.cursor.fetchall() self.assertTrue(rows is not None) self.assertTrue(rows[0][0] == None) # 0 years apart # # rowcount # def test_rowcount_delete(self): self.assertEqual(self.cursor.rowcount, -1) self.cursor.execute("create table t1(i int)") count = 4 for i in range(count): self.cursor.execute("insert into t1 values (?)", i) self.cursor.execute("delete from t1") self.assertEqual(self.cursor.rowcount, count) def test_rowcount_nodata(self): """ This represents a different code path than a delete that deleted something. The return value is SQL_NO_DATA and code after it was causing an error. We could use SQL_NO_DATA to step over the code that errors out and drop down to the same SQLRowCount code. On the other hand, we could hardcode a zero return value. """ self.cursor.execute("create table t1(i int)") # This is a different code path internally. self.cursor.execute("delete from t1") self.assertEqual(self.cursor.rowcount, 0) def test_rowcount_select(self): """ Ensure Cursor.rowcount is set properly after a select statement. pyodbc calls SQLRowCount after each execute and sets Cursor.rowcount, but SQL Server 2005 returns -1 after a select statement, so we'll test for that behavior. This is valid behavior according to the DB API specification, but people don't seem to like it. 
""" self.cursor.execute("create table t1(i int)") count = 4 for i in range(count): self.cursor.execute("insert into t1 values (?)", i) self.cursor.execute("select * from t1") self.assertEqual(self.cursor.rowcount, -1) rows = self.cursor.fetchall() self.assertEqual(len(rows), count) self.assertEqual(self.cursor.rowcount, -1) def test_rowcount_reset(self): "Ensure rowcount is reset after DDL" ddl_rowcount = 0 if self.driver_type_is('freetds') else -1 self.cursor.execute("create table t1(i int)") count = 4 for i in range(count): self.cursor.execute("insert into t1 values (?)", i) self.assertEqual(self.cursor.rowcount, 1) self.cursor.execute("create table t2(i int)") self.assertEqual(self.cursor.rowcount, ddl_rowcount) # # always return Cursor # # In the 2.0.x branch, Cursor.execute sometimes returned the cursor and sometimes the rowcount. This proved very # confusing when things went wrong and added very little value even when things went right since users could always # use: cursor.execute("...").rowcount def test_retcursor_delete(self): self.cursor.execute("create table t1(i int)") self.cursor.execute("insert into t1 values (1)") v = self.cursor.execute("delete from t1") self.assertEqual(v, self.cursor) def test_retcursor_nodata(self): """ This represents a different code path than a delete that deleted something. The return value is SQL_NO_DATA and code after it was causing an error. We could use SQL_NO_DATA to step over the code that errors out and drop down to the same SQLRowCount code. """ self.cursor.execute("create table t1(i int)") # This is a different code path internally. 
v = self.cursor.execute("delete from t1") self.assertEqual(v, self.cursor) def test_retcursor_select(self): self.cursor.execute("create table t1(i int)") self.cursor.execute("insert into t1 values (1)") v = self.cursor.execute("select * from t1") self.assertEqual(v, self.cursor) # # misc # def table_with_spaces(self): "Ensure we can select using [x z] syntax" try: self.cursor.execute("create table [test one](int n)") self.cursor.execute("insert into [test one] values(1)") self.cursor.execute("select * from [test one]") v = self.cursor.fetchone()[0] self.assertEqual(v, 1) finally: self.cnxn.rollback() def test_lower_case(self): "Ensure pyodbc.lowercase forces returned column names to lowercase." # Has to be set before creating the cursor, so we must recreate self.cursor. pyodbc.lowercase = True self.cursor = self.cnxn.cursor() self.cursor.execute("create table t1(Abc int, dEf int)") self.cursor.execute("select * from t1") names = [ t[0] for t in self.cursor.description ] names.sort() self.assertEqual(names, [ "abc", "def" ]) # Put it back so other tests don't fail. pyodbc.lowercase = False def test_row_description(self): """ Ensure Cursor.description is accessible as Row.cursor_description. """ self.cursor = self.cnxn.cursor() self.cursor.execute("create table t1(a int, b char(3))") self.cursor.execute("insert into t1 values(1, 'abc')") row = self.cursor.execute("select * from t1").fetchone() self.assertEqual(self.cursor.description, row.cursor_description) def test_temp_select(self): # A project was failing to create temporary tables via select into. 
self.cursor.execute("create table t1(s char(7))") self.cursor.execute("insert into t1 values(?)", "testing") v = self.cursor.execute("select * from t1").fetchone()[0] self.assertEqual(type(v), unicode) self.assertEqual(v, "testing") self.cursor.execute("select s into t2 from t1") v = self.cursor.execute("select * from t1").fetchone()[0] self.assertEqual(type(v), unicode) self.assertEqual(v, "testing") def test_money(self): d = Decimal('123456.78') self.cursor.execute("create table t1(i int identity(1,1), m money)") self.cursor.execute("insert into t1(m) values (?)", d) v = self.cursor.execute("select m from t1").fetchone()[0] self.assertEqual(v, d) def test_executemany(self): self.cursor.execute("create table t1(a int, b varchar(10))") params = [ (i, str(i)) for i in range(1, 6) ] self.cursor.executemany("insert into t1(a, b) values (?,?)", params) count = self.cursor.execute("select count(*) from t1").fetchone()[0] self.assertEqual(count, len(params)) self.cursor.execute("select a, b from t1 order by a") rows = self.cursor.fetchall() self.assertEqual(count, len(rows)) for param, row in zip(params, rows): self.assertEqual(param[0], row[0]) self.assertEqual(param[1], row[1]) def test_executemany_one(self): "Pass executemany a single sequence" self.cursor.execute("create table t1(a int, b varchar(10))") params = [ (1, "test") ] self.cursor.executemany("insert into t1(a, b) values (?,?)", params) count = self.cursor.execute("select count(*) from t1").fetchone()[0] self.assertEqual(count, len(params)) self.cursor.execute("select a, b from t1 order by a") rows = self.cursor.fetchall() self.assertEqual(count, len(rows)) for param, row in zip(params, rows): self.assertEqual(param[0], row[0]) self.assertEqual(param[1], row[1]) def test_executemany_dae_0(self): """ DAE for 0-length value """ self.cursor.execute("create table t1(a nvarchar(max)) with (heap)") self.cursor.fast_executemany = True self.cursor.executemany("insert into t1(a) values(?)", [['']]) 
self.assertEqual(self.cursor.execute("select a from t1").fetchone()[0], '') self.cursor.fast_executemany = False def test_executemany_failure(self): """ Ensure that an exception is raised if one query in an executemany fails. """ self.cursor.execute("create table t1(a int, b varchar(10))") params = [ (1, 'good'), ('error', 'not an int'), (3, 'good') ] self.assertRaises(pyodbc.Error, self.cursor.executemany, "insert into t1(a, b) value (?, ?)", params) def test_row_slicing(self): self.cursor.execute("create table t1(a int, b int, c int, d int)"); self.cursor.execute("insert into t1 values(1,2,3,4)") row = self.cursor.execute("select * from t1").fetchone() result = row[:] self.assertTrue(result is row) result = row[:-1] self.assertEqual(result, (1,2,3)) result = row[0:4] self.assertTrue(result is row) def test_row_repr(self): self.cursor.execute("create table t1(a int, b int, c int, d int)"); self.cursor.execute("insert into t1 values(1,2,3,4)") row = self.cursor.execute("select * from t1").fetchone() result = str(row) self.assertEqual(result, "(1, 2, 3, 4)") result = str(row[:-1]) self.assertEqual(result, "(1, 2, 3)") result = str(row[:1]) self.assertEqual(result, "(1,)") def test_concatenation(self): v2 = '0123456789' * 30 v3 = '9876543210' * 30 self.cursor.execute("create table t1(c1 int identity(1, 1), c2 varchar(300), c3 varchar(300))") self.cursor.execute("insert into t1(c2, c3) values (?,?)", v2, v3) row = self.cursor.execute("select c2, c3, c2 + c3 as both from t1").fetchone() self.assertEqual(row.both, v2 + v3) def test_view_select(self): # Reported in forum: Can't select from a view? I think I do this a lot, but another test never hurts. # Create a table (t1) with 3 rows and a view (t2) into it. 
self.cursor.execute("create table t1(c1 int identity(1, 1), c2 varchar(50))") for i in range(3): self.cursor.execute("insert into t1(c2) values (?)", "string%s" % i) self.cursor.execute("create view t2 as select * from t1") # Select from the view self.cursor.execute("select * from t2") rows = self.cursor.fetchall() self.assertTrue(rows is not None) self.assertTrue(len(rows) == 3) self.cursor.execute("drop view t2") def test_autocommit(self): self.assertEqual(self.cnxn.autocommit, False) othercnxn = pyodbc.connect(self.connection_string, autocommit=True) self.assertEqual(othercnxn.autocommit, True) othercnxn.autocommit = False self.assertEqual(othercnxn.autocommit, False) def test_cursorcommit(self): "Ensure cursor.commit works" othercnxn = pyodbc.connect(self.connection_string, autocommit=True) othercursor = othercnxn.cursor() othercnxn = None othercursor.execute("create table t1(s varchar(20))") othercursor.execute("insert into t1 values(?)", 'test') othercursor.commit() value = self.cursor.execute("select s from t1").fetchone()[0] self.assertEqual(value, 'test') def test_unicode_results(self): "Ensure unicode_results forces Unicode" othercnxn = pyodbc.connect(self.connection_string, unicode_results=True, autocommit=True) othercursor = othercnxn.cursor() # ANSI data in an ANSI column ... othercursor.execute("create table t1(s varchar(20))") othercursor.execute("insert into t1 values(?)", 'test') # ... should be returned as Unicode value = othercursor.execute("select s from t1").fetchone()[0] self.assertEqual(value, u'test') def test_skip(self): # Insert 1, 2, and 3. Fetch 1, skip 2, fetch 3. 
self.cursor.execute("create table t1(id int)"); for i in range(1, 5): self.cursor.execute("insert into t1 values(?)", i) self.cursor.execute("select id from t1 order by id") self.assertEqual(self.cursor.fetchone()[0], 1) self.cursor.skip(2) self.assertEqual(self.cursor.fetchone()[0], 4) def test_timeout(self): self.assertEqual(self.cnxn.timeout, 0) # defaults to zero (off) self.cnxn.timeout = 30 self.assertEqual(self.cnxn.timeout, 30) self.cnxn.timeout = 0 self.assertEqual(self.cnxn.timeout, 0) def test_sets_execute(self): # Only lists and tuples are allowed. def f(): self.cursor.execute("create table t1 (word varchar (100))") words = set (['a']) self.cursor.execute("insert into t1 (word) VALUES (?)", [words]) self.assertRaises(pyodbc.ProgrammingError, f) def test_sets_executemany(self): # Only lists and tuples are allowed. def f(): self.cursor.execute("create table t1 (word varchar (100))") words = set (['a']) self.cursor.executemany("insert into t1 (word) values (?)", [words]) self.assertRaises(TypeError, f) def test_row_execute(self): "Ensure we can use a Row object as a parameter to execute" self.cursor.execute("create table t1(n int, s varchar(10))") self.cursor.execute("insert into t1 values (1, 'a')") row = self.cursor.execute("select n, s from t1").fetchone() self.assertNotEqual(row, None) self.cursor.execute("create table t2(n int, s varchar(10))") self.cursor.execute("insert into t2 values (?, ?)", row) def test_row_executemany(self): "Ensure we can use a Row object as a parameter to executemany" self.cursor.execute("create table t1(n int, s varchar(10))") for i in range(3): self.cursor.execute("insert into t1 values (?, ?)", i, chr(ord('a')+i)) rows = self.cursor.execute("select n, s from t1").fetchall() self.assertNotEqual(len(rows), 0) self.cursor.execute("create table t2(n int, s varchar(10))") self.cursor.executemany("insert into t2 values (?, ?)", rows) def test_description(self): "Ensure cursor.description is correct" self.cursor.execute("create 
table t1(n int, s varchar(8), d decimal(5,2))") self.cursor.execute("insert into t1 values (1, 'abc', '1.23')") self.cursor.execute("select * from t1") # (I'm not sure the precision of an int is constant across different versions, bits, so I'm hand checking the # items I do know. # int t = self.cursor.description[0] self.assertEqual(t[0], 'n') self.assertEqual(t[1], int) self.assertEqual(t[5], 0) # scale self.assertEqual(t[6], True) # nullable # varchar(8) t = self.cursor.description[1] self.assertEqual(t[0], 's') self.assertEqual(t[1], str) self.assertEqual(t[4], 8) # precision self.assertEqual(t[5], 0) # scale self.assertEqual(t[6], True) # nullable # decimal(5, 2) t = self.cursor.description[2] self.assertEqual(t[0], 'd') self.assertEqual(t[1], Decimal) self.assertEqual(t[4], 5) # precision self.assertEqual(t[5], 2) # scale self.assertEqual(t[6], True) # nullable def test_none_param(self): "Ensure None can be used for params other than the first" # Some driver/db versions would fail if NULL was not the first parameter because SQLDescribeParam (only used # with NULL) could not be used after the first call to SQLBindParameter. This means None always worked for the # first column, but did not work for later columns. # # If SQLDescribeParam doesn't work, pyodbc would use VARCHAR which almost always worked. However, # binary/varbinary won't allow an implicit conversion. self.cursor.execute("create table t1(n int, blob varbinary(max)) with(heap)") self.cursor.execute("insert into t1 values (1, 0x1234)") row = self.cursor.execute("select * from t1").fetchone() self.assertEqual(row.n, 1) self.assertEqual(type(row.blob), bytearray) sql = "update t1 set n=?, blob=?" 
try: self.cursor.setinputsizes([(), (pyodbc.SQL_VARBINARY, None, None)]) self.cursor.execute(sql, 2, None) except pyodbc.DataError: if self.driver_type_is('freetds'): # FREETDS_KNOWN_ISSUE # # cnxn.getinfo(pyodbc.SQL_DESCRIBE_PARAMETER) returns False for FreeTDS, so # pyodbc can't call SQLDescribeParam to get the correct parameter type. # This can lead to errors being returned from SQL Server when sp_prepexec is called, # e.g., "Implicit conversion from data type varchar to varbinary(max) is not allowed." # # So at least verify that the user can manually specify the parameter type self.cursor.setinputsizes([(), (pyodbc.SQL_VARBINARY, None, None)]) self.cursor.execute(sql, 2, None) else: raise row = self.cursor.execute("select * from t1").fetchone() self.assertEqual(row.n, 2) self.assertEqual(row.blob, None) def test_output_conversion(self): def convert(value): # `value` will be a string. We'll simply add an X at the beginning at the end. return 'X' + value + 'X' self.cnxn.add_output_converter(pyodbc.SQL_VARCHAR, convert) self.cursor.execute("create table t1(n int, v varchar(10))") self.cursor.execute("insert into t1 values (1, '123.45')") value = self.cursor.execute("select v from t1").fetchone()[0] self.assertEqual(value, 'X123.45X') # Now clear the conversions and try again. There should be no Xs this time. self.cnxn.clear_output_converters() value = self.cursor.execute("select v from t1").fetchone()[0] self.assertEqual(value, '123.45') def test_too_large(self): """Ensure error raised if insert fails due to truncation""" value = 'x' * 1000 self.cursor.execute("create table t1(s varchar(800))") def test(): self.cursor.execute("insert into t1 values (?)", value) self.assertRaises(pyodbc.DataError, test) def test_login_timeout(self): # This can only test setting since there isn't a way to cause it to block on the server side. 
cnxns = pyodbc.connect(self.connection_string, timeout=2) def test_row_equal(self): self.cursor.execute("create table t1(n int, s varchar(20))") self.cursor.execute("insert into t1 values (1, 'test')") row1 = self.cursor.execute("select n, s from t1").fetchone() row2 = self.cursor.execute("select n, s from t1").fetchone() b = (row1 == row2) self.assertEqual(b, True) def test_row_gtlt(self): self.cursor.execute("create table t1(n int, s varchar(20))") self.cursor.execute("insert into t1 values (1, 'test1')") self.cursor.execute("insert into t1 values (1, 'test2')") rows = self.cursor.execute("select n, s from t1 order by s").fetchall() self.assertTrue(rows[0] < rows[1]) self.assertTrue(rows[0] <= rows[1]) self.assertTrue(rows[1] > rows[0]) self.assertTrue(rows[1] >= rows[0]) self.assertTrue(rows[0] != rows[1]) rows = list(rows) rows.sort() # uses < def test_context_manager_success(self): """ Ensure a successful with statement causes a commit. """ self.cursor.execute("create table t1(n int)") with pyodbc.connect(self.connection_string) as cnxn: cursor = cnxn.cursor() cursor.execute("insert into t1 values (1)") cnxn = None cursor = None rows = self.cursor.execute("select n from t1").fetchall() self.assertEqual(len(rows), 1) self.assertEqual(rows[0][0], 1) def test_context_manager_fail(self): """ Ensure an exception in a with statement causes a rollback. """ self.cursor.execute("create table t1(n int)") try: with pyodbc.connect(self.connection_string) as cnxn: cursor = cnxn.cursor() cursor.execute("insert into t1 values (1)") raise Exception("Testing failure") except Exception: pass cnxn = None cursor = None count = self.cursor.execute("select count(*) from t1").fetchone()[0] self.assertEqual(count, 0) def test_cursor_context_manager_success(self): """ Ensure a successful with statement using a cursor causes a commit. 
""" self.cursor.execute("create table t1(n int)") with pyodbc.connect(self.connection_string).cursor() as cursor: cursor.execute("insert into t1 values (1)") cursor = None rows = self.cursor.execute("select n from t1").fetchall() self.assertEqual(len(rows), 1) self.assertEqual(rows[0][0], 1) def test_cursor_context_manager_fail(self): """ Ensure an exception in a with statement using a cursor causes a rollback. """ self.cursor.execute("create table t1(n int)") try: with pyodbc.connect(self.connection_string).cursor() as cursor: cursor.execute("insert into t1 values (1)") raise Exception("Testing failure") except Exception: pass cursor = None count = self.cursor.execute("select count(*) from t1").fetchone()[0] self.assertEqual(count, 0) def test_untyped_none(self): # From issue 129 value = self.cursor.execute("select ?", None).fetchone()[0] self.assertEqual(value, None) def test_large_update_nodata(self): self.cursor.execute('create table t1(a varbinary(max)) with(heap)') hundredkb = bytearray('x'*100*1024) self.cursor.setinputsizes([(pyodbc.SQL_VARBINARY,0,0)]) self.cursor.execute('update t1 set a=? where 1=0', (hundredkb,)) def test_no_fetch(self): # Issue 89 with FreeTDS: Multiple selects (or catalog functions that issue selects) without fetches seem to # confuse the driver. self.cursor.execute('select 1') self.cursor.execute('select 1') self.cursor.execute('select 1') def test_drivers(self): drivers = pyodbc.drivers() self.assertEqual(list, type(drivers)) self.assertTrue(len(drivers) > 0) m = re.search('DRIVER={?([^}]+?)}?;', self.connection_string, re.IGNORECASE) current = m.group(1) self.assertTrue(current in drivers) def test_prepare_cleanup(self): # When statement is prepared, it is kept in case the next execute uses the same statement. This must be # removed when a non-execute statement is used that returns results, such as SQLTables. 
self.cursor.execute("select top 1 name from sysobjects where name = ?", "bogus") self.cursor.fetchone() self.cursor.tables("bogus") self.cursor.execute("select top 1 name from sysobjects where name = ?", "bogus") self.cursor.fetchone() def test_emoticons(self): # https://github.com/mkleehammer/pyodbc/issues/423 # # When sending a varchar parameter, pyodbc is supposed to set ColumnSize to the number # of characters. Ensure it works even with 4-byte characters. # # http://www.fileformat.info/info/unicode/char/1f31c/index.htm v = "x \U0001F31C z" self.cursor.execute("create table t1(s varchar(100))") self.cursor.execute("insert into t1 values (?)", v) result = self.cursor.execute("select s from t1").fetchone()[0] self.assertEqual(result, v) def main(): from optparse import OptionParser parser = OptionParser(usage=usage) parser.add_option("-v", "--verbose", action="count", help="Increment test verbosity (can be used multiple times)") parser.add_option("-d", "--debug", action="store_true", default=False, help="Print debugging items") parser.add_option("-t", "--test", help="Run only the named test") (options, args) = parser.parse_args() if len(args) > 1: parser.error('Only one argument is allowed. Do you need quotes around the connection string?') if not args: connection_string = load_setup_connection_string('sqldwtests') if not connection_string: parser.print_help() raise SystemExit() else: connection_string = args[0] if options.verbose: cnxn = pyodbc.connect(connection_string) print_library_info(cnxn) cnxn.close() suite = load_tests(SqlServerTestCase, options.test, connection_string) testRunner = unittest.TextTestRunner(verbosity=options.verbose) result = testRunner.run(suite) return result if __name__ == '__main__': # Add the build directory to the path so we're testing the latest build, not the installed version. 
add_to_path() import pyodbc sys.exit(0 if main().wasSuccessful() else 1) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1619054768.0 pyodbc-4.0.32/tests2/sqlite.db0000664000175000017500000000400000000000000017451 0ustar00mkleehammermkleehammerSQLite format 3@ BBB- 3Otablet1t1CREATE TABLE t1(a int, b char(3))././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1629143953.0 pyodbc-4.0.32/tests2/sqlitetests.py0000775000175000017500000006462200000000000020622 0ustar00mkleehammermkleehammer#!/usr/bin/python # -*- coding: latin-1 -*- usage = """\ usage: %prog [options] connection_string Unit tests for SQLite using the ODBC driver from http://www.ch-werner.de/sqliteodbc To use, pass a connection string as the parameter. The tests will create and drop tables t1 and t2 as necessary. On Windows, use the 32-bit driver with 32-bit Python and the 64-bit driver with 64-bit Python (regardless of your operating system bitness). These run using the version from the 'build' directory, not the version installed into the Python directories. You must run python setup.py build before running the tests. You can also put the connection string into a tmp/setup.cfg file like so: [sqlitetests] connection-string=Driver=SQLite3 ODBC Driver;Database=sqlite.db """ import sys, os, re import unittest from decimal import Decimal from datetime import datetime, date, time from os.path import join, getsize, dirname, abspath from testutils import * _TESTSTR = '0123456789-abcdefghijklmnopqrstuvwxyz-' def _generate_test_string(length): """ Returns a string of `length` characters, constructed by repeating _TESTSTR as necessary. To enhance performance, there are 3 ways data is read, based on the length of the value, so most data types are tested with 3 lengths. This function helps us generate the test data. 
We use a recognizable data set instead of a single character to make it less likely that "overlap" errors will be hidden and to help us manually identify where a break occurs. """ if length <= len(_TESTSTR): return _TESTSTR[:length] c = (length + len(_TESTSTR)-1) / len(_TESTSTR) v = _TESTSTR * c return v[:length] class SqliteTestCase(unittest.TestCase): SMALL_FENCEPOST_SIZES = [ 0, 1, 255, 256, 510, 511, 512, 1023, 1024, 2047, 2048, 4000 ] LARGE_FENCEPOST_SIZES = [ 4095, 4096, 4097, 10 * 1024, 20 * 1024 ] ANSI_FENCEPOSTS = [ _generate_test_string(size) for size in SMALL_FENCEPOST_SIZES ] UNICODE_FENCEPOSTS = [ unicode(s) for s in ANSI_FENCEPOSTS ] IMAGE_FENCEPOSTS = ANSI_FENCEPOSTS + [ _generate_test_string(size) for size in LARGE_FENCEPOST_SIZES ] def __init__(self, method_name, connection_string): unittest.TestCase.__init__(self, method_name) self.connection_string = connection_string def setUp(self): self.cnxn = pyodbc.connect(self.connection_string) self.cursor = self.cnxn.cursor() for i in range(3): try: self.cursor.execute("drop table t%d" % i) self.cnxn.commit() except: pass self.cnxn.rollback() def tearDown(self): try: self.cursor.close() self.cnxn.close() except: # If we've already closed the cursor or connection, exceptions are thrown. 
pass def test_multiple_bindings(self): "More than one bind and select on a cursor" self.cursor.execute("create table t1(n int)") self.cursor.execute("insert into t1 values (?)", 1) self.cursor.execute("insert into t1 values (?)", 2) self.cursor.execute("insert into t1 values (?)", 3) for i in range(3): self.cursor.execute("select n from t1 where n < ?", 10) self.cursor.execute("select n from t1 where n < 3") def test_different_bindings(self): self.cursor.execute("create table t1(n int)") self.cursor.execute("create table t2(d datetime)") self.cursor.execute("insert into t1 values (?)", 1) self.cursor.execute("insert into t2 values (?)", datetime.now()) def test_drivers(self): p = pyodbc.drivers() self.assertTrue(isinstance(p, list)) def test_datasources(self): p = pyodbc.dataSources() self.assertTrue(isinstance(p, dict)) def test_getinfo_string(self): value = self.cnxn.getinfo(pyodbc.SQL_CATALOG_NAME_SEPARATOR) self.assertTrue(isinstance(value, str)) def test_getinfo_bool(self): value = self.cnxn.getinfo(pyodbc.SQL_ACCESSIBLE_TABLES) self.assertTrue(isinstance(value, bool)) def test_getinfo_int(self): value = self.cnxn.getinfo(pyodbc.SQL_DEFAULT_TXN_ISOLATION) self.assertTrue(isinstance(value, (int, long))) def test_getinfo_smallint(self): value = self.cnxn.getinfo(pyodbc.SQL_CONCAT_NULL_BEHAVIOR) self.assertTrue(isinstance(value, int)) def test_fixed_unicode(self): value = u"t\xebsting" self.cursor.execute("create table t1(s nchar(7))") self.cursor.execute("insert into t1 values(?)", u"t\xebsting") v = self.cursor.execute("select * from t1").fetchone()[0] self.assertEqual(type(v), unicode) self.assertEqual(len(v), len(value)) # If we alloc'd wrong, the test below might work because of an embedded NULL self.assertEqual(v, value) def _test_strtype(self, sqltype, value, colsize=None): """ The implementation for string, Unicode, and binary tests. 
""" assert colsize is None or (value is None or colsize >= len(value)) if colsize: sql = "create table t1(s %s(%s))" % (sqltype, colsize) else: sql = "create table t1(s %s)" % sqltype self.cursor.execute(sql) self.cursor.execute("insert into t1 values(?)", value) v = self.cursor.execute("select * from t1").fetchone()[0] self.assertEqual(type(v), type(value)) if value is not None: self.assertEqual(len(v), len(value)) self.assertEqual(v, value) # Reported by Andy Hochhaus in the pyodbc group: In 2.1.7 and earlier, a hardcoded length of 255 was used to # determine whether a parameter was bound as a SQL_VARCHAR or SQL_LONGVARCHAR. Apparently SQL Server chokes if # we bind as a SQL_LONGVARCHAR and the target column size is 8000 or less, which is considers just SQL_VARCHAR. # This means binding a 256 character value would cause problems if compared with a VARCHAR column under # 8001. We now use SQLGetTypeInfo to determine the time to switch. # # [42000] [Microsoft][SQL Server Native Client 10.0][SQL Server]The data types varchar and text are incompatible in the equal to operator. self.cursor.execute("select * from t1 where s=?", value) def _test_strliketype(self, sqltype, value, colsize=None): """ The implementation for text, image, ntext, and binary. These types do not support comparison operators. """ assert colsize is None or (value is None or colsize >= len(value)) if colsize: sql = "create table t1(s %s(%s))" % (sqltype, colsize) else: sql = "create table t1(s %s)" % sqltype self.cursor.execute(sql) self.cursor.execute("insert into t1 values(?)", value) v = self.cursor.execute("select * from t1").fetchone()[0] self.assertEqual(type(v), type(value)) if value is not None: self.assertEqual(len(v), len(value)) self.assertEqual(v, value) # # text # def test_text_null(self): self._test_strtype('text', None, 100) # Generate a test for each fencepost size: test_text_0, etc. 
def _maketest(value): def t(self): self._test_strtype('text', value, len(value)) return t for value in UNICODE_FENCEPOSTS: locals()['test_text_%s' % len(value)] = _maketest(value) def test_text_upperlatin(self): self._test_strtype('varchar', u'') # # blob # def test_null_blob(self): self._test_strtype('blob', None, 100) def test_large_null_blob(self): # Bug 1575064 self._test_strtype('blob', None, 4000) # Generate a test for each fencepost size: test_unicode_0, etc. def _maketest(value): def t(self): self._test_strtype('blob', bytearray(value), len(value)) return t for value in ANSI_FENCEPOSTS: locals()['test_blob_%s' % len(value)] = _maketest(value) def test_subquery_params(self): """Ensure parameter markers work in a subquery""" self.cursor.execute("create table t1(id integer, s varchar(20))") self.cursor.execute("insert into t1 values (?,?)", 1, 'test') row = self.cursor.execute(""" select x.id from ( select id from t1 where s = ? and id between ? and ? ) x """, 'test', 1, 10).fetchone() self.assertNotEqual(row, None) self.assertEqual(row[0], 1) def _exec(self): self.cursor.execute(self.sql) def test_close_cnxn(self): """Make sure using a Cursor after closing its connection doesn't crash.""" self.cursor.execute("create table t1(id integer, s varchar(20))") self.cursor.execute("insert into t1 values (?,?)", 1, 'test') self.cursor.execute("select * from t1") self.cnxn.close() # Now that the connection is closed, we expect an exception. (If the code attempts to use # the HSTMT, we'll get an access violation instead.) 
self.sql = "select * from t1" self.assertRaises(pyodbc.ProgrammingError, self._exec) def test_empty_unicode(self): self.cursor.execute("create table t1(s nvarchar(20))") self.cursor.execute("insert into t1 values(?)", u"") def test_unicode_query(self): self.cursor.execute(u"select 1") def test_negative_row_index(self): self.cursor.execute("create table t1(s varchar(20))") self.cursor.execute("insert into t1 values(?)", "1") row = self.cursor.execute("select * from t1").fetchone() self.assertEqual(row[0], "1") self.assertEqual(row[-1], "1") def test_version(self): self.assertEqual(3, len(pyodbc.version.split('.'))) # 1.3.1 etc. # # ints and floats # def test_int(self): value = 1234 self.cursor.execute("create table t1(n int)") self.cursor.execute("insert into t1 values (?)", value) result = self.cursor.execute("select n from t1").fetchone()[0] self.assertEqual(result, value) def test_negative_int(self): value = -1 self.cursor.execute("create table t1(n int)") self.cursor.execute("insert into t1 values (?)", value) result = self.cursor.execute("select n from t1").fetchone()[0] self.assertEqual(result, value) def test_bigint(self): input = 3000000000 self.cursor.execute("create table t1(d bigint)") self.cursor.execute("insert into t1 values (?)", input) result = self.cursor.execute("select d from t1").fetchone()[0] self.assertEqual(result, input) def test_negative_bigint(self): # Issue 186: BIGINT problem on 32-bit architeture input = -430000000 self.cursor.execute("create table t1(d bigint)") self.cursor.execute("insert into t1 values (?)", input) result = self.cursor.execute("select d from t1").fetchone()[0] self.assertEqual(result, input) def test_float(self): value = 1234.567 self.cursor.execute("create table t1(n float)") self.cursor.execute("insert into t1 values (?)", value) result = self.cursor.execute("select n from t1").fetchone()[0] self.assertEqual(result, value) def test_negative_float(self): value = -200 self.cursor.execute("create table t1(n float)") 
self.cursor.execute("insert into t1 values (?)", value) result = self.cursor.execute("select n from t1").fetchone()[0] self.assertEqual(value, result) # # rowcount # # Note: SQLRowCount does not define what the driver must return after a select statement # and says that its value should not be relied upon. The sqliteodbc driver is hardcoded to # return 0 so I've deleted the test. def test_rowcount_delete(self): self.assertEqual(self.cursor.rowcount, -1) self.cursor.execute("create table t1(i int)") count = 4 for i in range(count): self.cursor.execute("insert into t1 values (?)", i) self.cursor.execute("delete from t1") self.assertEqual(self.cursor.rowcount, count) def test_rowcount_nodata(self): """ This represents a different code path than a delete that deleted something. The return value is SQL_NO_DATA and code after it was causing an error. We could use SQL_NO_DATA to step over the code that errors out and drop down to the same SQLRowCount code. On the other hand, we could hardcode a zero return value. """ self.cursor.execute("create table t1(i int)") # This is a different code path internally. self.cursor.execute("delete from t1") self.assertEqual(self.cursor.rowcount, 0) # In the 2.0.x branch, Cursor.execute sometimes returned the cursor and sometimes the rowcount. This proved very # confusing when things went wrong and added very little value even when things went right since users could always # use: cursor.execute("...").rowcount def test_retcursor_delete(self): self.cursor.execute("create table t1(i int)") self.cursor.execute("insert into t1 values (1)") v = self.cursor.execute("delete from t1") self.assertEqual(v, self.cursor) def test_retcursor_nodata(self): """ This represents a different code path than a delete that deleted something. The return value is SQL_NO_DATA and code after it was causing an error. We could use SQL_NO_DATA to step over the code that errors out and drop down to the same SQLRowCount code. 
""" self.cursor.execute("create table t1(i int)") # This is a different code path internally. v = self.cursor.execute("delete from t1") self.assertEqual(v, self.cursor) def test_retcursor_select(self): self.cursor.execute("create table t1(i int)") self.cursor.execute("insert into t1 values (1)") v = self.cursor.execute("select * from t1") self.assertEqual(v, self.cursor) # # misc # def test_lower_case(self): "Ensure pyodbc.lowercase forces returned column names to lowercase." # Has to be set before creating the cursor, so we must recreate self.cursor. pyodbc.lowercase = True self.cursor = self.cnxn.cursor() self.cursor.execute("create table t1(Abc int, dEf int)") self.cursor.execute("select * from t1") names = [ t[0] for t in self.cursor.description ] names.sort() self.assertEqual(names, [ "abc", "def" ]) # Put it back so other tests don't fail. pyodbc.lowercase = False def test_row_description(self): """ Ensure Cursor.description is accessible as Row.cursor_description. """ self.cursor = self.cnxn.cursor() self.cursor.execute("create table t1(a int, b char(3))") self.cnxn.commit() self.cursor.execute("insert into t1 values(1, 'abc')") row = self.cursor.execute("select * from t1").fetchone() self.assertEqual(self.cursor.description, row.cursor_description) def test_executemany(self): self.cursor.execute("create table t1(a int, b varchar(10))") params = [ (i, str(i)) for i in range(1, 6) ] self.cursor.executemany("insert into t1(a, b) values (?,?)", params) count = self.cursor.execute("select count(*) from t1").fetchone()[0] self.assertEqual(count, len(params)) self.cursor.execute("select a, b from t1 order by a") rows = self.cursor.fetchall() self.assertEqual(count, len(rows)) for param, row in zip(params, rows): self.assertEqual(param[0], row[0]) self.assertEqual(param[1], row[1]) def test_executemany_one(self): "Pass executemany a single sequence" self.cursor.execute("create table t1(a int, b varchar(10))") params = [ (1, "test") ] self.cursor.executemany("insert 
into t1(a, b) values (?,?)", params) count = self.cursor.execute("select count(*) from t1").fetchone()[0] self.assertEqual(count, len(params)) self.cursor.execute("select a, b from t1 order by a") rows = self.cursor.fetchall() self.assertEqual(count, len(rows)) for param, row in zip(params, rows): self.assertEqual(param[0], row[0]) self.assertEqual(param[1], row[1]) def test_executemany_failure(self): """ Ensure that an exception is raised if one query in an executemany fails. """ self.cursor.execute("create table t1(a int, b varchar(10))") params = [ (1, 'good'), ('error', 'not an int'), (3, 'good') ] self.assertRaises(pyodbc.Error, self.cursor.executemany, "insert into t1(a, b) value (?, ?)", params) def test_row_slicing(self): self.cursor.execute("create table t1(a int, b int, c int, d int)"); self.cursor.execute("insert into t1 values(1,2,3,4)") row = self.cursor.execute("select * from t1").fetchone() result = row[:] self.assertTrue(result is row) result = row[:-1] self.assertEqual(result, (1,2,3)) result = row[0:4] self.assertTrue(result is row) def test_row_repr(self): self.cursor.execute("create table t1(a int, b int, c int, d int)"); self.cursor.execute("insert into t1 values(1,2,3,4)") row = self.cursor.execute("select * from t1").fetchone() result = str(row) self.assertEqual(result, "(1, 2, 3, 4)") result = str(row[:-1]) self.assertEqual(result, "(1, 2, 3)") result = str(row[:1]) self.assertEqual(result, "(1,)") def test_view_select(self): # Reported in forum: Can't select from a view? I think I do this a lot, but another test never hurts. # Create a table (t1) with 3 rows and a view (t2) into it. 
self.cursor.execute("create table t1(c1 int identity(1, 1), c2 varchar(50))") for i in range(3): self.cursor.execute("insert into t1(c2) values (?)", "string%s" % i) self.cursor.execute("create view t2 as select * from t1") # Select from the view self.cursor.execute("select * from t2") rows = self.cursor.fetchall() self.assertTrue(rows is not None) self.assertTrue(len(rows) == 3) def test_autocommit(self): self.assertEqual(self.cnxn.autocommit, False) othercnxn = pyodbc.connect(self.connection_string, autocommit=True) self.assertEqual(othercnxn.autocommit, True) othercnxn.autocommit = False self.assertEqual(othercnxn.autocommit, False) def test_unicode_results(self): "Ensure unicode_results forces Unicode" othercnxn = pyodbc.connect(self.connection_string, unicode_results=True) othercursor = othercnxn.cursor() # ANSI data in an ANSI column ... othercursor.execute("create table t1(s varchar(20))") othercursor.execute("insert into t1 values(?)", 'test') # ... should be returned as Unicode value = othercursor.execute("select s from t1").fetchone()[0] self.assertEqual(value, u'test') def test_skip(self): # Insert 1, 2, and 3. Fetch 1, skip 2, fetch 3. self.cursor.execute("create table t1(id int)"); for i in range(1, 5): self.cursor.execute("insert into t1 values(?)", i) self.cursor.execute("select id from t1 order by id") self.assertEqual(self.cursor.fetchone()[0], 1) self.cursor.skip(2) self.assertEqual(self.cursor.fetchone()[0], 4) def test_sets_execute(self): # Only lists and tuples are allowed. def f(): self.cursor.execute("create table t1 (word varchar (100))") words = set (['a']) self.cursor.execute("insert into t1 (word) VALUES (?)", [words]) self.assertRaises(pyodbc.ProgrammingError, f) def test_sets_executemany(self): # Only lists and tuples are allowed. 
def f(): self.cursor.execute("create table t1 (word varchar (100))") words = set (['a']) self.cursor.executemany("insert into t1 (word) values (?)", [words]) self.assertRaises(TypeError, f) def test_row_execute(self): "Ensure we can use a Row object as a parameter to execute" self.cursor.execute("create table t1(n int, s varchar(10))") self.cursor.execute("insert into t1 values (1, 'a')") row = self.cursor.execute("select n, s from t1").fetchone() self.assertNotEqual(row, None) self.cursor.execute("create table t2(n int, s varchar(10))") self.cursor.execute("insert into t2 values (?, ?)", row) def test_row_executemany(self): "Ensure we can use a Row object as a parameter to executemany" self.cursor.execute("create table t1(n int, s varchar(10))") for i in range(3): self.cursor.execute("insert into t1 values (?, ?)", i, chr(ord('a')+i)) rows = self.cursor.execute("select n, s from t1").fetchall() self.assertNotEqual(len(rows), 0) self.cursor.execute("create table t2(n int, s varchar(10))") self.cursor.executemany("insert into t2 values (?, ?)", rows) def test_description(self): "Ensure cursor.description is correct" self.cursor.execute("create table t1(n int, s text)") self.cursor.execute("insert into t1 values (1, 'abc')") self.cursor.execute("select * from t1") # (I'm not sure the precision of an int is constant across different versions, bits, so I'm hand checking the # items I do know. 
# int t = self.cursor.description[0] self.assertEqual(t[0], 'n') self.assertEqual(t[1], int) self.assertEqual(t[5], 0) # scale self.assertEqual(t[6], True) # nullable # text t = self.cursor.description[1] self.assertEqual(t[0], 's') self.assertEqual(t[1], str) self.assertEqual(t[5], 0) # scale self.assertEqual(t[6], True) # nullable def test_row_equal(self): self.cursor.execute("create table t1(n int, s varchar(20))") self.cursor.execute("insert into t1 values (1, 'test')") row1 = self.cursor.execute("select n, s from t1").fetchone() row2 = self.cursor.execute("select n, s from t1").fetchone() b = (row1 == row2) self.assertEqual(b, True) def test_row_gtlt(self): self.cursor.execute("create table t1(n int, s varchar(20))") self.cursor.execute("insert into t1 values (1, 'test1')") self.cursor.execute("insert into t1 values (1, 'test2')") rows = self.cursor.execute("select n, s from t1 order by s").fetchall() self.assertTrue(rows[0] < rows[1]) self.assertTrue(rows[0] <= rows[1]) self.assertTrue(rows[1] > rows[0]) self.assertTrue(rows[1] >= rows[0]) self.assertTrue(rows[0] != rows[1]) rows = list(rows) rows.sort() # uses < def _test_context_manager(self): # TODO: This is failing, but it may be due to the design of sqlite. I've disabled it # for now until I can research it some more. # WARNING: This isn't working right now. We've set the driver's autocommit to "off", # but that doesn't automatically start a transaction. I'm not familiar enough with the # internals of the driver to tell what is going on, but it looks like there is support # for the autocommit flag. # # I thought it might be a timing issue, like it not actually starting a txn until you # try to do something, but that doesn't seem to work either. I'll leave this in to # remind us that it isn't working yet but we need to contact the SQLite ODBC driver # author for some guidance. 
with pyodbc.connect(self.connection_string) as cnxn: cursor = cnxn.cursor() cursor.execute("begin") cursor.execute("create table t1(i int)") cursor.execute('rollback') # The connection should be closed now. def test(): cnxn.execute('rollback') self.assertRaises(pyodbc.Error, test) def test_untyped_none(self): # From issue 129 value = self.cursor.execute("select ?", None).fetchone()[0] self.assertEqual(value, None) def test_large_update_nodata(self): self.cursor.execute('create table t1(a blob)') hundredkb = 'x'*100*1024 self.cursor.execute('update t1 set a=? where 1=0', (hundredkb,)) def test_no_fetch(self): # Issue 89 with FreeTDS: Multiple selects (or catalog functions that issue selects) without fetches seem to # confuse the driver. self.cursor.execute('select 1') self.cursor.execute('select 1') self.cursor.execute('select 1') def main(): from optparse import OptionParser parser = OptionParser(usage=usage) parser.add_option("-v", "--verbose", default=0, action="count", help="Increment test verbosity (can be used multiple times)") parser.add_option("-d", "--debug", action="store_true", default=False, help="Print debugging items") parser.add_option("-t", "--test", help="Run only the named test") (options, args) = parser.parse_args() if len(args) > 1: parser.error('Only one argument is allowed. Do you need quotes around the connection string?') if not args: connection_string = load_setup_connection_string('sqlitetests') if not connection_string: parser.print_help() raise SystemExit() else: connection_string = args[0] if options.verbose: cnxn = pyodbc.connect(connection_string) print_library_info(cnxn) cnxn.close() suite = load_tests(SqliteTestCase, options.test, connection_string) testRunner = unittest.TextTestRunner(verbosity=options.verbose) result = testRunner.run(suite) return result if __name__ == '__main__': # Add the build directory to the path so we're testing the latest build, not the installed version. 
add_to_path() import pyodbc sys.exit(0 if main().wasSuccessful() else 1) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1629396312.0 pyodbc-4.0.32/tests2/sqlservertests.py0000775000175000017500000022555400000000000021352 0ustar00mkleehammermkleehammer#!/usr/bin/python # -*- coding: utf-8 -*- from __future__ import print_function usage = """\ usage: %prog [options] connection_string Unit tests for SQL Server. To use, pass a connection string as the parameter. The tests will create and drop tables t1 and t2 as necessary. These run using the version from the 'build' directory, not the version installed into the Python directories. You must run python setup.py build before running the tests. You can also put the connection string into a tmp/setup.cfg file like so: [sqlservertests] connection-string=DRIVER={SQL Server};SERVER=localhost;UID=uid;PWD=pwd;DATABASE=db The connection string above will use the 2000/2005 driver, even if SQL Server 2008 is installed: 2000: DRIVER={SQL Server} 2005: DRIVER={SQL Server} 2008: DRIVER={SQL Server Native Client 10.0} If using FreeTDS ODBC, be sure to use version 1.00.97 or newer. """ import sys, os, re, uuid import unittest from decimal import Decimal from datetime import datetime, date, time from os.path import join, getsize, dirname, abspath from warnings import warn from testutils import * # Some tests have fallback code for known driver issues. # Change this value to False to bypass the fallback code, e.g., to see # if a newer version of the driver has fixed the underlying issue. # handle_known_issues = True _TESTSTR = '0123456789-abcdefghijklmnopqrstuvwxyz-' def _generate_test_string(length): """ Returns a string of `length` characters, constructed by repeating _TESTSTR as necessary. To enhance performance, there are 3 ways data is read, based on the length of the value, so most data types are tested with 3 lengths. This function helps us generate the test data. 
We use a recognizable data set instead of a single character to make it less likely that "overlap" errors will be hidden and to help us manually identify where a break occurs. """ if length <= len(_TESTSTR): return _TESTSTR[:length] c = (length + len(_TESTSTR)-1) / len(_TESTSTR) v = _TESTSTR * c return v[:length] class SqlServerTestCase(unittest.TestCase): SMALL_FENCEPOST_SIZES = [ 0, 1, 255, 256, 510, 511, 512, 1023, 1024, 2047, 2048, 4000 ] LARGE_FENCEPOST_SIZES = [ 4095, 4096, 4097, 10 * 1024, 20 * 1024 ] MAX_FENCEPOST_SIZES = [ 5 * 1024 * 1024 ] #, 50 * 1024 * 1024 ] ANSI_SMALL_FENCEPOSTS = [ _generate_test_string(size) for size in SMALL_FENCEPOST_SIZES ] UNICODE_SMALL_FENCEPOSTS = [ unicode(s) for s in ANSI_SMALL_FENCEPOSTS ] ANSI_LARGE_FENCEPOSTS = ANSI_SMALL_FENCEPOSTS + [ _generate_test_string(size) for size in LARGE_FENCEPOST_SIZES ] UNICODE_LARGE_FENCEPOSTS = UNICODE_SMALL_FENCEPOSTS + [ unicode(s) for s in [_generate_test_string(size) for size in LARGE_FENCEPOST_SIZES ]] ANSI_MAX_FENCEPOSTS = ANSI_LARGE_FENCEPOSTS + [ _generate_test_string(size) for size in MAX_FENCEPOST_SIZES ] UNICODE_MAX_FENCEPOSTS = UNICODE_LARGE_FENCEPOSTS + [ unicode(s) for s in [_generate_test_string(size) for size in MAX_FENCEPOST_SIZES ]] def __init__(self, method_name, connection_string): unittest.TestCase.__init__(self, method_name) self.connection_string = connection_string def driver_type_is(self, type_name): recognized_types = { 'msodbcsql': '(Microsoft) ODBC Driver xx for SQL Server', 'freetds': 'FreeTDS ODBC', } if not type_name in recognized_types.keys(): raise KeyError('"{0}" is not a recognized driver type: {1}'.format(type_name, list(recognized_types.keys()))) driver_name = self.cnxn.getinfo(pyodbc.SQL_DRIVER_NAME).lower() if type_name == 'msodbcsql': return ('msodbcsql' in driver_name) or ('sqlncli' in driver_name) or ('sqlsrv32.dll' == driver_name) elif type_name == 'freetds': return ('tdsodbc' in driver_name) def handle_known_issues_for(self, type_name, 
print_reminder=False): """ Checks driver `type_name` and "killswitch" variable `handle_known_issues` to see if known issue handling should be bypassed. Optionally prints a reminder message to help identify tests that previously had issues but may have been fixed by a newer version of the driver. Usage examples: # 1. print reminder at beginning of test (before any errors can occur) # def test_some_feature(self): self.handle_known_issues_for('freetds', print_reminder=True) # (continue with test code) # 2. conditional execution of fallback code # try: # (some test code) except pyodbc.DataError: if self.handle_known_issues_for('freetds'): # FREETDS_KNOWN_ISSUE # # (fallback code to work around exception) else: raise """ if self.driver_type_is(type_name): if handle_known_issues: return True else: if print_reminder: print("Known issue handling is disabled. Does this test still fail?") return False def driver_type_is(self, type_name): recognized_types = { 'msodbcsql': '(Microsoft) ODBC Driver xx for SQL Server', 'freetds': 'FreeTDS ODBC', } if not type_name in recognized_types.keys(): raise KeyError('"{0}" is not a recognized driver type: {1}'.format(type_name, list(recognized_types.keys()))) driver_name = self.cnxn.getinfo(pyodbc.SQL_DRIVER_NAME).lower() if type_name == 'msodbcsql': return ('msodbcsql' in driver_name) or ('sqlncli' in driver_name) or ('sqlsrv32.dll' == driver_name) elif type_name == 'freetds': return ('tdsodbc' in driver_name) def get_sqlserver_version(self): """ Returns the major version: 8-->2000, 9-->2005, 10-->2008 """ self.cursor.execute("exec master..xp_msver 'ProductVersion'") row = self.cursor.fetchone() return int(row.Character_Value.split('.', 1)[0]) def setUp(self): self.cnxn = pyodbc.connect(self.connection_string) self.cursor = self.cnxn.cursor() for i in range(3): try: self.cursor.execute("drop table t%d" % i) self.cnxn.commit() except: pass for i in range(3): try: self.cursor.execute("drop procedure proc%d" % i) self.cnxn.commit() except: 
pass try: self.cursor.execute('drop function func1') self.cnxn.commit() except: pass self.cnxn.rollback() def tearDown(self): try: self.cursor.close() self.cnxn.close() except: # If we've already closed the cursor or connection, exceptions are thrown. pass def test_binary_type(self): if sys.hexversion >= 0x02060000: self.assertTrue(pyodbc.BINARY is bytearray) else: self.assertTrue(pyodbc.BINARY is buffer) def test_multiple_bindings(self): "More than one bind and select on a cursor" self.cursor.execute("create table t1(n int)") self.cursor.execute("insert into t1 values (?)", 1) self.cursor.execute("insert into t1 values (?)", 2) self.cursor.execute("insert into t1 values (?)", 3) for i in range(3): self.cursor.execute("select n from t1 where n < ?", 10) self.cursor.execute("select n from t1 where n < 3") def test_different_bindings(self): self.cursor.execute("create table t1(n int)") self.cursor.execute("create table t2(d datetime)") self.cursor.execute("insert into t1 values (?)", 1) self.cursor.execute("insert into t2 values (?)", datetime.now()) def test_drivers(self): p = pyodbc.drivers() self.assertTrue(isinstance(p, list)) def test_datasources(self): p = pyodbc.dataSources() self.assertTrue(isinstance(p, dict)) def test_getinfo_string(self): value = self.cnxn.getinfo(pyodbc.SQL_CATALOG_NAME_SEPARATOR) self.assertTrue(isinstance(value, str)) def test_getinfo_bool(self): value = self.cnxn.getinfo(pyodbc.SQL_ACCESSIBLE_TABLES) self.assertTrue(isinstance(value, bool)) def test_getinfo_int(self): value = self.cnxn.getinfo(pyodbc.SQL_DEFAULT_TXN_ISOLATION) self.assertTrue(isinstance(value, (int, long))) def test_getinfo_smallint(self): value = self.cnxn.getinfo(pyodbc.SQL_CONCAT_NULL_BEHAVIOR) self.assertTrue(isinstance(value, int)) def test_noscan(self): self.assertEqual(self.cursor.noscan, False) self.cursor.noscan = True self.assertEqual(self.cursor.noscan, True) def test_nonnative_uuid(self): # The default is False meaning we should return a string. 
        # Note that SQL Server seems to always return uppercase.
        value = uuid.uuid4()
        self.cursor.execute("create table t1(n uniqueidentifier)")
        self.cursor.execute("insert into t1 values (?)", value)

        pyodbc.native_uuid = False
        result = self.cursor.execute("select n from t1").fetchval()
        self.assertEqual(type(result), unicode)
        self.assertEqual(result, unicode(value).upper())

    def test_native_uuid(self):
        # When true, we should return a uuid.UUID object.
        value = uuid.uuid4()
        self.cursor.execute("create table t1(n uniqueidentifier)")
        self.cursor.execute("insert into t1 values (?)", value)

        pyodbc.native_uuid = True
        result = self.cursor.execute("select n from t1").fetchval()
        self.assertTrue(isinstance(result, uuid.UUID))
        self.assertEqual(value, result)

    def test_nextset(self):
        self.cursor.execute("create table t1(i int)")
        for i in range(4):
            self.cursor.execute("insert into t1(i) values(?)", i)

        # One batch with two result sets; nextset() advances to the second.
        self.cursor.execute("select i from t1 where i < 2 order by i; select i from t1 where i >= 2 order by i")

        for i, row in enumerate(self.cursor):
            self.assertEqual(i, row.i)

        self.assertEqual(self.cursor.nextset(), True)

        for i, row in enumerate(self.cursor):
            self.assertEqual(i + 2, row.i)

    def test_nextset_with_raiserror(self):
        self.cursor.execute("select i = 1; RAISERROR('c', 16, 1);")
        row = next(self.cursor)
        self.assertEqual(1, row.i)
        if self.driver_type_is('freetds'):
            warn('FREETDS_KNOWN_ISSUE - test_nextset_with_raiserror: test cancelled.')
            # AssertionError: ProgrammingError not raised by nextset
            # https://github.com/FreeTDS/freetds/issues/230
            return  # for now
        self.assertRaises(pyodbc.ProgrammingError, self.cursor.nextset)

    def test_fixed_unicode(self):
        value = u"t\xebsting"
        self.cursor.execute("create table t1(s nchar(7))")
        self.cursor.execute("insert into t1 values(?)", u"t\xebsting")
        v = self.cursor.execute("select * from t1").fetchone()[0]
        self.assertEqual(type(v), unicode)
        # If we alloc'd wrong, the test below might work because of an embedded NULL
        self.assertEqual(len(v), len(value))
        self.assertEqual(v, value)

    def _test_strtype(self, sqltype, value, resulttype=None, colsize=None):
        """
        The implementation for string, Unicode, and binary tests.
        """
        assert colsize in (None, 'max') or isinstance(colsize, int), colsize
        assert colsize in (None, 'max') or (value is None or colsize >= len(value))

        if colsize:
            sql = "create table t1(s %s(%s))" % (sqltype, colsize)
        else:
            sql = "create table t1(s %s)" % sqltype

        self.cursor.execute(sql)

        if resulttype is None:
            resulttype = type(value)

        sql = "insert into t1 values(?)"
        try:
            self.cursor.execute(sql, value)
        except pyodbc.DataError:
            if self.driver_type_is('freetds'):
                # FREETDS_KNOWN_ISSUE
                #
                # cnxn.getinfo(pyodbc.SQL_DESCRIBE_PARAMETER) returns False for FreeTDS, so
                # pyodbc can't call SQLDescribeParam to get the correct parameter type.
                # This can lead to errors being returned from SQL Server when sp_prepexec is called,
                # e.g., "Implicit conversion from data type varchar to varbinary is not allowed."
                # for test_binary_null
                #
                # So at least verify that the user can manually specify the parameter type
                if sqltype == 'varbinary':
                    sql_param_type = pyodbc.SQL_VARBINARY
                # (add elif blocks for other cases as required)
                self.cursor.setinputsizes([(sql_param_type, colsize, 0)])
                self.cursor.execute(sql, value)
            else:
                raise
        v = self.cursor.execute("select * from t1").fetchone()[0]

        # To allow buffer --> db --> bytearray tests, always convert the input to the expected result type before
        # comparing.
        if type(value) is not resulttype:
            value = resulttype(value)

        self.assertEqual(v, value)

    def _test_strliketype(self, sqltype, value, resulttype=None, colsize=None):
        """
        The implementation for text, image, ntext, and binary.

        These types do not support comparison operators.
""" assert colsize is None or isinstance(colsize, int), colsize assert colsize is None or (value is None or colsize >= len(value)) if colsize: sql = "create table t1(s %s(%s))" % (sqltype, colsize) else: sql = "create table t1(s %s)" % sqltype if resulttype is None: resulttype = type(value) self.cursor.execute(sql) self.cursor.execute("insert into t1 values(?)", value) result = self.cursor.execute("select * from t1").fetchone()[0] self.assertEqual(type(result), resulttype) # To allow buffer --> db --> bytearray tests, always convert the input to the expected result type before # comparing. if type(value) is not resulttype: value = resulttype(value) self.assertEqual(result, value) # # varchar # def test_varchar_null(self): self._test_strtype('varchar', None, colsize=100) # Generate a test for each fencepost size: test_varchar_0, etc. def _maketest(value): def t(self): self._test_strtype('varchar', value, colsize=len(value)) return t for value in UNICODE_SMALL_FENCEPOSTS: locals()['test_varchar_%s' % len(value)] = _maketest(value) # Also test varchar(max) def _maketest(value): def t(self): self._test_strtype('varchar', value, colsize='max') return t for value in UNICODE_MAX_FENCEPOSTS: locals()['test_varcharmax_%s' % len(value)] = _maketest(value) def test_varchar_many(self): self.cursor.execute("create table t1(c1 varchar(300), c2 varchar(300), c3 varchar(300))") v1 = 'ABCDEFGHIJ' * 30 v2 = '0123456789' * 30 v3 = '9876543210' * 30 self.cursor.execute("insert into t1(c1, c2, c3) values (?,?,?)", v1, v2, v3); row = self.cursor.execute("select c1, c2, c3, len(c1) as l1, len(c2) as l2, len(c3) as l3 from t1").fetchone() self.assertEqual(v1, row.c1) self.assertEqual(v2, row.c2) self.assertEqual(v3, row.c3) def test_varchar_upperlatin(self): self._test_strtype('varchar', u'\u00e5', colsize=1) # # nvarchar # def test_nvarchar_null(self): self._test_strtype('nvarchar', None, colsize=100) # Generate a test for each fencepost size: test_unicode_0, etc. 
def _maketest(value): def t(self): self._test_strtype('nvarchar', value, colsize=len(value)) return t for value in UNICODE_SMALL_FENCEPOSTS: locals()['test_nvarchar_%s' % len(value)] = _maketest(value) # Also test nvarchar(max) def _maketest(value): def t(self): self._test_strtype('nvarchar', value, colsize='max') return t for value in UNICODE_MAX_FENCEPOSTS: locals()['test_nvarcharmax_%s' % len(value)] = _maketest(value) def test_unicode_upperlatin(self): self._test_strtype('nvarchar', u'\u00e5', colsize=1) def test_unicode_longmax(self): # Issue 188: Segfault when fetching NVARCHAR(MAX) data over 511 bytes ver = self.get_sqlserver_version() if ver < 9: # 2005+ return # so pass / ignore self.cursor.execute("select cast(replicate(N'x', 512) as nvarchar(max))") def test_fast_executemany_to_local_temp_table(self): if self.driver_type_is('freetds'): warn('FREETDS_KNOWN_ISSUE - test_fast_executemany_to_local_temp_table: test cancelled.') return v = u'Ώπα' self.cursor.execute("CREATE TABLE #issue295 (id INT IDENTITY PRIMARY KEY, txt NVARCHAR(50))") sql = "INSERT INTO #issue295 (txt) VALUES (?)" params = [(v,)] self.cursor.setinputsizes([(pyodbc.SQL_WVARCHAR, 50, 0)]) self.cursor.fast_executemany = True self.cursor.executemany(sql, params) self.assertEqual(self.cursor.execute("SELECT txt FROM #issue295").fetchval(), v) def test_fast_executemany_to_datetime2(self): if self.handle_known_issues_for('freetds', print_reminder=True): warn('FREETDS_KNOWN_ISSUE - test_fast_executemany_to_datetime2: test cancelled.') return v = datetime(2019, 3, 12, 10, 0, 0, 123456) self.cursor.execute("CREATE TABLE ##issue540 (dt2 DATETIME2(2))") sql = "INSERT INTO ##issue540 (dt2) VALUES (?)" params = [(v,)] self.cursor.fast_executemany = True self.cursor.executemany(sql, params) self.assertEqual(self.cursor.execute("SELECT CAST(dt2 AS VARCHAR) FROM ##issue540").fetchval(), '2019-03-12 10:00:00.12') def test_fast_executemany_high_unicode(self): if self.handle_known_issues_for('freetds', 
print_reminder=True): warn('FREETDS_KNOWN_ISSUE - test_fast_executemany_high_unicode: test cancelled.') return v = u"🎥" self.cursor.fast_executemany = True self.cursor.execute("CREATE TABLE t1 (col1 nvarchar(max) null)") self.cursor.executemany("INSERT INTO t1 (col1) VALUES (?)", [[v,]]) self.assertEqual(self.cursor.execute("SELECT * FROM t1").fetchone()[0], v) # # binary # def test_binary_null(self): self._test_strtype('varbinary', None, colsize=100) def test_large_binary_null(self): # Bug 1575064 self._test_strtype('varbinary', None, colsize=4000) def test_binaryNull_object(self): self.cursor.execute("create table t1(n varbinary(10))") self.cursor.execute("insert into t1 values (?)", pyodbc.BinaryNull); # buffer def _maketest(value): def t(self): self._test_strtype('varbinary', buffer(value), resulttype=pyodbc.BINARY, colsize=len(value)) return t for value in ANSI_SMALL_FENCEPOSTS: locals()['test_binary_buffer_%s' % len(value)] = _maketest(value) # bytearray if sys.hexversion >= 0x02060000: def _maketest(value): def t(self): self._test_strtype('varbinary', bytearray(value), colsize=len(value)) return t for value in ANSI_SMALL_FENCEPOSTS: locals()['test_binary_bytearray_%s' % len(value)] = _maketest(value) # varbinary(max) def _maketest(value): def t(self): self._test_strtype('varbinary', buffer(value), resulttype=pyodbc.BINARY, colsize='max') return t for value in ANSI_MAX_FENCEPOSTS: locals()['test_binarymax_buffer_%s' % len(value)] = _maketest(value) # bytearray if sys.hexversion >= 0x02060000: def _maketest(value): def t(self): self._test_strtype('varbinary', bytearray(value), colsize='max') return t for value in ANSI_MAX_FENCEPOSTS: locals()['test_binarymax_bytearray_%s' % len(value)] = _maketest(value) # # image # def test_image_null(self): self._test_strliketype('image', None, type(None)) # Generate a test for each fencepost size: test_unicode_0, etc. 
    def _maketest(value):
        def t(self):
            self._test_strliketype('image', buffer(value), pyodbc.BINARY)
        return t
    for value in ANSI_LARGE_FENCEPOSTS:
        locals()['test_image_buffer_%s' % len(value)] = _maketest(value)

    if sys.hexversion >= 0x02060000:
        # Python 2.6+ supports bytearray, which pyodbc considers varbinary.

        # Generate a test for each fencepost size: test_unicode_0, etc.
        def _maketest(value):
            def t(self):
                self._test_strtype('image', bytearray(value))
            return t
        for value in ANSI_LARGE_FENCEPOSTS:
            locals()['test_image_bytearray_%s' % len(value)] = _maketest(value)

    def test_image_upperlatin(self):
        self._test_strliketype('image', buffer('á'), pyodbc.BINARY)

    #
    # text
    #

    # def test_empty_text(self):
    #     self._test_strliketype('text', bytearray(''))

    def test_null_text(self):
        self._test_strliketype('text', None, type(None))

    # Generate a test for each fencepost size: test_unicode_0, etc.
    def _maketest(value):
        def t(self):
            self._test_strliketype('text', value)
        return t
    for value in UNICODE_SMALL_FENCEPOSTS:
        locals()['test_text_buffer_%s' % len(value)] = _maketest(value)

    def test_text_upperlatin(self):
        self._test_strliketype('text', u'á')

    #
    # xml
    #

    # def test_empty_xml(self):
    #     self._test_strliketype('xml', bytearray(''))

    def test_null_xml(self):
        self._test_strliketype('xml', None, type(None))

    # Generate a test for each fencepost size: test_unicode_0, etc.
    def _maketest(value):
        def t(self):
            self._test_strliketype('xml', value)
        return t
    for value in UNICODE_SMALL_FENCEPOSTS:
        locals()['test_xml_buffer_%s' % len(value)] = _maketest(value)

    def test_xml_str(self):
        # SQL Server treats XML like *binary* data.
        # See https://msdn.microsoft.com/en-us/library/ms131375.aspx
        #
        # The real problem with this is that we *don't* know that a value is
        # XML when we write it to the database.  It is either an `str` or a
        # `unicode` object, so we're going to convert it into one of *two*
        # different formats.
        #
        # When we read it out of the database, all we know is that it is XML
        # and we don't know how it was encoded so we don't know how to decode
        # it.  Since almost everyone treats XML as Unicode nowdays, we're going
        # to decode XML as Unicode.  Force your XML to Unicode before writing
        # to the database.  (Otherwise, set a global encoder for the XMl type.)
        ascii = 'test'
        val = unicode(ascii)
        self.cursor.execute("create table t1(a xml)")
        self.cursor.execute("insert into t1 values (?)", val)
        result = self.cursor.execute("select a from t1").fetchval()
        self.assertEqual(result, val)

    def test_xml_upperlatin(self):
        val = u'á'
        self.cursor.execute("create table t1(a xml)")
        self.cursor.execute("insert into t1 values (?)", val)
        result = self.cursor.execute("select a from t1").fetchval()
        self.assertEqual(result, val)

    #
    # bit
    #

    def test_bit(self):
        value = True
        self.cursor.execute("create table t1(b bit)")
        self.cursor.execute("insert into t1 values (?)", value)
        v = self.cursor.execute("select b from t1").fetchone()[0]
        self.assertEqual(type(v), bool)
        self.assertEqual(v, value)

    #
    # decimal
    #

    def _decimal(self, precision, scale, negative):
        # From test provided by planders (thanks!) in Issue 91

        self.cursor.execute("create table t1(d decimal(%s, %s))" % (precision, scale))

        # Construct a decimal that uses the maximum precision and scale.
        decStr = '9' * (precision - scale)
        if scale:
            decStr = decStr + "." + '9' * scale
        if negative:
            decStr = "-" + decStr
        value = Decimal(decStr)

        self.cursor.execute("insert into t1 values(?)", value)

        v = self.cursor.execute("select d from t1").fetchone()[0]
        self.assertEqual(v, value)

    def _maketest(p, s, n):
        def t(self):
            self._decimal(p, s, n)
        return t
    for (p, s, n) in [ (1,  0,  False),
                       (1,  0,  True),
                       (6,  0,  False),
                       (6,  2,  False),
                       (6,  4,  True),
                       (6,  6,  True),
                       (38, 0,  False),
                       (38, 10, False),
                       (38, 38, False),
                       (38, 0,  True),
                       (38, 10, True),
                       (38, 38, True) ]:
        locals()['test_decimal_%s_%s_%s' % (p, s, n and 'n' or 'p')] = _maketest(p, s, n)

    def test_decimal_e(self):
        """Ensure exponential notation decimals are properly handled"""
        value = Decimal((0, (1, 2, 3), 5))  # prints as 1.23E+7
        self.cursor.execute("create table t1(d decimal(10, 2))")
        self.cursor.execute("insert into t1 values (?)", value)
        result = self.cursor.execute("select * from t1").fetchone()[0]
        self.assertEqual(result, value)

    def test_subquery_params(self):
        """Ensure parameter markers work in a subquery"""
        self.cursor.execute("create table t1(id integer, s varchar(20))")
        self.cursor.execute("insert into t1 values (?,?)", 1, 'test')
        row = self.cursor.execute("""
                                  select x.id
                                  from (
                                    select id
                                    from t1
                                    where s = ?
                                      and id between ? and ?
                                   ) x
                                   """, 'test', 1, 10).fetchone()
        self.assertNotEqual(row, None)
        self.assertEqual(row[0], 1)

    def _exec(self):
        # Helper so assertRaises can run a statement stored in self.sql.
        self.cursor.execute(self.sql)

    def test_close_cnxn(self):
        """Make sure using a Cursor after closing its connection doesn't crash."""

        self.cursor.execute("create table t1(id integer, s varchar(20))")
        self.cursor.execute("insert into t1 values (?,?)", 1, 'test')
        self.cursor.execute("select * from t1")

        self.cnxn.close()

        # Now that the connection is closed, we expect an exception.  (If the code attempts to use
        # the HSTMT, we'll get an access violation instead.)
self.sql = "select * from t1" self.assertRaises(pyodbc.ProgrammingError, self._exec) def test_empty_string(self): self.cursor.execute("create table t1(s varchar(20))") self.cursor.execute("insert into t1 values(?)", "") def test_empty_string_encoding(self): self.cnxn.setdecoding(pyodbc.SQL_CHAR, encoding='shift_jis') value = "" self.cursor.execute("create table t1(s varchar(20))") self.cursor.execute("insert into t1 values(?)", value) v = self.cursor.execute("select * from t1").fetchone()[0] self.assertEqual(v, value) def test_fixed_char(self): value = "testing" self.cursor.execute("create table t1(s char(7))") self.cursor.execute("insert into t1 values(?)", "testing") v = self.cursor.execute("select * from t1").fetchone()[0] self.assertEqual(v, value) def test_empty_unicode(self): self.cursor.execute("create table t1(s nvarchar(20))") self.cursor.execute("insert into t1 values(?)", u"") def test_empty_unicode_encoding(self): self.cnxn.setdecoding(pyodbc.SQL_CHAR, encoding='shift_jis') value = "" self.cursor.execute("create table t1(s nvarchar(20))") self.cursor.execute("insert into t1 values(?)", value) v = self.cursor.execute("select * from t1").fetchone()[0] self.assertEqual(v, value) def test_unicode_query(self): self.cursor.execute(u"select 1") # From issue #206 def _maketest(value): def t(self): self._test_strtype('nvarchar', value, colsize=len(value)) return t locals()['test_chinese_param'] = _maketest(u'我的') def test_chinese(self): v = u'我的' self.cursor.execute(u"SELECT N'我的' AS [Name]") row = self.cursor.fetchone() self.assertEqual(row[0], v) self.cursor.execute(u"SELECT N'我的' AS [Name]") rows = self.cursor.fetchall() self.assertEqual(rows[0][0], v) def test_negative_row_index(self): self.cursor.execute("create table t1(s varchar(20))") self.cursor.execute("insert into t1 values(?)", "1") row = self.cursor.execute("select * from t1").fetchone() self.assertEqual(row[0], "1") self.assertEqual(row[-1], "1") def test_version(self): self.assertEqual(3, 
len(pyodbc.version.split('.'))) # 1.3.1 etc. # # date, time, datetime # def test_datetime(self): value = datetime(2007, 1, 15, 3, 4, 5) self.cursor.execute("create table t1(dt datetime)") self.cursor.execute("insert into t1 values (?)", value) result = self.cursor.execute("select dt from t1").fetchone()[0] self.assertEqual(type(result), datetime) self.assertEqual(value, result) def test_datetime_fraction(self): # SQL Server supports milliseconds, but Python's datetime supports nanoseconds, so the most granular datetime # supported is xxx000. value = datetime(2007, 1, 15, 3, 4, 5, 123000) self.cursor.execute("create table t1(dt datetime)") self.cursor.execute("insert into t1 values (?)", value) result = self.cursor.execute("select dt from t1").fetchone()[0] self.assertEqual(type(result), datetime) self.assertEqual(value, result) def test_datetime_fraction_rounded(self): # SQL Server supports milliseconds, but Python's datetime supports nanoseconds. pyodbc rounds down to what the # database supports. full = datetime(2007, 1, 15, 3, 4, 5, 123456) rounded = datetime(2007, 1, 15, 3, 4, 5, 123000) self.cursor.execute("create table t1(dt datetime)") self.cursor.execute("insert into t1 values (?)", full) result = self.cursor.execute("select dt from t1").fetchone()[0] self.assertEqual(type(result), datetime) self.assertEqual(rounded, result) def test_date(self): ver = self.get_sqlserver_version() if ver < 10: # 2008 only return # so pass / ignore value = date.today() self.cursor.execute("create table t1(d date)") self.cursor.execute("insert into t1 values (?)", value) result = self.cursor.execute("select d from t1").fetchone()[0] self.assertEqual(type(result), date) self.assertEqual(value, result) def test_time(self): ver = self.get_sqlserver_version() if ver < 10: # 2008 only return # so pass / ignore value = datetime.now().time() # We aren't yet writing values using the new extended time type so the value written to the database is only # down to the second. 
value = value.replace(microsecond=0) self.cursor.execute("create table t1(t time)") self.cursor.execute("insert into t1 values (?)", value) result = self.cursor.execute("select t from t1").fetchone()[0] self.assertEqual(type(result), time) self.assertEqual(value, result) def test_datetime2(self): value = datetime(2007, 1, 15, 3, 4, 5) self.cursor.execute("create table t1(dt datetime2)") self.cursor.execute("insert into t1 values (?)", value) result = self.cursor.execute("select dt from t1").fetchone()[0] self.assertEqual(type(result), datetime) self.assertEqual(value, result) # # ints and floats # def test_int(self): value = 1234 self.cursor.execute("create table t1(n int)") self.cursor.execute("insert into t1 values (?)", value) result = self.cursor.execute("select n from t1").fetchone()[0] self.assertEqual(result, value) def test_negative_int(self): value = -1 self.cursor.execute("create table t1(n int)") self.cursor.execute("insert into t1 values (?)", value) result = self.cursor.execute("select n from t1").fetchone()[0] self.assertEqual(result, value) def test_bigint(self): input = 3000000000 self.cursor.execute("create table t1(d bigint)") self.cursor.execute("insert into t1 values (?)", input) result = self.cursor.execute("select d from t1").fetchone()[0] self.assertEqual(result, input) def test_overflow_int(self): # python allows integers of any size, bigger than an 8 byte int can contain input = 9999999999999999999999999999999999999 self.cursor.execute("create table t1(d bigint)") self.cnxn.commit() self.assertRaises(OverflowError, self.cursor.execute, "insert into t1 values (?)", input) result = self.cursor.execute("select * from t1").fetchall() self.assertEqual(result, []) def test_float(self): value = 1234.567 self.cursor.execute("create table t1(n float)") self.cursor.execute("insert into t1 values (?)", value) result = self.cursor.execute("select n from t1").fetchone()[0] self.assertEqual(result, value) def test_denorm_float(self): value = 0.00012345 
self.cursor.execute("create table t1(n float)") self.cursor.execute("insert into t1 values (?)", value) result = self.cursor.execute("select n from t1").fetchone()[0] self.assertEqual(result, value) def test_negative_float(self): value = -200 self.cursor.execute("create table t1(n float)") self.cursor.execute("insert into t1 values (?)", value) result = self.cursor.execute("select n from t1").fetchone()[0] self.assertEqual(value, result) def test_non_numeric_float(self): self.cursor.execute("create table t1(d float)") self.cnxn.commit() for input in (float('+Infinity'), float('-Infinity'), float('NaN')): self.assertRaises(pyodbc.ProgrammingError, self.cursor.execute, "insert into t1 values (?)", input) result = self.cursor.execute("select * from t1").fetchall() self.assertEqual(result, []) # # stored procedures # # def test_callproc(self): # "callproc with a simple input-only stored procedure" # pass def test_sp_results(self): self.cursor.execute( """ Create procedure proc1 AS select top 10 name, id, xtype, refdate from sysobjects """) rows = self.cursor.execute("exec proc1").fetchall() self.assertEqual(type(rows), list) self.assertEqual(len(rows), 10) # there has to be at least 10 items in sysobjects self.assertEqual(type(rows[0].refdate), datetime) def test_sp_results_from_temp(self): # Note: I've used "set nocount on" so that we don't get the number of rows deleted from #tmptable. # If you don't do this, you'd need to call nextset() once to skip it. 
self.cursor.execute( """ Create procedure proc1 AS set nocount on select top 10 name, id, xtype, refdate into #tmptable from sysobjects select * from #tmptable """) self.cursor.execute("exec proc1") self.assertTrue(self.cursor.description is not None) self.assertTrue(len(self.cursor.description) == 4) rows = self.cursor.fetchall() self.assertEqual(type(rows), list) self.assertEqual(len(rows), 10) # there has to be at least 10 items in sysobjects self.assertEqual(type(rows[0].refdate), datetime) def test_sp_results_from_vartbl(self): self.cursor.execute( """ Create procedure proc1 AS set nocount on declare @tmptbl table(name varchar(100), id int, xtype varchar(4), refdate datetime) insert into @tmptbl select top 10 name, id, xtype, refdate from sysobjects select * from @tmptbl """) self.cursor.execute("exec proc1") rows = self.cursor.fetchall() self.assertEqual(type(rows), list) self.assertEqual(len(rows), 10) # there has to be at least 10 items in sysobjects self.assertEqual(type(rows[0].refdate), datetime) def test_sp_with_dates(self): # Reported in the forums that passing two datetimes to a stored procedure doesn't work. self.cursor.execute( """ if exists (select * from dbo.sysobjects where id = object_id(N'[test_sp]') and OBJECTPROPERTY(id, N'IsProcedure') = 1) drop procedure [dbo].[test_sp] """) self.cursor.execute( """ create procedure test_sp(@d1 datetime, @d2 datetime) AS declare @d as int set @d = datediff(year, @d1, @d2) select @d """) self.cursor.execute("exec test_sp ?, ?", datetime.now(), datetime.now()) rows = self.cursor.fetchall() self.assertTrue(rows is not None) self.assertTrue(rows[0][0] == 0) # 0 years apart def test_sp_with_none(self): # Reported in the forums that passing None caused an error. 
self.cursor.execute( """ if exists (select * from dbo.sysobjects where id = object_id(N'[test_sp]') and OBJECTPROPERTY(id, N'IsProcedure') = 1) drop procedure [dbo].[test_sp] """) self.cursor.execute( """ create procedure test_sp(@x varchar(20)) AS declare @y varchar(20) set @y = @x select @y """) self.cursor.execute("exec test_sp ?", None) rows = self.cursor.fetchall() self.assertTrue(rows is not None) self.assertTrue(rows[0][0] == None) # 0 years apart # # rowcount # def test_rowcount_delete(self): self.assertEqual(self.cursor.rowcount, -1) self.cursor.execute("create table t1(i int)") count = 4 for i in range(count): self.cursor.execute("insert into t1 values (?)", i) self.cursor.execute("delete from t1") self.assertEqual(self.cursor.rowcount, count) def test_rowcount_nodata(self): """ This represents a different code path than a delete that deleted something. The return value is SQL_NO_DATA and code after it was causing an error. We could use SQL_NO_DATA to step over the code that errors out and drop down to the same SQLRowCount code. On the other hand, we could hardcode a zero return value. """ self.cursor.execute("create table t1(i int)") # This is a different code path internally. self.cursor.execute("delete from t1") self.assertEqual(self.cursor.rowcount, 0) def test_rowcount_select(self): """ Ensure Cursor.rowcount is set properly after a select statement. pyodbc calls SQLRowCount after each execute and sets Cursor.rowcount, but SQL Server 2005 returns -1 after a select statement, so we'll test for that behavior. This is valid behavior according to the DB API specification, but people don't seem to like it. 
""" self.cursor.execute("create table t1(i int)") count = 4 for i in range(count): self.cursor.execute("insert into t1 values (?)", i) self.cursor.execute("select * from t1") self.assertEqual(self.cursor.rowcount, -1) rows = self.cursor.fetchall() self.assertEqual(len(rows), count) self.assertEqual(self.cursor.rowcount, -1) def test_rowcount_reset(self): "Ensure rowcount is reset after DDL" ddl_rowcount = 0 if self.driver_type_is('freetds') else -1 self.cursor.execute("create table t1(i int)") count = 4 for i in range(count): self.cursor.execute("insert into t1 values (?)", i) self.assertEqual(self.cursor.rowcount, 1) self.cursor.execute("create table t2(i int)") self.assertEqual(self.cursor.rowcount, ddl_rowcount) # # always return Cursor # # In the 2.0.x branch, Cursor.execute sometimes returned the cursor and sometimes the rowcount. This proved very # confusing when things went wrong and added very little value even when things went right since users could always # use: cursor.execute("...").rowcount def test_retcursor_delete(self): self.cursor.execute("create table t1(i int)") self.cursor.execute("insert into t1 values (1)") v = self.cursor.execute("delete from t1") self.assertEqual(v, self.cursor) def test_retcursor_nodata(self): """ This represents a different code path than a delete that deleted something. The return value is SQL_NO_DATA and code after it was causing an error. We could use SQL_NO_DATA to step over the code that errors out and drop down to the same SQLRowCount code. """ self.cursor.execute("create table t1(i int)") # This is a different code path internally. 
v = self.cursor.execute("delete from t1") self.assertEqual(v, self.cursor) def test_retcursor_select(self): self.cursor.execute("create table t1(i int)") self.cursor.execute("insert into t1 values (1)") v = self.cursor.execute("select * from t1") self.assertEqual(v, self.cursor) # # misc # def table_with_spaces(self): "Ensure we can select using [x z] syntax" try: self.cursor.execute("create table [test one](int n)") self.cursor.execute("insert into [test one] values(1)") self.cursor.execute("select * from [test one]") v = self.cursor.fetchone()[0] self.assertEqual(v, 1) finally: self.cnxn.rollback() def test_lower_case(self): "Ensure pyodbc.lowercase forces returned column names to lowercase." # Has to be set before creating the cursor, so we must recreate self.cursor. pyodbc.lowercase = True self.cursor = self.cnxn.cursor() self.cursor.execute("create table t1(Abc int, dEf int)") self.cursor.execute("select * from t1") names = [ t[0] for t in self.cursor.description ] names.sort() self.assertEqual(names, [ "abc", "def" ]) # Put it back so other tests don't fail. pyodbc.lowercase = False def test_row_description(self): """ Ensure Cursor.description is accessible as Row.cursor_description. """ self.cursor = self.cnxn.cursor() self.cursor.execute("create table t1(a int, b char(3))") self.cnxn.commit() self.cursor.execute("insert into t1 values(1, 'abc')") row = self.cursor.execute("select * from t1").fetchone() self.assertEqual(self.cursor.description, row.cursor_description) def test_temp_select(self): # A project was failing to create temporary tables via select into. 
self.cursor.execute("create table t1(s char(7))") self.cursor.execute("insert into t1 values(?)", "testing") v = self.cursor.execute("select * from t1").fetchone()[0] self.assertEqual(type(v), unicode) self.assertEqual(v, "testing") self.cursor.execute("select s into t2 from t1") v = self.cursor.execute("select * from t1").fetchone()[0] self.assertEqual(type(v), unicode) self.assertEqual(v, "testing") def test_money(self): d = Decimal('123456.78') self.cursor.execute("create table t1(i int identity(1,1), m money)") self.cursor.execute("insert into t1(m) values (?)", d) v = self.cursor.execute("select m from t1").fetchone()[0] self.assertEqual(v, d) def test_executemany(self): self.cursor.execute("create table t1(a int, b varchar(10))") params = [ (i, str(i)) for i in range(1, 6) ] self.cursor.executemany("insert into t1(a, b) values (?,?)", params) count = self.cursor.execute("select count(*) from t1").fetchone()[0] self.assertEqual(count, len(params)) self.cursor.execute("select a, b from t1 order by a") rows = self.cursor.fetchall() self.assertEqual(count, len(rows)) for param, row in zip(params, rows): self.assertEqual(param[0], row[0]) self.assertEqual(param[1], row[1]) def test_executemany_one(self): "Pass executemany a single sequence" self.cursor.execute("create table t1(a int, b varchar(10))") params = [ (1, "test") ] self.cursor.executemany("insert into t1(a, b) values (?,?)", params) count = self.cursor.execute("select count(*) from t1").fetchone()[0] self.assertEqual(count, len(params)) self.cursor.execute("select a, b from t1 order by a") rows = self.cursor.fetchall() self.assertEqual(count, len(rows)) for param, row in zip(params, rows): self.assertEqual(param[0], row[0]) self.assertEqual(param[1], row[1]) def test_executemany_dae_0(self): """ DAE for 0-length value """ self.cursor.execute("create table t1(a nvarchar(max))") self.cursor.fast_executemany = True self.cursor.executemany("insert into t1(a) values(?)", [['']]) 
        self.assertEqual(self.cursor.execute("select a from t1").fetchone()[0], '')

        self.cursor.fast_executemany = False

    def test_executemany_failure(self):
        """
        Ensure that an exception is raised if one query in an executemany fails.
        """
        self.cursor.execute("create table t1(a int, b varchar(10))")

        params = [ (1, 'good'),
                   ('error', 'not an int'),
                   (3, 'good') ]

        self.assertRaises(pyodbc.Error, self.cursor.executemany, "insert into t1(a, b) value (?, ?)", params)

    def test_row_slicing(self):
        self.cursor.execute("create table t1(a int, b int, c int, d int)");
        self.cursor.execute("insert into t1 values(1,2,3,4)")

        row = self.cursor.execute("select * from t1").fetchone()

        # A full slice returns the Row object itself, not a copy.
        result = row[:]
        self.assertTrue(result is row)

        result = row[:-1]
        self.assertEqual(result, (1,2,3))

        result = row[0:4]
        self.assertTrue(result is row)

    def test_row_repr(self):
        self.cursor.execute("create table t1(a int, b int, c int, d int)");
        self.cursor.execute("insert into t1 values(1,2,3,4)")

        row = self.cursor.execute("select * from t1").fetchone()

        result = str(row)
        self.assertEqual(result, "(1, 2, 3, 4)")

        result = str(row[:-1])
        self.assertEqual(result, "(1, 2, 3)")

        result = str(row[:1])
        self.assertEqual(result, "(1,)")

    def test_concatenation(self):
        v2 = '0123456789' * 30
        v3 = '9876543210' * 30

        self.cursor.execute("create table t1(c1 int identity(1, 1), c2 varchar(300), c3 varchar(300))")
        self.cursor.execute("insert into t1(c2, c3) values (?,?)", v2, v3)

        row = self.cursor.execute("select c2, c3, c2 + c3 as both from t1").fetchone()

        self.assertEqual(row.both, v2 + v3)

    def test_view_select(self):
        # Reported in forum: Can't select from a view?  I think I do this a lot, but another test never hurts.

        # Create a table (t1) with 3 rows and a view (t2) into it.
        self.cursor.execute("create table t1(c1 int identity(1, 1), c2 varchar(50))")
        for i in range(3):
            self.cursor.execute("insert into t1(c2) values (?)", "string%s" % i)
        self.cursor.execute("create view t2 as select * from t1")

        # Select from the view
        self.cursor.execute("select * from t2")
        rows = self.cursor.fetchall()
        self.assertTrue(rows is not None)
        self.assertTrue(len(rows) == 3)

    def test_autocommit(self):
        self.assertEqual(self.cnxn.autocommit, False)

        othercnxn = pyodbc.connect(self.connection_string, autocommit=True)
        self.assertEqual(othercnxn.autocommit, True)

        othercnxn.autocommit = False
        self.assertEqual(othercnxn.autocommit, False)

    def test_cursorcommit(self):
        "Ensure cursor.commit works"
        othercnxn = pyodbc.connect(self.connection_string)
        othercursor = othercnxn.cursor()
        # Drop our reference to the connection; the cursor keeps it alive.
        othercnxn = None

        othercursor.execute("create table t1(s varchar(20))")
        othercursor.execute("insert into t1 values(?)", 'test')
        othercursor.commit()

        value = self.cursor.execute("select s from t1").fetchone()[0]
        self.assertEqual(value, 'test')

    def test_unicode_results(self):
        "Ensure unicode_results forces Unicode"
        othercnxn = pyodbc.connect(self.connection_string, unicode_results=True)
        othercursor = othercnxn.cursor()

        # ANSI data in an ANSI column ...
        othercursor.execute("create table t1(s varchar(20))")
        othercursor.execute("insert into t1 values(?)", 'test')

        # ... should be returned as Unicode
        value = othercursor.execute("select s from t1").fetchone()[0]
        self.assertEqual(value, u'test')

    def test_sqlserver_callproc(self):
        try:
            self.cursor.execute("drop procedure pyodbctest")
            self.cnxn.commit()
        except:
            pass

        self.cursor.execute("create table t1(s varchar(10))")
        self.cursor.execute("insert into t1 values(?)", "testing")

        self.cursor.execute("""
                            create procedure pyodbctest @var1 varchar(32)
                            as
                            begin
                              select s
                              from t1
                            return
                            end
                            """)
        self.cnxn.commit()

        # for row in self.cursor.procedureColumns('pyodbctest'):
        #     print row.procedure_name, row.column_name, row.column_type, row.type_name

        self.cursor.execute("exec pyodbctest 'hi'")

        # print self.cursor.description
        # for row in self.cursor:
        #     print row.s

    def test_skip(self):
        # Insert 1, 2, and 3.  Fetch 1, skip 2, fetch 3.

        self.cursor.execute("create table t1(id int)");
        for i in range(1, 5):
            self.cursor.execute("insert into t1 values(?)", i)
        self.cursor.execute("select id from t1 order by id")
        self.assertEqual(self.cursor.fetchone()[0], 1)
        self.cursor.skip(2)
        self.assertEqual(self.cursor.fetchone()[0], 4)

    def test_timeout(self):
        self.assertEqual(self.cnxn.timeout, 0)  # defaults to zero (off)

        self.cnxn.timeout = 30
        self.assertEqual(self.cnxn.timeout, 30)

        self.cnxn.timeout = 0
        self.assertEqual(self.cnxn.timeout, 0)

    def test_sets_execute(self):
        # Only lists and tuples are allowed.
        def f():
            self.cursor.execute("create table t1 (word varchar (100))")
            words = set (['a'])
            self.cursor.execute("insert into t1 (word) VALUES (?)", [words])

        self.assertRaises(pyodbc.ProgrammingError, f)

    def test_sets_executemany(self):
        # Only lists and tuples are allowed.
        def f():
            self.cursor.execute("create table t1 (word varchar (100))")
            words = set(['a'])
            self.cursor.executemany("insert into t1 (word) values (?)", [words])

        self.assertRaises(TypeError, f)

    def test_row_execute(self):
        "Ensure we can use a Row object as a parameter to execute"
        self.cursor.execute("create table t1(n int, s varchar(10))")
        self.cursor.execute("insert into t1 values (1, 'a')")

        row = self.cursor.execute("select n, s from t1").fetchone()
        self.assertNotEqual(row, None)

        self.cursor.execute("create table t2(n int, s varchar(10))")
        self.cursor.execute("insert into t2 values (?, ?)", row)

    def test_row_executemany(self):
        "Ensure we can use a Row object as a parameter to executemany"
        self.cursor.execute("create table t1(n int, s varchar(10))")
        for i in range(3):
            self.cursor.execute("insert into t1 values (?, ?)", i, chr(ord('a')+i))

        rows = self.cursor.execute("select n, s from t1").fetchall()
        self.assertNotEqual(len(rows), 0)

        self.cursor.execute("create table t2(n int, s varchar(10))")
        self.cursor.executemany("insert into t2 values (?, ?)", rows)

    def test_description(self):
        "Ensure cursor.description is correct"
        self.cursor.execute("create table t1(n int, s varchar(8), d decimal(5,2))")
        self.cursor.execute("insert into t1 values (1, 'abc', '1.23')")
        self.cursor.execute("select * from t1")

        # (I'm not sure the precision of an int is constant across different versions, bits, so I'm hand checking the
        # items I do know.

        # int
        t = self.cursor.description[0]
        self.assertEqual(t[0], 'n')
        self.assertEqual(t[1], int)
        self.assertEqual(t[5], 0)       # scale
        self.assertEqual(t[6], True)    # nullable

        # varchar(8)
        t = self.cursor.description[1]
        self.assertEqual(t[0], 's')
        self.assertEqual(t[1], str)
        self.assertEqual(t[4], 8)       # precision
        self.assertEqual(t[5], 0)       # scale
        self.assertEqual(t[6], True)    # nullable

        # decimal(5, 2)
        t = self.cursor.description[2]
        self.assertEqual(t[0], 'd')
        self.assertEqual(t[1], Decimal)
        self.assertEqual(t[4], 5)       # precision
        self.assertEqual(t[5], 2)       # scale
        self.assertEqual(t[6], True)    # nullable

    def test_cursor_messages_with_print(self):
        """
        Ensure the Cursor.messages attribute is handled correctly with a simple PRINT statement.
        """
        # self.cursor is used in setUp, hence is not brand new at this point
        brand_new_cursor = self.cnxn.cursor()
        self.assertIsNone(brand_new_cursor.messages)

        # SQL Server PRINT statements are never more than 8000 characters
        # https://docs.microsoft.com/en-us/sql/t-sql/language-elements/print-transact-sql#remarks
        for msg in ('hello world', 'ABCDEFGHIJ' * 800):
            self.cursor.execute("PRINT '{}'".format(msg))
            messages = self.cursor.messages
            self.assertTrue(type(messages) is list)
            self.assertEqual(len(messages), 1)
            self.assertTrue(type(messages[0]) is tuple)
            self.assertEqual(len(messages[0]), 2)
            self.assertTrue(type(messages[0][0]) is unicode)
            self.assertTrue(type(messages[0][1]) is unicode)
            self.assertEqual('[01000] (0)', messages[0][0])
            self.assertTrue(messages[0][1].endswith(msg))

    def test_cursor_messages_with_stored_proc(self):
        """
        Complex scenario to test the Cursor.messages attribute.
        """
        self.cursor.execute("""
            CREATE OR ALTER PROCEDURE test_cursor_messages AS
            BEGIN
                SET NOCOUNT ON;
                PRINT 'Message 1a';
                PRINT 'Message 1b';
                SELECT N'Field 1a' AS F UNION ALL SELECT N'Field 1b';
                SELECT N'Field 2a' AS F UNION ALL SELECT N'Field 2b';
                PRINT 'Message 2a';
                PRINT 'Message 2b';
            END
        """)

        # result set 1: PRINTs issued before a result set attach to it
        self.cursor.execute("EXEC test_cursor_messages")
        rows = [tuple(r) for r in self.cursor.fetchall()]  # convert pyodbc.Row objects for ease of use
        self.assertEqual(len(rows), 2)
        self.assertSequenceEqual(rows, [('Field 1a', ), ('Field 1b', )])
        self.assertEqual(len(self.cursor.messages), 2)
        self.assertTrue(self.cursor.messages[0][1].endswith('Message 1a'))
        self.assertTrue(self.cursor.messages[1][1].endswith('Message 1b'))

        # result set 2: no PRINTs between the two SELECTs, so no messages
        self.assertTrue(self.cursor.nextset())
        rows = [tuple(r) for r in self.cursor.fetchall()]  # convert pyodbc.Row objects for ease of use
        self.assertEqual(len(rows), 2)
        self.assertSequenceEqual(rows, [('Field 2a', ), ('Field 2b', )])
        self.assertEqual(self.cursor.messages, [])

        # result set 3: trailing PRINTs form a message-only "result set"
        self.assertTrue(self.cursor.nextset())
        with self.assertRaises(pyodbc.ProgrammingError):
            self.cursor.fetchall()
        self.assertEqual(len(self.cursor.messages), 2)
        self.assertTrue(self.cursor.messages[0][1].endswith('Message 2a'))
        self.assertTrue(self.cursor.messages[1][1].endswith('Message 2b'))

        # result set 4 (which shouldn't exist)
        self.assertFalse(self.cursor.nextset())
        with self.assertRaises(pyodbc.ProgrammingError):
            self.cursor.fetchall()
        self.assertEqual(self.cursor.messages, [])

    def test_none_param(self):
        "Ensure None can be used for params other than the first"
        # Some driver/db versions would fail if NULL was not the first parameter because SQLDescribeParam (only used
        # with NULL) could not be used after the first call to SQLBindParameter.  This means None always worked for the
        # first column, but did not work for later columns.
        #
        # If SQLDescribeParam doesn't work, pyodbc would use VARCHAR which almost always worked.  However,
        # binary/varbinary won't allow an implicit conversion.

        self.cursor.execute("create table t1(n int, blob varbinary(max))")
        self.cursor.execute("insert into t1 values (1, newid())")
        row = self.cursor.execute("select * from t1").fetchone()
        self.assertEqual(row.n, 1)
        self.assertEqual(type(row.blob), bytearray)

        sql = "update t1 set n=?, blob=?"
        try:
            self.cursor.execute(sql, 2, None)
        except pyodbc.DataError:
            if self.driver_type_is('freetds'):
                # FREETDS_KNOWN_ISSUE
                #
                # cnxn.getinfo(pyodbc.SQL_DESCRIBE_PARAMETER) returns False for FreeTDS, so
                # pyodbc can't call SQLDescribeParam to get the correct parameter type.
                # This can lead to errors being returned from SQL Server when sp_prepexec is called,
                # e.g., "Implicit conversion from data type varchar to varbinary(max) is not allowed."
                #
                # So at least verify that the user can manually specify the parameter type
                self.cursor.setinputsizes([(), (pyodbc.SQL_VARBINARY, None, None)])
                self.cursor.execute(sql, 2, None)
            else:
                raise

        row = self.cursor.execute("select * from t1").fetchone()
        self.assertEqual(row.n, 2)
        self.assertEqual(row.blob, None)

    def test_output_conversion(self):
        # Register an output converter for SQL_VARCHAR and verify it is applied.
        def convert(value):
            # `value` will be a string.  We'll simply add an X at the beginning at the end.
            return 'X' + value + 'X'

        self.cnxn.add_output_converter(pyodbc.SQL_VARCHAR, convert)
        self.cursor.execute("create table t1(n int, v varchar(10))")
        self.cursor.execute("insert into t1 values (1, '123.45')")
        value = self.cursor.execute("select v from t1").fetchone()[0]
        self.assertEqual(value, 'X123.45X')

        # Now clear the conversions and try again.  There should be no Xs this time.
        self.cnxn.clear_output_converters()
        value = self.cursor.execute("select v from t1").fetchone()[0]
        self.assertEqual(value, '123.45')

    def test_too_large(self):
        """Ensure error raised if insert fails due to truncation"""
        value = 'x' * 1000
        self.cursor.execute("create table t1(s varchar(800))")

        def test():
            self.cursor.execute("insert into t1 values (?)", value)

        # different versions of SQL Server generate different errors
        self.assertRaises((pyodbc.DataError, pyodbc.ProgrammingError), test)

    def test_geometry_null_insert(self):
        # Identity converter so the raw geometry bytes (or None) come back untouched.
        def convert(value):
            return value

        self.cnxn.add_output_converter(-151, convert)  # -151 is SQL Server's geometry
        self.cursor.execute("create table t1(n int, v geometry)")
        self.cursor.execute("insert into t1 values (?, ?)", 1, None)
        value = self.cursor.execute("select v from t1").fetchone()[0]
        self.assertEqual(value, None)
        self.cnxn.clear_output_converters()

    def test_login_timeout(self):
        # This can only test setting since there isn't a way to cause it to block on the server side.
        cnxns = pyodbc.connect(self.connection_string, timeout=2)

    def test_row_equal(self):
        self.cursor.execute("create table t1(n int, s varchar(20))")
        self.cursor.execute("insert into t1 values (1, 'test')")
        row1 = self.cursor.execute("select n, s from t1").fetchone()
        row2 = self.cursor.execute("select n, s from t1").fetchone()
        b = (row1 == row2)
        self.assertEqual(b, True)

    def test_row_gtlt(self):
        self.cursor.execute("create table t1(n int, s varchar(20))")
        self.cursor.execute("insert into t1 values (1, 'test1')")
        self.cursor.execute("insert into t1 values (1, 'test2')")
        rows = self.cursor.execute("select n, s from t1 order by s").fetchall()
        self.assertTrue(rows[0] < rows[1])
        self.assertTrue(rows[0] <= rows[1])
        self.assertTrue(rows[1] > rows[0])
        self.assertTrue(rows[1] >= rows[0])
        self.assertTrue(rows[0] != rows[1])

        rows = list(rows)
        rows.sort()  # uses <

    def test_context_manager_success(self):
        """
        Ensure a successful with statement causes a commit.
        """
        self.cursor.execute("create table t1(n int)")
        self.cnxn.commit()

        with pyodbc.connect(self.connection_string) as cnxn:
            cursor = cnxn.cursor()
            cursor.execute("insert into t1 values (1)")

        cnxn = None
        cursor = None

        rows = self.cursor.execute("select n from t1").fetchall()
        self.assertEqual(len(rows), 1)
        self.assertEqual(rows[0][0], 1)

    def test_context_manager_fail(self):
        """
        Ensure an exception in a with statement causes a rollback.
        """
        self.cursor.execute("create table t1(n int)")
        self.cnxn.commit()

        try:
            with pyodbc.connect(self.connection_string) as cnxn:
                cursor = cnxn.cursor()
                cursor.execute("insert into t1 values (1)")
                raise Exception("Testing failure")
        except Exception:
            pass

        cnxn = None
        cursor = None

        count = self.cursor.execute("select count(*) from t1").fetchone()[0]
        self.assertEqual(count, 0)

    def test_cursor_context_manager_success(self):
        """
        Ensure a successful with statement using a cursor causes a commit.
        """
        self.cursor.execute("create table t1(n int)")
        self.cnxn.commit()

        with pyodbc.connect(self.connection_string).cursor() as cursor:
            cursor.execute("insert into t1 values (1)")

        cursor = None

        rows = self.cursor.execute("select n from t1").fetchall()
        self.assertEqual(len(rows), 1)
        self.assertEqual(rows[0][0], 1)

    def test_cursor_context_manager_fail(self):
        """
        Ensure an exception in a with statement using a cursor causes a rollback.
        """
        self.cursor.execute("create table t1(n int)")
        self.cnxn.commit()

        try:
            with pyodbc.connect(self.connection_string).cursor() as cursor:
                cursor.execute("insert into t1 values (1)")
                raise Exception("Testing failure")
        except Exception:
            pass

        cursor = None

        count = self.cursor.execute("select count(*) from t1").fetchone()[0]
        self.assertEqual(count, 0)

    def test_untyped_none(self):
        # From issue 129
        value = self.cursor.execute("select ?", None).fetchone()[0]
        self.assertEqual(value, None)

    def test_large_update_nodata(self):
        self.cursor.execute('create table t1(a varbinary(max))')
        hundredkb = bytearray('x'*100*1024)
        self.cursor.execute('update t1 set a=? where 1=0', (hundredkb,))

    def test_func_param(self):
        self.cursor.execute('''
            create function func1 (@testparam varchar(4))
            returns @rettest table (param varchar(4))
            as
            begin
                insert @rettest
                select @testparam
            return
            end
            ''')
        self.cnxn.commit()
        value = self.cursor.execute("select * from func1(?)", 'test').fetchone()[0]
        self.assertEqual(value, 'test')

    def test_no_fetch(self):
        # Issue 89 with FreeTDS: Multiple selects (or catalog functions that issue selects) without fetches seem to
        # confuse the driver.
        self.cursor.execute('select 1')
        self.cursor.execute('select 1')
        self.cursor.execute('select 1')

    def test_drivers(self):
        drivers = pyodbc.drivers()
        self.assertEqual(list, type(drivers))
        self.assertTrue(len(drivers) > 0)

        # the driver named in the connection string must be among the installed ones
        m = re.search('DRIVER={?([^}]+?)}?;', self.connection_string, re.IGNORECASE)
        current = m.group(1)
        self.assertTrue(current in drivers)

    def test_prepare_cleanup(self):
        # When statement is prepared, it is kept in case the next execute uses the same statement.  This must be
        # removed when a non-execute statement is used that returns results, such as SQLTables.
        self.cursor.execute("select top 1 name from sysobjects where name = ?", "bogus")
        self.cursor.fetchone()

        self.cursor.tables("bogus")

        self.cursor.execute("select top 1 name from sysobjects where name = ?", "bogus")
        self.cursor.fetchone()

    def test_exc_integrity(self):
        "Make sure an IntegrityError is raised"
        # This is really making sure we are properly encoding and comparing the SQLSTATEs.
        self.cursor.execute("create table t1(s1 varchar(10) primary key)")
        self.cursor.execute("insert into t1 values ('one')")
        self.assertRaises(pyodbc.IntegrityError, self.cursor.execute, "insert into t1 values ('one')")

    def test_emoticons_as_parameter(self):
        # https://github.com/mkleehammer/pyodbc/issues/423
        #
        # When sending a varchar parameter, pyodbc is supposed to set ColumnSize to the number
        # of characters.  Ensure it works even with 4-byte characters.
        #
        # http://www.fileformat.info/info/unicode/char/1f31c/index.htm

        v = "x \U0001F31C z"

        self.cursor.execute("create table t1(s varchar(100))")
        self.cursor.execute("insert into t1 values (?)", v)

        result = self.cursor.execute("select s from t1").fetchone()[0]

        self.assertEqual(result, v)

    def test_emoticons_as_literal(self):
        # similar to `test_emoticons_as_parameter`, above, except for Unicode literal
        #
        # http://www.fileformat.info/info/unicode/char/1f31c/index.htm

        v = "x \U0001F31C z"

        self.cursor.execute("create table t1(s varchar(100))")
        self.cursor.execute("insert into t1 values (N'%s')" % v)

        result = self.cursor.execute("select s from t1").fetchone()[0]

        self.assertEqual(result, v)

    def _test_tvp(self, diff_schema):
        # https://github.com/mkleehammer/pyodbc/issues/290
        #
        # pyodbc supports queries with table valued parameters in sql server
        #

        if self.handle_known_issues_for('freetds', print_reminder=True):
            warn('FREETDS_KNOWN_ISSUE - test_tvp: test cancelled.')
            return

        procname = 'SelectTVP'
        typename = 'TestTVP'

        if diff_schema:
            schemaname = 'myschema'
            procname = schemaname + '.' + procname
            typenameonly = typename
            typename = schemaname + '.' + typename

        # (Don't use "if exists" since older SQL Servers don't support it.)
        try:
            self.cursor.execute("drop procedure " + procname)
        except:
            pass
        try:
            self.cursor.execute("drop type " + typename)
        except:
            pass
        if diff_schema:
            try:
                self.cursor.execute("drop schema " + schemaname)
            except:
                pass
        self.cursor.commit()

        if diff_schema:
            self.cursor.execute("CREATE SCHEMA myschema")
            self.cursor.commit()

        # One column for each type the TVP round-trip should preserve.
        query = "CREATE TYPE %s AS TABLE("\
                "c01 VARCHAR(255),"\
                "c02 VARCHAR(MAX),"\
                "c03 VARBINARY(255),"\
                "c04 VARBINARY(MAX),"\
                "c05 BIT,"\
                "c06 DATE,"\
                "c07 TIME,"\
                "c08 DATETIME2(5),"\
                "c09 BIGINT,"\
                "c10 FLOAT,"\
                "c11 NUMERIC(38, 24),"\
                "c12 UNIQUEIDENTIFIER)" % typename

        self.cursor.execute(query)
        self.cursor.commit()
        self.cursor.execute("CREATE PROCEDURE %s @TVP %s READONLY AS SELECT * FROM @TVP;" % (procname, typename))
        self.cursor.commit()

        # Build short, empty and long/very-long values to exercise all fetch paths.
        long_string = ''
        long_bytearray = []
        for i in range(255):
            long_string += chr((i % 95) + 32)
            long_bytearray.append(i % 255)

        very_long_string = ''
        very_long_bytearray = []
        for i in range(2000000):
            very_long_string += chr((i % 95) + 32)
            very_long_bytearray.append(i % 255)

        c01 = ['abc', '', long_string]
        c02 = ['abc', '', very_long_string]
        c03 = [bytearray([0xD1, 0xCE, 0xFA, 0xCE]),
               bytearray([0x00, 0x01, 0x02, 0x03, 0x04]),
               bytearray(long_bytearray)]
        c04 = [bytearray([0x0F, 0xF1, 0xCE, 0xCA, 0xFE]),
               bytearray([0x00, 0x01, 0x02, 0x03, 0x04, 0x05]),
               bytearray(very_long_bytearray)]
        c05 = [1, 0, 1]
        c06 = [date(1997, 8, 29), date(1, 1, 1), date(9999, 12, 31)]
        c07 = [time(9, 13, 39), time(0, 0, 0), time(23, 59, 59)]
        c08 = [datetime(2018, 11, 13, 13, 33, 26, 298420),
               datetime(1, 1, 1, 0, 0, 0, 0),
               datetime(9999, 12, 31, 23, 59, 59, 999990)]
        c09 = [1234567, -9223372036854775808, 9223372036854775807]
        c10 = [3.14, -1.79E+308, 1.79E+308]
        c11 = [Decimal('31234567890123.141243449787580175325274'),
               Decimal('0.000000000000000000000001'),
               Decimal('99999999999999.999999999999999999999999')]
        c12 = ['4FE34A93-E574-04CC-200A-353F0D1770B1',
               '33F7504C-2BAC-1B83-01D1-7434A7BA6A17',
               'FFFFFFFF-FFFF-FFFF-FFFF-FFFFFFFFFFFF']

        param_array = []

        for i in range(3):
            param_array.append([c01[i], c02[i], c03[i], c04[i], c05[i], c06[i], c07[i], c08[i], c09[i], c10[i], c11[i], c12[i]])

        success = True

        try:
            p1 = [param_array]
            if diff_schema:
                # When the type lives in another schema the TVP rows are
                # prefixed with the type name and schema name.
                p1 = [ [ typenameonly, schemaname ] + param_array ]
            result_array = self.cursor.execute("exec %s ?" % procname, p1).fetchall()
        except Exception as ex:
            print("Failed to execute SelectTVP")
            print("Exception: [" + type(ex).__name__ + "]", ex.args)
            success = False
        else:
            # Compare every returned cell against what was sent.
            for r in range(len(result_array)):
                for c in range(len(result_array[r])):
                    if(result_array[r][c] != param_array[r][c]):
                        print("Mismatch at row " + str(r+1) + ", column " + str(c+1) + "; expected:", param_array[r][c], " received:", result_array[r][c])
                        success = False

        # An empty TVP should produce an empty result set.
        try:
            p1 = [[]]
            if diff_schema:
                p1 = [ [ typenameonly, schemaname ] + [] ]
            result_array = self.cursor.execute("exec %s ?" % procname, p1).fetchall()
            self.assertEqual(result_array, [])
        except Exception as ex:
            print("Failed to execute SelectTVP")
            print("Exception: [" + type(ex).__name__ + "]", ex.args)
            success = False

        self.assertEqual(success, True)

    def test_columns(self):
        self.cursor.execute(
            """
            create table t1(n int, d datetime, c nvarchar(100))
            """)

        self.cursor.columns(table=u't1')
        names = {row.column_name for row in self.cursor.fetchall()}
        assert names == {'n', 'd', 'c'}, 'names=%r' % names

        self.cursor.columns(table=u't1', column=u'c')
        row = self.cursor.fetchone()
        assert row.column_name == 'c'

        # Same tests but with str instead of unicode.
        self.cursor.columns(table='t1')
        names = {row.column_name for row in self.cursor.fetchall()}
        assert names == {'n', 'd', 'c'}, 'names=%r' % names

        self.cursor.columns(table='t1', column='c')
        row = self.cursor.fetchone()
        assert row.column_name == 'c'

    def test_tvp(self):
        self._test_tvp(False)

    def test_tvp_diffschema(self):
        self._test_tvp(True)


def main():
    # Command-line entry point: parse options, resolve the connection string,
    # then run the (optionally filtered) test suite.
    from optparse import OptionParser
    parser = OptionParser(usage=usage)
    parser.add_option("-v", "--verbose", action="count", help="Increment test verbosity (can be used multiple times)")
    parser.add_option("-d", "--debug", action="store_true", default=False, help="Print debugging items")
    parser.add_option("-t", "--test", help="Run only the named test")

    (options, args) = parser.parse_args()

    if len(args) > 1:
        parser.error('Only one argument is allowed.  Do you need quotes around the connection string?')

    if not args:
        # No connection string on the command line; fall back to setup.cfg.
        connection_string = load_setup_connection_string('sqlservertests')

        if not connection_string:
            parser.print_help()
            raise SystemExit()
    else:
        connection_string = args[0]

    if options.verbose:
        cnxn = pyodbc.connect(connection_string)
        print_library_info(cnxn)
        cnxn.close()

    suite = load_tests(SqlServerTestCase, options.test, connection_string)

    testRunner = unittest.TextTestRunner(verbosity=options.verbose)
    result = testRunner.run(suite)

    return result


if __name__ == '__main__':

    # Add the build directory to the path so we're testing the latest build, not the installed version.
add_to_path() import pyodbc sys.exit(0 if main().wasSuccessful() else 1) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1619054768.0 pyodbc-4.0.32/tests2/test.py0000775000175000017500000000207400000000000017206 0ustar00mkleehammermkleehammer#!/usr/bin/env python from testutils import * add_to_path() import pyodbc def main(): from optparse import OptionParser parser = OptionParser() parser.add_option("-v", "--verbose", action="count", help="Increment test verbosity (can be used multiple times)") parser.add_option("-d", "--debug", action="store_true", default=False, help="Print debugging items") (options, args) = parser.parse_args() if len(args) > 1: parser.error('Only one argument is allowed. Do you need quotes around the connection string?') if not args: connection_string = load_setup_connection_string('test') if not connection_string: print('no connection string') parser.print_help() raise SystemExit() else: connection_string = args[0] cnxn = pyodbc.connect(connection_string) if options.verbose: print_library_info(cnxn) cursor = cnxn.cursor() cursor.execute("select 'å' as uk, 'b' as jp") row = cursor.fetchone() print(row) if __name__ == '__main__': main() ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1619054768.0 pyodbc-4.0.32/tests2/test.xls0000664000175000017500000004300000000000000017353 0ustar00mkleehammermkleehammerࡱ> !  
Root Entry FA>Workbook6SummaryInformation(DocumentSummaryInformation8@ \pMichael Kleehammer Ba==ZWN+8X@"1Calibri1Calibri1Calibri1Calibri1Calibri1 Calibri1Calibri14Calibri1 Calibri1Calibri1Calibri1,8Calibri18Calibri18Calibri1>Calibri14Calibri1<Calibri1?Calibri1h8Cambria1Calibri1 Calibri"$"#,##0_);\("$"#,##0\)!"$"#,##0_);[Red]\("$"#,##0\)""$"#,##0.00_);\("$"#,##0.00\)'""$"#,##0.00_);[Red]\("$"#,##0.00\)7*2_("$"* #,##0_);_("$"* \(#,##0\);_("$"* "-"_);_(@_).))_(* #,##0_);_(* \(#,##0\);_(* "-"_);_(@_)?,:_("$"* #,##0.00_);_("$"* \(#,##0.00\);_("$"* "-"??_);_(@_)6+1_(* #,##0.00_);_(* \(#,##0.00\);_(* "-"??_);_(@_)                                                                       ff + ) , *     P  P        `            a>  ||>j=}-} 00\);_(*}-} 00\);_(*}-} 00\);_(*}-} 00\);_(*}-} 00\);_(*}-} 00\);_(*}-} 00\);_(*}-} 00\);_(*}-} 00\);_(*}-}  00\);_(*}-}  00\);_(*}-}  00\);_(*}-}  00\);_(*}-}  00\);_(*}-} 00\);_(*}-} 00\);_(*}A} 00\);_(*ef;_(@_) }A} 00\);_(*ef;_(@_) }A} 00\);_(*ef;_(@_) }A} 00\);_(*ef;_(@_) }A} 00\);_(*ef;_(@_) }A} 00\);_(*ef ;_(@_) }A} 00\);_(*L;_(@_) }A} 00\);_(*L;_(@_) }A} 00\);_(*L;_(@_) }A} 00\);_(*L;_(@_) }A} 00\);_(*L;_(@_) }A} 00\);_(*L ;_(@_) }A} 00\);_(*23;_(@_) }A} 00\);_(*23;_(@_) }A} 00\);_(*23;_(@_) }A} 00\);_(*23;_(@_) }A}  00\);_(*23;_(@_) }A}! 00\);_(*23 ;_(@_) }A}" 00\);_(*;_(@_) }A}# 00\);_(*;_(@_) }A}$ 00\);_(*;_(@_) }A}% 00\);_(*;_(@_) }A}& 00\);_(*;_(@_) }A}' 00\);_(* ;_(@_) }A}( 00\);_(*;_(@_) }}) }00\);_(*;_(@_)    }}* 00\);_(*;_(@_) ??? ??? ??? ???}-}+ 00\);_(*}-}, 00\);_(*}-}- 00\);_(*}-}. 00\);_(*}-}/ 00\);_(*}A}0 a00\);_(*;_(@_) }A}1 00\);_(*;_(@_) }A}2 00\);_(*?;_(@_) }A}3 00\);_(*23;_(@_) }-}4 00\);_(*}}5 ??v00\);_(*̙;_(@_)    }A}6 }00\);_(*;_(@_) }A}7 e00\);_(*;_(@_) }}8 00\);_(*;_(@_)    }}9 ???00\);_(*;_(@_) ??? ??? ??? 
???}-}: 00\);_(*}-}; 00\);_(*}U}< 00\);_(*;_(@_)  }-}= 00\);_(* 20% - Accent1M 20% - Accent1 ef % 20% - Accent2M" 20% - Accent2 ef % 20% - Accent3M& 20% - Accent3 ef % 20% - Accent4M* 20% - Accent4 ef % 20% - Accent5M. 20% - Accent5 ef % 20% - Accent6M2 20% - Accent6  ef % 40% - Accent1M 40% - Accent1 L % 40% - Accent2M# 40% - Accent2 L湸 % 40% - Accent3M' 40% - Accent3 L % 40% - Accent4M+ 40% - Accent4 L % 40% - Accent5M/ 40% - Accent5 L % 40% - Accent6M3 40% - Accent6  Lմ % 60% - Accent1M 60% - Accent1 23 % 60% - Accent2M$ 60% - Accent2 23ٗ % 60% - Accent3M( 60% - Accent3 23֚ % 60% - Accent4M, 60% - Accent4 23 % 60% - Accent5M0 60% - Accent5 23 %! 60% - Accent6M4 60% - Accent6  23 % "Accent1AAccent1 O % #Accent2A!Accent2 PM % $Accent3A%Accent3 Y % %Accent4A)Accent4 d % &Accent5A-Accent5 K % 'Accent6A1Accent6  F %(Bad9Bad  %) Calculation Calculation  }% * Check Cell Check Cell  %????????? ???+ Comma,( Comma [0]-&Currency.. Currency [0]/Explanatory TextG5Explanatory Text % 0Good;Good  a%1 Heading 1G Heading 1 I}%O2 Heading 2G Heading 2 I}%?3 Heading 3G Heading 3 I}%234 Heading 49 Heading 4 I}% 5InputuInput ̙ ??v% 6 Linked CellK Linked Cell }% 7NeutralANeutral  e%3Normal % 8Noteb Note   9OutputwOutput  ???%????????? ???:$Percent ;Title1Title I}% <TotalMTotal %OO= Warning Text? Warning Text %XTableStyleMedium9PivotStyleLight16`F/ Read DataF3Sheet2% AppendTable;  Table1; abcdefghijnumval#The values below are named 'Table1'((In Excel 2007, use Data | Name Manager)s2nums2valabcdefghijklmno. ;.,.ccB   \02  dMbP?_*+%,&ffffff?'ffffff?(?)?",B333333?333333?&<3U ,,,,,,,,, , , , ,     ~ ? 
~ @ ~ @ ~ @ ~ @ ~ @ ~ @ ~  @ ~ "@ ~ $@  T>@  ggD  \45  dMbP?_*+%,&ffffff?'ffffff?(?)?",333333?333333?&<3U,,,,,,  ~ $@ ~ 4@ ~ >@ ~ D@ ~ I@  d>@ggD  Oh+'0@Hd Michael KleehammerMichael KleehammerMicrosoft Excel@b@>՜.+,0 PXl t|  CheckFree  Read DataSheet2 AppendTableTable1  Worksheets Named Ranges F&Microsoft Office Excel 2003 WorksheetBiff8Excel.Sheet.89qCompObj r././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1619054768.0 pyodbc-4.0.32/tests2/testbase.py0000775000175000017500000000146300000000000020042 0ustar00mkleehammermkleehammer import unittest _TESTSTR = '0123456789-abcdefghijklmnopqrstuvwxyz-' def _generate_test_string(length): """ Returns a string of `length` characters, constructed by repeating _TESTSTR as necessary. To enhance performance, there are 3 ways data is read, based on the length of the value, so most data types are tested with 3 lengths. This function helps us generate the test data. We use a recognizable data set instead of a single character to make it less likely that "overlap" errors will be hidden and to help us manually identify where a break occurs. """ if length <= len(_TESTSTR): return _TESTSTR[:length] c = (length + len(_TESTSTR)-1) / len(_TESTSTR) v = _TESTSTR * c return v[:length] class TestBase(unittest.TestCase): ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1625348714.0 pyodbc-4.0.32/tests2/testutils.py0000775000175000017500000001015400000000000020265 0ustar00mkleehammermkleehammerfrom __future__ import print_function import os, sys, platform from os.path import join, dirname, abspath, basename import unittest from distutils.util import get_platform def add_to_path(): """ Prepends the build directory to the path so that newly built pyodbc libraries are used, allowing it to be tested without installing it. """ # Put the build directory into the Python path so we pick up the version we just built. 
# # To make this cross platform, we'll search the directories until we find the .pyd file. import imp library_exts = [ t[0] for t in imp.get_suffixes() if t[-1] == imp.C_EXTENSION ] library_names = [ 'pyodbc%s' % ext for ext in library_exts ] # Only go into directories that match our version number. dir_suffix = '%s-%s.%s' % (get_platform(), sys.version_info[0], sys.version_info[1]) build = join(dirname(dirname(abspath(__file__))), 'build') for root, dirs, files in os.walk(build): for d in dirs[:]: if not d.endswith(dir_suffix): dirs.remove(d) for name in library_names: if name in files: sys.path.insert(0, root) print('Library:', join(root, name)) return print('Did not find the pyodbc library in the build directory. Will use an installed version.') def print_library_info(cnxn): import pyodbc print('python: %s' % sys.version) print('pyodbc: %s %s' % (pyodbc.version, os.path.abspath(pyodbc.__file__))) print('odbc: %s' % cnxn.getinfo(pyodbc.SQL_ODBC_VER)) print('driver: %s %s' % (cnxn.getinfo(pyodbc.SQL_DRIVER_NAME), cnxn.getinfo(pyodbc.SQL_DRIVER_VER))) print(' supports ODBC version %s' % cnxn.getinfo(pyodbc.SQL_DRIVER_ODBC_VER)) print('os: %s' % platform.system()) print('unicode: Py_Unicode=%s SQLWCHAR=%s' % (pyodbc.UNICODE_SIZE, pyodbc.SQLWCHAR_SIZE)) cursor = cnxn.cursor() for typename in ['VARCHAR', 'WVARCHAR', 'BINARY']: t = getattr(pyodbc, 'SQL_' + typename) try: cursor.getTypeInfo(t) except pyodbc.Error as e: print('Max %s = (not supported)' % (typename, )) else: row = cursor.fetchone() print('Max %s = %s' % (typename, row and row[2] or '(not supported)')) if platform.system() == 'Windows': print(' %s' % ' '.join([s for s in platform.win32_ver() if s])) def load_tests(testclass, name, *args): """ Returns a TestSuite for tests in `testclass`. name Optional test name if you only want to run 1 test. If not provided all tests in `testclass` will be loaded. args Arguments for the test class constructor. These will be passed after the test method name. 
""" if name: if not name.startswith('test_'): name = 'test_%s' % name names = [ name ] else: names = [ method for method in dir(testclass) if method.startswith('test_') ] return unittest.TestSuite([ testclass(name, *args) for name in names ]) def load_setup_connection_string(section): """ Attempts to read the default connection string from the setup.cfg file. If the file does not exist or if it exists but does not contain the connection string, None is returned. If the file exists but cannot be parsed, an exception is raised. """ from os.path import exists, join, dirname, splitext, basename from ConfigParser import SafeConfigParser FILENAME = 'setup.cfg' KEY = 'connection-string' path = dirname(abspath(__file__)) while True: fqn = join(path, 'tmp', FILENAME) if exists(fqn): break parent = dirname(path) print('{} --> {}'.format(path, parent)) if parent == path: return None path = parent try: p = SafeConfigParser() p.read(fqn) except: raise SystemExit('Unable to parse %s: %s' % (path, sys.exc_info()[1])) if p.has_option(section, KEY): return p.get(section, KEY) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1625348724.0 pyodbc-4.0.32/tests2/testutils.pyc0000664000175000017500000001120100000000000020417 0ustar00mkleehammermkleehammer j`c@sddlmZddlZddlZddlZddlmZmZmZm Z ddl Z ddl m Z dZ dZdZdZdS( i(tprint_functionN(tjointdirnametabspathtbasename(t get_platformc CsPddl}g|jD]#}|d|jkr|d^q}g|D]}d|^qI}dttjdtjdf}ttttt d}xt j |D]\}}} x.|D]%} | j |s|j | qqWxG|D]?} | | krtjjd|tdt|| dSqWqWtd dS( s Prepends the build directory to the path so that newly built pyodbc libraries are used, allowing it to be tested without installing it. iNispyodbc%ss%s-%s.%sitbuildsLibrary:sWDid not find the pyodbc library in the build directory. 
Will use an installed version.(timpt get_suffixest C_EXTENSIONRtsyst version_infoRRRt__file__tostwalktendswithtremovetpathtinserttprint( Rttt library_extstextt library_namest dir_suffixRtroottdirstfilestdtname((s0/home/mkleehammer/dev/pyodbc/tests2/testutils.pyt add_to_paths 6$!   cCsddl}tdtjtd|jtjj|jftd|j|j td|j|j |j|j ftd|j|j tdt jtd|j|jf|j}xd d d gD]}t|d |}y|j|Wn'|jk r@}td |fqX|j}td||rf|dpidfqWt jdkrtddjgt jD]}|r|^qndS(Nis python: %sspyodbc: %s %ss odbc: %ssdriver: %s %ss! supports ODBC version %ss os: %ss"unicode: Py_Unicode=%s SQLWCHAR=%stVARCHARtWVARCHARtBINARYtSQL_sMax %s = (not supported)s Max %s = %sis(not supported)tWindowss %st (tpyodbcRR tversionR RRR tgetinfot SQL_ODBC_VERtSQL_DRIVER_NAMEtSQL_DRIVER_VERtSQL_DRIVER_ODBC_VERtplatformtsystemt UNICODE_SIZEt SQLWCHAR_SIZEtcursortgetattrt getTypeInfotErrortfetchoneRt win32_ver(tcnxnR%R0ttypenameRtetrowts((s0/home/mkleehammer/dev/pyodbc/tests2/testutils.pytprint_library_info*s& &,  (cGs|r.|jds"d|}n|g}n.gt|D]}|jdr;|^q;}tjg|D]}|||^qiS(s  Returns a TestSuite for tests in `testclass`. name Optional test name if you only want to run 1 test. If not provided all tests in `testclass` will be loaded. args Arguments for the test class constructor. These will be passed after the test method name. ttest_stest_%s(t startswithtdirtunittestt TestSuite(t testclassRtargstnamestmethod((s0/home/mkleehammer/dev/pyodbc/tests2/testutils.pyt load_testsCs   .c Cs'ddlm}m}m}m}m}ddlm}d}d}|tt } xdt r|| d|} || rPn|| } t dj | | | | krd S| } qYWy|} | j| Wn'td| tjd fnX| j||r#| j||Sd S( s Attempts to read the default connection string from the setup.cfg file. If the file does not exist or if it exists but does not contain the connection string, None is returned. If the file exists but cannot be parsed, an exception is raised. 
i(texistsRRtsplitextR(tSafeConfigParsers setup.cfgsconnection-stringttmps {} --> {}sUnable to parse %s: %siN(tos.pathRFRRRGRt ConfigParserRHRR tTrueRtformattNonetreadt SystemExitR texc_infot has_optiontget( tsectionRFRRRGRRHtFILENAMEtKEYRtfqntparenttp((s0/home/mkleehammer/dev/pyodbc/tests2/testutils.pytload_setup_connection_stringXs*(      $(t __future__RR R R,RJRRRRR?tdistutils.utilRRR;RERZ(((s0/home/mkleehammer/dev/pyodbc/tests2/testutils.pyts$"  "  ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1629404586.3039672 pyodbc-4.0.32/tests3/0000775000175000017500000000000000000000000015650 5ustar00mkleehammermkleehammer././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1629144457.0 pyodbc-4.0.32/tests3/accesstests.py0000664000175000017500000005230400000000000020552 0ustar00mkleehammermkleehammer#!/usr/bin/python usage="""\ usage: %prog [options] filename Unit tests for Microsoft Access These run using the version from the 'build' directory, not the version installed into the Python directories. You must run python setup.py build before running the tests. To run, pass the file EXTENSION of an Access database on the command line: accesstests accdb An empty Access 2000 database (empty.mdb) or an empty Access 2007 database (empty.accdb), are automatically created for the tests. 
To run a single test, use the -t option: accesstests -t unicode_null accdb If you want to report an error, it would be helpful to include the driver information by using the verbose flag and redirecting the output to a file: accesstests -v accdb >& results.txt You can pass the verbose flag twice for more verbose output: accesstests -vv accdb """ # Access SQL data types: http://msdn2.microsoft.com/en-us/library/bb208866.aspx import sys, os, re import unittest from decimal import Decimal from datetime import datetime, date, time from os.path import abspath, dirname, join import shutil from testutils import * CNXNSTRING = None _TESTSTR = '0123456789-abcdefghijklmnopqrstuvwxyz-' def _generate_test_string(length): """ Returns a string of composed of `seed` to make a string `length` characters long. To enhance performance, there are 3 ways data is read, based on the length of the value, so most data types are tested with 3 lengths. This function helps us generate the test data. We use a recognizable data set instead of a single character to make it less likely that "overlap" errors will be hidden and to help us manually identify where a break occurs. 
""" if length <= len(_TESTSTR): return _TESTSTR[:length] c = (length + len(_TESTSTR)-1) // len(_TESTSTR) v = _TESTSTR * c return v[:length] class AccessTestCase(unittest.TestCase): SMALL_FENCEPOST_SIZES = [ 0, 1, 254, 255 ] # text fields <= 255 LARGE_FENCEPOST_SIZES = [ 256, 270, 304, 508, 510, 511, 512, 1023, 1024, 2047, 2048, 4000, 4095, 4096, 4097, 10 * 1024, 20 * 1024 ] CHAR_FENCEPOSTS = [ _generate_test_string(size) for size in SMALL_FENCEPOST_SIZES ] IMAGE_FENCEPOSTS = CHAR_FENCEPOSTS + [ _generate_test_string(size) for size in LARGE_FENCEPOST_SIZES ] def __init__(self, method_name): unittest.TestCase.__init__(self, method_name) def setUp(self): self.cnxn = pyodbc.connect(CNXNSTRING) self.cursor = self.cnxn.cursor() for i in range(3): try: self.cursor.execute("drop table t%d" % i) self.cnxn.commit() except: pass self.cnxn.rollback() def tearDown(self): try: self.cursor.close() self.cnxn.close() except: # If we've already closed the cursor or connection, exceptions are thrown. pass def test_closed_reflects_connection_state(self): self.assertFalse(self.cnxn.closed) self.cnxn.close() self.assertTrue(self.cnxn.closed) def test_multiple_bindings(self): "More than one bind and select on a cursor" self.cursor.execute("create table t1(n int)") self.cursor.execute("insert into t1 values (?)", 1) self.cursor.execute("insert into t1 values (?)", 2) self.cursor.execute("insert into t1 values (?)", 3) for i in range(3): self.cursor.execute("select n from t1 where n < ?", 10) self.cursor.execute("select n from t1 where n < 3") def test_different_bindings(self): self.cursor.execute("create table t1(n int)") self.cursor.execute("create table t2(d datetime)") self.cursor.execute("insert into t1 values (?)", 1) self.cursor.execute("insert into t2 values (?)", datetime.now()) def test_drivers(self): p = pyodbc.drivers() self.assertTrue(isinstance(p, list)) def test_datasources(self): p = pyodbc.dataSources() self.assertTrue(isinstance(p, dict)) def test_getinfo_string(self): 
value = self.cnxn.getinfo(pyodbc.SQL_CATALOG_NAME_SEPARATOR) self.assertTrue(isinstance(value, str)) def test_getinfo_bool(self): value = self.cnxn.getinfo(pyodbc.SQL_ACCESSIBLE_TABLES) self.assertTrue(isinstance(value, bool)) def test_getinfo_int(self): value = self.cnxn.getinfo(pyodbc.SQL_DEFAULT_TXN_ISOLATION) self.assertTrue(isinstance(value, int)) def test_getinfo_smallint(self): value = self.cnxn.getinfo(pyodbc.SQL_CONCAT_NULL_BEHAVIOR) self.assertTrue(isinstance(value, int)) def _test_strtype(self, sqltype, value, colsize=None): """ The implementation for string, Unicode, and binary tests. """ assert colsize is None or (value is None or colsize >= len(value)), 'colsize=%s value=%s' % (colsize, (value is None) and 'none' or len(value)) if colsize: sql = "create table t1(n1 int not null, s1 %s(%s), s2 %s(%s))" % (sqltype, colsize, sqltype, colsize) else: sql = "create table t1(n1 int not null, s1 %s, s2 %s)" % (sqltype, sqltype) self.cursor.execute(sql) self.cursor.execute("insert into t1 values(1, ?, ?)", (value, value)) row = self.cursor.execute("select s1, s2 from t1").fetchone() for i in range(2): v = row[i] self.assertEqual(type(v), type(value)) if value is not None: self.assertEqual(len(v), len(value)) self.assertEqual(v, value) def test_varchar_null(self): self._test_strtype('varchar', None, 255) # Generate a test for each fencepost size: test_varchar_0, etc. def _maketest(value): def t(self): self._test_strtype('varchar', value, len(value)) t.__doc__ = 'varchar %s' % len(value) return t for value in CHAR_FENCEPOSTS: locals()['test_varchar_%s' % len(value)] = _maketest(value) # # binary # def test_null_binary(self): self._test_strtype('binary', None) # Generate a test for each fencepost size: test_varchar_0, etc. 
def _maketest(value): def t(self): # Convert to UTF-8 to create a byte array self._test_strtype('varbinary', value.encode('utf-8'), len(value)) t.__doc__ = 'binary %s' % len(value) return t for value in CHAR_FENCEPOSTS: locals()['test_binary_%s' % len(value)] = _maketest(value) # # # # image # # # def test_null_image(self): # self._test_strtype('image', None) # # Generate a test for each fencepost size: test_varchar_0, etc. # def _maketest(value): # def t(self): # self._test_strtype('image', value.encode('utf-8')) # t.__doc__ = 'image %s' % len(value) # return t # for value in IMAGE_FENCEPOSTS: # locals()['test_image_%s' % len(value)] = _maketest(value) # # memo # def test_null_memo(self): self._test_strtype('memo', None) # Generate a test for each fencepost size: test_varchar_0, etc. def _maketest(value): def t(self): self._test_strtype('memo', value) t.__doc__ = 'Unicode to memo %s' % len(value) return t for value in IMAGE_FENCEPOSTS: locals()['test_memo_%s' % len(value)] = _maketest(value) def test_subquery_params(self): """Ensure parameter markers work in a subquery""" self.cursor.execute("create table t1(id integer, s varchar(20))") self.cursor.execute("insert into t1 values (?,?)", 1, 'test') row = self.cursor.execute(""" select x.id from ( select id from t1 where s = ? and id between ? and ? ) x """, 'test', 1, 10).fetchone() self.assertNotEqual(row, None) self.assertEqual(row[0], 1) def _exec(self): self.cursor.execute(self.sql) def test_close_cnxn(self): """Make sure using a Cursor after closing its connection doesn't crash.""" self.cursor.execute("create table t1(id integer, s varchar(20))") self.cursor.execute("insert into t1 values (?,?)", 1, 'test') self.cursor.execute("select * from t1") self.cnxn.close() # Now that the connection is closed, we expect an exception. (If the code attempts to use # the HSTMT, we'll get an access violation instead.) 
self.sql = "select * from t1" self.assertRaises(pyodbc.ProgrammingError, self._exec) def test_unicode_query(self): self.cursor.execute(u"select 1") def test_negative_row_index(self): self.cursor.execute("create table t1(s varchar(20))") self.cursor.execute("insert into t1 values(?)", "1") row = self.cursor.execute("select * from t1").fetchone() self.assertEqual(row[0], "1") self.assertEqual(row[-1], "1") def test_version(self): self.assertEqual(3, len(pyodbc.version.split('.'))) # 1.3.1 etc. # # date, time, datetime # def test_datetime(self): value = datetime(2007, 1, 15, 3, 4, 5) self.cursor.execute("create table t1(dt datetime)") self.cursor.execute("insert into t1 values (?)", value) result = self.cursor.execute("select dt from t1").fetchone()[0] self.assertEqual(value, result) # # ints and floats # def test_int(self): value = 1234 self.cursor.execute("create table t1(n int)") self.cursor.execute("insert into t1 values (?)", value) result = self.cursor.execute("select n from t1").fetchone()[0] self.assertEqual(result, value) def test_negative_int(self): value = -1 self.cursor.execute("create table t1(n int)") self.cursor.execute("insert into t1 values (?)", value) result = self.cursor.execute("select n from t1").fetchone()[0] self.assertEqual(result, value) def test_smallint(self): value = 32767 self.cursor.execute("create table t1(n smallint)") self.cursor.execute("insert into t1 values (?)", value) result = self.cursor.execute("select n from t1").fetchone()[0] self.assertEqual(result, value) def test_real(self): value = 1234.5 self.cursor.execute("create table t1(n real)") self.cursor.execute("insert into t1 values (?)", value) result = self.cursor.execute("select n from t1").fetchone()[0] self.assertEqual(result, value) def test_negative_real(self): value = -200.5 self.cursor.execute("create table t1(n real)") self.cursor.execute("insert into t1 values (?)", value) result = self.cursor.execute("select n from t1").fetchone()[0] self.assertEqual(value, result) 
def test_float(self): value = 1234.567 self.cursor.execute("create table t1(n float)") self.cursor.execute("insert into t1 values (?)", value) result = self.cursor.execute("select n from t1").fetchone()[0] self.assertEqual(result, value) def test_negative_float(self): value = -200.5 self.cursor.execute("create table t1(n float)") self.cursor.execute("insert into t1 values (?)", value) result = self.cursor.execute("select n from t1").fetchone()[0] self.assertEqual(value, result) def test_tinyint(self): self.cursor.execute("create table t1(n tinyint)") value = 10 self.cursor.execute("insert into t1 values (?)", value) result = self.cursor.execute("select n from t1").fetchone()[0] self.assertEqual(type(result), type(value)) self.assertEqual(value, result) # # decimal & money # def test_decimal(self): value = Decimal('12345.6789') self.cursor.execute("create table t1(n numeric(10,4))") self.cursor.execute("insert into t1 values(?)", value) v = self.cursor.execute("select n from t1").fetchone()[0] self.assertEqual(type(v), Decimal) self.assertEqual(v, value) def test_money(self): self.cursor.execute("create table t1(n money)") value = Decimal('1234.45') self.cursor.execute("insert into t1 values (?)", value) result = self.cursor.execute("select n from t1").fetchone()[0] self.assertEqual(type(result), type(value)) self.assertEqual(value, result) def test_negative_decimal_scale(self): value = Decimal('-10.0010') self.cursor.execute("create table t1(d numeric(19,4))") self.cursor.execute("insert into t1 values(?)", value) v = self.cursor.execute("select * from t1").fetchone()[0] self.assertEqual(type(v), Decimal) self.assertEqual(v, value) # # bit # def test_bit(self): self.cursor.execute("create table t1(b bit)") value = True self.cursor.execute("insert into t1 values (?)", value) result = self.cursor.execute("select b from t1").fetchone()[0] self.assertEqual(type(result), bool) self.assertEqual(value, result) def test_bit_null(self): self.cursor.execute("create table 
t1(b bit)") value = None self.cursor.execute("insert into t1 values (?)", value) result = self.cursor.execute("select b from t1").fetchone()[0] self.assertEqual(type(result), bool) self.assertEqual(False, result) def test_guid(self): value = u"de2ac9c6-8676-4b0b-b8a6-217a8580cbee" self.cursor.execute("create table t1(g1 uniqueidentifier)") self.cursor.execute("insert into t1 values (?)", value) v = self.cursor.execute("select * from t1").fetchone()[0] self.assertEqual(type(v), type(value)) self.assertEqual(len(v), len(value)) # # rowcount # def test_rowcount_delete(self): self.assertEqual(self.cursor.rowcount, -1) self.cursor.execute("create table t1(i int)") count = 4 for i in range(count): self.cursor.execute("insert into t1 values (?)", i) self.cursor.execute("delete from t1") self.assertEqual(self.cursor.rowcount, count) def test_rowcount_nodata(self): """ This represents a different code path than a delete that deleted something. The return value is SQL_NO_DATA and code after it was causing an error. We could use SQL_NO_DATA to step over the code that errors out and drop down to the same SQLRowCount code. On the other hand, we could hardcode a zero return value. """ self.cursor.execute("create table t1(i int)") # This is a different code path internally. self.cursor.execute("delete from t1") self.assertEqual(self.cursor.rowcount, 0) def test_rowcount_select(self): """ Ensure Cursor.rowcount is set properly after a select statement. pyodbc calls SQLRowCount after each execute and sets Cursor.rowcount, but SQL Server 2005 returns -1 after a select statement, so we'll test for that behavior. This is valid behavior according to the DB API specification, but people don't seem to like it. 
""" self.cursor.execute("create table t1(i int)") count = 4 for i in range(count): self.cursor.execute("insert into t1 values (?)", i) self.cursor.execute("select * from t1") self.assertEqual(self.cursor.rowcount, -1) rows = self.cursor.fetchall() self.assertEqual(len(rows), count) self.assertEqual(self.cursor.rowcount, -1) def test_rowcount_reset(self): "Ensure rowcount is reset to -1" self.cursor.execute("create table t1(i int)") count = 4 for i in range(count): self.cursor.execute("insert into t1 values (?)", i) self.assertEqual(self.cursor.rowcount, 1) self.cursor.execute("create table t2(i int)") self.assertEqual(self.cursor.rowcount, -1) # # Misc # def test_lower_case(self): "Ensure pyodbc.lowercase forces returned column names to lowercase." # Has to be set before creating the cursor, so we must recreate self.cursor. pyodbc.lowercase = True self.cursor = self.cnxn.cursor() self.cursor.execute("create table t1(Abc int, dEf int)") self.cursor.execute("select * from t1") names = [ t[0] for t in self.cursor.description ] names.sort() self.assertEqual(names, [ "abc", "def" ]) # Put it back so other tests don't fail. pyodbc.lowercase = False def test_row_description(self): """ Ensure Cursor.description is accessible as Row.cursor_description. 
""" self.cursor = self.cnxn.cursor() self.cursor.execute("create table t1(a int, b char(3))") self.cnxn.commit() self.cursor.execute("insert into t1 values(1, 'abc')") row = self.cursor.execute("select * from t1").fetchone() self.assertEqual(self.cursor.description, row.cursor_description) def test_executemany(self): self.cursor.execute("create table t1(a int, b varchar(10))") params = [ (i, str(i)) for i in range(1, 6) ] self.cursor.executemany("insert into t1(a, b) values (?,?)", params) count = self.cursor.execute("select count(*) from t1").fetchone()[0] self.assertEqual(count, len(params)) self.cursor.execute("select a, b from t1 order by a") rows = self.cursor.fetchall() self.assertEqual(count, len(rows)) for param, row in zip(params, rows): self.assertEqual(param[0], row[0]) self.assertEqual(param[1], row[1]) def test_executemany_failure(self): """ Ensure that an exception is raised if one query in an executemany fails. """ self.cursor.execute("create table t1(a int, b varchar(10))") params = [ (1, 'good'), ('error', 'not an int'), (3, 'good') ] self.assertRaises(pyodbc.Error, self.cursor.executemany, "insert into t1(a, b) value (?, ?)", params) def test_row_slicing(self): self.cursor.execute("create table t1(a int, b int, c int, d int)"); self.cursor.execute("insert into t1 values(1,2,3,4)") row = self.cursor.execute("select * from t1").fetchone() result = row[:] self.assertTrue(result is row) result = row[:-1] self.assertEqual(result, (1,2,3)) result = row[0:4] self.assertTrue(result is row) def test_row_repr(self): self.cursor.execute("create table t1(a int, b int, c int, d int)"); self.cursor.execute("insert into t1 values(1,2,3,4)") row = self.cursor.execute("select * from t1").fetchone() result = str(row) self.assertEqual(result, "(1, 2, 3, 4)") result = str(row[:-1]) self.assertEqual(result, "(1, 2, 3)") result = str(row[:1]) self.assertEqual(result, "(1,)") def test_concatenation(self): v2 = u'0123456789' * 25 v3 = u'9876543210' * 25 value = v2 + 'x' 
+ v3 self.cursor.execute("create table t1(c2 varchar(250), c3 varchar(250))") self.cursor.execute("insert into t1(c2, c3) values (?,?)", v2, v3) row = self.cursor.execute("select c2 + 'x' + c3 from t1").fetchone() self.assertEqual(row[0], value) def test_autocommit(self): self.assertEqual(self.cnxn.autocommit, False) othercnxn = pyodbc.connect(CNXNSTRING, autocommit=True) self.assertEqual(othercnxn.autocommit, True) othercnxn.autocommit = False self.assertEqual(othercnxn.autocommit, False) def main(): from argparse import ArgumentParser parser = ArgumentParser(usage=usage) parser.add_argument("-v", "--verbose", default=0, action="count", help="Increment test verbosity (can be used multiple times)") parser.add_argument("-d", "--debug", action="store_true", default=False, help="Print debugging items") parser.add_argument("-t", "--test", help="Run only the named test") parser.add_argument('type', choices=['accdb', 'mdb'], help='Which type of file to test') args = parser.parse_args() DRIVERS = { 'accdb': 'Microsoft Access Driver (*.mdb, *.accdb)', 'mdb': 'Microsoft Access Driver (*.mdb)' } here = dirname(abspath(__file__)) src = join(here, 'empty.' + args.type) dest = join(here, 'test.' + args.type) shutil.copy(src, dest) global CNXNSTRING CNXNSTRING = 'DRIVER={%s};DBQ=%s;ExtendedAnsiSQL=1' % (DRIVERS[args.type], dest) print(CNXNSTRING) if args.verbose: cnxn = pyodbc.connect(CNXNSTRING) print_library_info(cnxn) cnxn.close() suite = load_tests(AccessTestCase, args.test) testRunner = unittest.TextTestRunner(verbosity=args.verbose) result = testRunner.run(suite) return result if __name__ == '__main__': # Add the build directory to the path so we're testing the latest build, not the installed version. 
add_to_path() import pyodbc sys.exit(0 if main().wasSuccessful() else 1) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1619054768.0 pyodbc-4.0.32/tests3/dbapi20.py0000664000175000017500000007530700000000000017457 0ustar00mkleehammermkleehammer#!/usr/bin/env python ''' Python DB API 2.0 driver compliance unit test suite. This software is Public Domain and may be used without restrictions. "Now we have booze and barflies entering the discussion, plus rumours of DBAs on drugs... and I won't tell you what flashes through my mind each time I read the subject line with 'Anal Compliance' in it. All around this is turning out to be a thoroughly unwholesome unit test." -- Ian Bicking ''' __rcs_id__ = '$Id: dbapi20.py,v 1.10 2003/10/09 03:14:14 zenzen Exp $' __version__ = '$Revision: 1.10 $'[11:-2] __author__ = 'Stuart Bishop ' import unittest import time # $Log: dbapi20.py,v $ # Revision 1.10 2003/10/09 03:14:14 zenzen # Add test for DB API 2.0 optional extension, where database exceptions # are exposed as attributes on the Connection object. # # Revision 1.9 2003/08/13 01:16:36 zenzen # Minor tweak from Stefan Fleiter # # Revision 1.8 2003/04/10 00:13:25 zenzen # Changes, as per suggestions by M.-A. Lemburg # - Add a table prefix, to ensure namespace collisions can always be avoided # # Revision 1.7 2003/02/26 23:33:37 zenzen # Break out DDL into helper functions, as per request by David Rushby # # Revision 1.6 2003/02/21 03:04:33 zenzen # Stuff from Henrik Ekelund: # added test_None # added test_nextset & hooks # # Revision 1.5 2003/02/17 22:08:43 zenzen # Implement suggestions and code from Henrik Eklund - test that cursor.arraysize # defaults to 1 & generic cursor.callproc test added # # Revision 1.4 2003/02/15 00:16:33 zenzen # Changes, as per suggestions and bug reports by M.-A. Lemburg, # Matthew T. 
Kromer, Federico Di Gregorio and Daniel Dittmar # - Class renamed # - Now a subclass of TestCase, to avoid requiring the driver stub # to use multiple inheritance # - Reversed the polarity of buggy test in test_description # - Test exception heirarchy correctly # - self.populate is now self._populate(), so if a driver stub # overrides self.ddl1 this change propogates # - VARCHAR columns now have a width, which will hopefully make the # DDL even more portible (this will be reversed if it causes more problems) # - cursor.rowcount being checked after various execute and fetchXXX methods # - Check for fetchall and fetchmany returning empty lists after results # are exhausted (already checking for empty lists if select retrieved # nothing # - Fix bugs in test_setoutputsize_basic and test_setinputsizes # class DatabaseAPI20Test(unittest.TestCase): ''' Test a database self.driver for DB API 2.0 compatibility. This implementation tests Gadfly, but the TestCase is structured so that other self.drivers can subclass this test case to ensure compiliance with the DB-API. It is expected that this TestCase may be expanded in the future if ambiguities or edge conditions are discovered. The 'Optional Extensions' are not yet being tested. self.drivers should subclass this test, overriding setUp, tearDown, self.driver, connect_args and connect_kw_args. Class specification should be as follows: import dbapi20 class mytest(dbapi20.DatabaseAPI20Test): [...] Don't 'import DatabaseAPI20Test from dbapi20', or you will confuse the unit tester - just 'import dbapi20'. ''' # The self.driver module. 
This should be the module where the 'connect' # method is to be found driver = None connect_args = () # List of arguments to pass to connect connect_kw_args = {} # Keyword arguments for connect table_prefix = 'dbapi20test_' # If you need to specify a prefix for tables ddl1 = 'create table %sbooze (name varchar(20))' % table_prefix ddl2 = 'create table %sbarflys (name varchar(20))' % table_prefix xddl1 = 'drop table %sbooze' % table_prefix xddl2 = 'drop table %sbarflys' % table_prefix lowerfunc = 'lower' # Name of stored procedure to convert string->lowercase # Some drivers may need to override these helpers, for example adding # a 'commit' after the execute. def executeDDL1(self,cursor): cursor.execute(self.ddl1) def executeDDL2(self,cursor): cursor.execute(self.ddl2) def setUp(self): ''' self.drivers should override this method to perform required setup if any is necessary, such as creating the database. ''' pass def tearDown(self): ''' self.drivers should override this method to perform required cleanup if any is necessary, such as deleting the test database. The default drops the tables that may be created. ''' con = self._connect() try: cur = con.cursor() for i, ddl in enumerate((self.xddl1,self.xddl2)): try: cur.execute(ddl) con.commit() except self.driver.Error: # Assume table didn't exist. Other tests will check if # execute is busted. 
pass finally: con.close() def _connect(self): try: return self.driver.connect( *self.connect_args,**self.connect_kw_args ) except AttributeError: self.fail("No connect method found in self.driver module") def test_connect(self): con = self._connect() con.close() def test_apilevel(self): try: # Must exist apilevel = self.driver.apilevel # Must equal 2.0 self.assertEqual(apilevel,'2.0') except AttributeError: self.fail("Driver doesn't define apilevel") def test_threadsafety(self): try: # Must exist threadsafety = self.driver.threadsafety # Must be a valid value self.assertTrue(threadsafety in (0,1,2,3)) except AttributeError: self.fail("Driver doesn't define threadsafety") def test_paramstyle(self): try: # Must exist paramstyle = self.driver.paramstyle # Must be a valid value self.assertTrue(paramstyle in ( 'qmark','numeric','named','format','pyformat' )) except AttributeError: self.fail("Driver doesn't define paramstyle") def test_Exceptions(self): # Make sure required exceptions exist, and are in the # defined heirarchy. 
self.assertTrue(issubclass(self.driver.Warning,StandardError)) self.assertTrue(issubclass(self.driver.Error,StandardError)) self.assertTrue( issubclass(self.driver.InterfaceError,self.driver.Error) ) self.assertTrue( issubclass(self.driver.DatabaseError,self.driver.Error) ) self.assertTrue( issubclass(self.driver.OperationalError,self.driver.Error) ) self.assertTrue( issubclass(self.driver.IntegrityError,self.driver.Error) ) self.assertTrue( issubclass(self.driver.InternalError,self.driver.Error) ) self.assertTrue( issubclass(self.driver.ProgrammingError,self.driver.Error) ) self.assertTrue( issubclass(self.driver.NotSupportedError,self.driver.Error) ) def test_ExceptionsAsConnectionAttributes(self): # OPTIONAL EXTENSION # Test for the optional DB API 2.0 extension, where the exceptions # are exposed as attributes on the Connection object # I figure this optional extension will be implemented by any # driver author who is using this test suite, so it is enabled # by default. con = self._connect() drv = self.driver self.assertTrue(con.Warning is drv.Warning) self.assertTrue(con.Error is drv.Error) self.assertTrue(con.InterfaceError is drv.InterfaceError) self.assertTrue(con.DatabaseError is drv.DatabaseError) self.assertTrue(con.OperationalError is drv.OperationalError) self.assertTrue(con.IntegrityError is drv.IntegrityError) self.assertTrue(con.InternalError is drv.InternalError) self.assertTrue(con.ProgrammingError is drv.ProgrammingError) self.assertTrue(con.NotSupportedError is drv.NotSupportedError) def test_commit(self): con = self._connect() try: # Commit must work, even if it doesn't do anything con.commit() finally: con.close() def test_rollback(self): con = self._connect() # If rollback is defined, it should either work or throw # the documented exception if hasattr(con,'rollback'): try: con.rollback() except self.driver.NotSupportedError: pass def test_cursor(self): con = self._connect() try: cur = con.cursor() finally: con.close() def 
test_cursor_isolation(self): con = self._connect() try: # Make sure cursors created from the same connection have # the documented transaction isolation level cur1 = con.cursor() cur2 = con.cursor() self.executeDDL1(cur1) cur1.execute("insert into %sbooze values ('Victoria Bitter')" % ( self.table_prefix )) cur2.execute("select name from %sbooze" % self.table_prefix) booze = cur2.fetchall() self.assertEqual(len(booze),1) self.assertEqual(len(booze[0]),1) self.assertEqual(booze[0][0],'Victoria Bitter') finally: con.close() def test_description(self): con = self._connect() try: cur = con.cursor() self.executeDDL1(cur) self.assertEqual(cur.description,None, 'cursor.description should be none after executing a ' 'statement that can return no rows (such as DDL)' ) cur.execute('select name from %sbooze' % self.table_prefix) self.assertEqual(len(cur.description),1, 'cursor.description describes too many columns' ) self.assertEqual(len(cur.description[0]),7, 'cursor.description[x] tuples must have 7 elements' ) self.assertEqual(cur.description[0][0].lower(),'name', 'cursor.description[x][0] must return column name' ) self.assertEqual(cur.description[0][1],self.driver.STRING, 'cursor.description[x][1] must return column type. Got %r' % cur.description[0][1] ) # Make sure self.description gets reset self.executeDDL2(cur) self.assertEqual(cur.description,None, 'cursor.description not being set to None when executing ' 'no-result statements (eg. 
DDL)' ) finally: con.close() def test_rowcount(self): con = self._connect() try: cur = con.cursor() self.executeDDL1(cur) self.assertEqual(cur.rowcount,-1, 'cursor.rowcount should be -1 after executing no-result ' 'statements' ) cur.execute("insert into %sbooze values ('Victoria Bitter')" % ( self.table_prefix )) self.assertTrue(cur.rowcount in (-1,1), 'cursor.rowcount should == number or rows inserted, or ' 'set to -1 after executing an insert statement' ) cur.execute("select name from %sbooze" % self.table_prefix) self.assertTrue(cur.rowcount in (-1,1), 'cursor.rowcount should == number of rows returned, or ' 'set to -1 after executing a select statement' ) self.executeDDL2(cur) self.assertEqual(cur.rowcount,-1, 'cursor.rowcount not being reset to -1 after executing ' 'no-result statements' ) finally: con.close() lower_func = 'lower' def test_callproc(self): con = self._connect() try: cur = con.cursor() if self.lower_func and hasattr(cur,'callproc'): r = cur.callproc(self.lower_func,('FOO',)) self.assertEqual(len(r),1) self.assertEqual(r[0],'FOO') r = cur.fetchall() self.assertEqual(len(r),1,'callproc produced no result set') self.assertEqual(len(r[0]),1, 'callproc produced invalid result set' ) self.assertEqual(r[0][0],'foo', 'callproc produced invalid results' ) finally: con.close() def test_close(self): con = self._connect() try: cur = con.cursor() finally: con.close() # cursor.execute should raise an Error if called after connection # closed self.assertRaises(self.driver.Error,self.executeDDL1,cur) # connection.commit should raise an Error if called after connection' # closed.' 
self.assertRaises(self.driver.Error,con.commit) # connection.close should raise an Error if called more than once self.assertRaises(self.driver.Error,con.close) def test_execute(self): con = self._connect() try: cur = con.cursor() self._paraminsert(cur) finally: con.close() def _paraminsert(self,cur): self.executeDDL1(cur) cur.execute("insert into %sbooze values ('Victoria Bitter')" % ( self.table_prefix )) self.assertTrue(cur.rowcount in (-1,1)) if self.driver.paramstyle == 'qmark': cur.execute( 'insert into %sbooze values (?)' % self.table_prefix, ("Cooper's",) ) elif self.driver.paramstyle == 'numeric': cur.execute( 'insert into %sbooze values (:1)' % self.table_prefix, ("Cooper's",) ) elif self.driver.paramstyle == 'named': cur.execute( 'insert into %sbooze values (:beer)' % self.table_prefix, {'beer':"Cooper's"} ) elif self.driver.paramstyle == 'format': cur.execute( 'insert into %sbooze values (%%s)' % self.table_prefix, ("Cooper's",) ) elif self.driver.paramstyle == 'pyformat': cur.execute( 'insert into %sbooze values (%%(beer)s)' % self.table_prefix, {'beer':"Cooper's"} ) else: self.fail('Invalid paramstyle') self.assertTrue(cur.rowcount in (-1,1)) cur.execute('select name from %sbooze' % self.table_prefix) res = cur.fetchall() self.assertEqual(len(res),2,'cursor.fetchall returned too few rows') beers = [res[0][0],res[1][0]] beers.sort() self.assertEqual(beers[0],"Cooper's", 'cursor.fetchall retrieved incorrect data, or data inserted ' 'incorrectly' ) self.assertEqual(beers[1],"Victoria Bitter", 'cursor.fetchall retrieved incorrect data, or data inserted ' 'incorrectly' ) def test_executemany(self): con = self._connect() try: cur = con.cursor() self.executeDDL1(cur) largs = [ ("Cooper's",) , ("Boag's",) ] margs = [ {'beer': "Cooper's"}, {'beer': "Boag's"} ] if self.driver.paramstyle == 'qmark': cur.executemany( 'insert into %sbooze values (?)' % self.table_prefix, largs ) elif self.driver.paramstyle == 'numeric': cur.executemany( 'insert into %sbooze values 
(:1)' % self.table_prefix, largs ) elif self.driver.paramstyle == 'named': cur.executemany( 'insert into %sbooze values (:beer)' % self.table_prefix, margs ) elif self.driver.paramstyle == 'format': cur.executemany( 'insert into %sbooze values (%%s)' % self.table_prefix, largs ) elif self.driver.paramstyle == 'pyformat': cur.executemany( 'insert into %sbooze values (%%(beer)s)' % ( self.table_prefix ), margs ) else: self.fail('Unknown paramstyle') self.assertTrue(cur.rowcount in (-1,2), 'insert using cursor.executemany set cursor.rowcount to ' 'incorrect value %r' % cur.rowcount ) cur.execute('select name from %sbooze' % self.table_prefix) res = cur.fetchall() self.assertEqual(len(res),2, 'cursor.fetchall retrieved incorrect number of rows' ) beers = [res[0][0],res[1][0]] beers.sort() self.assertEqual(beers[0],"Boag's",'incorrect data retrieved') self.assertEqual(beers[1],"Cooper's",'incorrect data retrieved') finally: con.close() def test_fetchone(self): con = self._connect() try: cur = con.cursor() # cursor.fetchone should raise an Error if called before # executing a select-type query self.assertRaises(self.driver.Error,cur.fetchone) # cursor.fetchone should raise an Error if called after # executing a query that cannnot return rows self.executeDDL1(cur) self.assertRaises(self.driver.Error,cur.fetchone) cur.execute('select name from %sbooze' % self.table_prefix) self.assertEqual(cur.fetchone(),None, 'cursor.fetchone should return None if a query retrieves ' 'no rows' ) self.assertTrue(cur.rowcount in (-1,0)) # cursor.fetchone should raise an Error if called after # executing a query that cannnot return rows cur.execute("insert into %sbooze values ('Victoria Bitter')" % ( self.table_prefix )) self.assertRaises(self.driver.Error,cur.fetchone) cur.execute('select name from %sbooze' % self.table_prefix) r = cur.fetchone() self.assertEqual(len(r),1, 'cursor.fetchone should have retrieved a single row' ) self.assertEqual(r[0],'Victoria Bitter', 'cursor.fetchone 
retrieved incorrect data' ) self.assertEqual(cur.fetchone(),None, 'cursor.fetchone should return None if no more rows available' ) self.assertTrue(cur.rowcount in (-1,1)) finally: con.close() samples = [ 'Carlton Cold', 'Carlton Draft', 'Mountain Goat', 'Redback', 'Victoria Bitter', 'XXXX' ] def _populate(self): ''' Return a list of sql commands to setup the DB for the fetch tests. ''' populate = [ "insert into %sbooze values ('%s')" % (self.table_prefix,s) for s in self.samples ] return populate def test_fetchmany(self): con = self._connect() try: cur = con.cursor() # cursor.fetchmany should raise an Error if called without #issuing a query self.assertRaises(self.driver.Error,cur.fetchmany,4) self.executeDDL1(cur) for sql in self._populate(): cur.execute(sql) cur.execute('select name from %sbooze' % self.table_prefix) r = cur.fetchmany() self.assertEqual(len(r),1, 'cursor.fetchmany retrieved incorrect number of rows, ' 'default of arraysize is one.' ) cur.arraysize=10 r = cur.fetchmany(3) # Should get 3 rows self.assertEqual(len(r),3, 'cursor.fetchmany retrieved incorrect number of rows' ) r = cur.fetchmany(4) # Should get 2 more self.assertEqual(len(r),2, 'cursor.fetchmany retrieved incorrect number of rows' ) r = cur.fetchmany(4) # Should be an empty sequence self.assertEqual(len(r),0, 'cursor.fetchmany should return an empty sequence after ' 'results are exhausted' ) self.assertTrue(cur.rowcount in (-1,6)) # Same as above, using cursor.arraysize cur.arraysize=4 cur.execute('select name from %sbooze' % self.table_prefix) r = cur.fetchmany() # Should get 4 rows self.assertEqual(len(r),4, 'cursor.arraysize not being honoured by fetchmany' ) r = cur.fetchmany() # Should get 2 more self.assertEqual(len(r),2) r = cur.fetchmany() # Should be an empty sequence self.assertEqual(len(r),0) self.assertTrue(cur.rowcount in (-1,6)) cur.arraysize=6 cur.execute('select name from %sbooze' % self.table_prefix) rows = cur.fetchmany() # Should get all rows 
self.assertTrue(cur.rowcount in (-1,6)) self.assertEqual(len(rows),6) self.assertEqual(len(rows),6) rows = [r[0] for r in rows] rows.sort() # Make sure we get the right data back out for i in range(0,6): self.assertEqual(rows[i],self.samples[i], 'incorrect data retrieved by cursor.fetchmany' ) rows = cur.fetchmany() # Should return an empty list self.assertEqual(len(rows),0, 'cursor.fetchmany should return an empty sequence if ' 'called after the whole result set has been fetched' ) self.assertTrue(cur.rowcount in (-1,6)) self.executeDDL2(cur) cur.execute('select name from %sbarflys' % self.table_prefix) r = cur.fetchmany() # Should get empty sequence self.assertEqual(len(r),0, 'cursor.fetchmany should return an empty sequence if ' 'query retrieved no rows' ) self.assertTrue(cur.rowcount in (-1,0)) finally: con.close() def test_fetchall(self): con = self._connect() try: cur = con.cursor() # cursor.fetchall should raise an Error if called # without executing a query that may return rows (such # as a select) self.assertRaises(self.driver.Error, cur.fetchall) self.executeDDL1(cur) for sql in self._populate(): cur.execute(sql) # cursor.fetchall should raise an Error if called # after executing a a statement that cannot return rows self.assertRaises(self.driver.Error,cur.fetchall) cur.execute('select name from %sbooze' % self.table_prefix) rows = cur.fetchall() self.assertTrue(cur.rowcount in (-1,len(self.samples))) self.assertEqual(len(rows),len(self.samples), 'cursor.fetchall did not retrieve all rows' ) rows = [r[0] for r in rows] rows.sort() for i in range(0,len(self.samples)): self.assertEqual(rows[i],self.samples[i], 'cursor.fetchall retrieved incorrect rows' ) rows = cur.fetchall() self.assertEqual( len(rows),0, 'cursor.fetchall should return an empty list if called ' 'after the whole result set has been fetched' ) self.assertTrue(cur.rowcount in (-1,len(self.samples))) self.executeDDL2(cur) cur.execute('select name from %sbarflys' % self.table_prefix) rows = 
cur.fetchall() self.assertTrue(cur.rowcount in (-1,0)) self.assertEqual(len(rows),0, 'cursor.fetchall should return an empty list if ' 'a select query returns no rows' ) finally: con.close() def test_mixedfetch(self): con = self._connect() try: cur = con.cursor() self.executeDDL1(cur) for sql in self._populate(): cur.execute(sql) cur.execute('select name from %sbooze' % self.table_prefix) rows1 = cur.fetchone() rows23 = cur.fetchmany(2) rows4 = cur.fetchone() rows56 = cur.fetchall() self.assertTrue(cur.rowcount in (-1,6)) self.assertEqual(len(rows23),2, 'fetchmany returned incorrect number of rows' ) self.assertEqual(len(rows56),2, 'fetchall returned incorrect number of rows' ) rows = [rows1[0]] rows.extend([rows23[0][0],rows23[1][0]]) rows.append(rows4[0]) rows.extend([rows56[0][0],rows56[1][0]]) rows.sort() for i in range(0,len(self.samples)): self.assertEqual(rows[i],self.samples[i], 'incorrect data retrieved or inserted' ) finally: con.close() def help_nextset_setUp(self,cur): ''' Should create a procedure called deleteme that returns two result sets, first the number of rows in booze then "name from booze" ''' raise NotImplementedError,'Helper not implemented' #sql=""" # create procedure deleteme as # begin # select count(*) from booze # select name from booze # end #""" #cur.execute(sql) def help_nextset_tearDown(self,cur): 'If cleaning up is needed after nextSetTest' raise NotImplementedError,'Helper not implemented' #cur.execute("drop procedure deleteme") def test_nextset(self): con = self._connect() try: cur = con.cursor() if not hasattr(cur,'nextset'): return try: self.executeDDL1(cur) sql=self._populate() for sql in self._populate(): cur.execute(sql) self.help_nextset_setUp(cur) cur.callproc('deleteme') numberofrows=cur.fetchone() assert numberofrows[0]== len(self.samples) assert cur.nextset() names=cur.fetchall() assert len(names) == len(self.samples) s=cur.nextset() assert s == None,'No more return sets, should return None' finally: 
self.help_nextset_tearDown(cur) finally: con.close() def test_nextset(self): raise NotImplementedError,'Drivers need to override this test' def test_arraysize(self): # Not much here - rest of the tests for this are in test_fetchmany con = self._connect() try: cur = con.cursor() self.assertTrue(hasattr(cur,'arraysize'), 'cursor.arraysize must be defined' ) finally: con.close() def test_setinputsizes(self): con = self._connect() try: cur = con.cursor() cur.setinputsizes( (25,) ) self._paraminsert(cur) # Make sure cursor still works finally: con.close() def test_setoutputsize_basic(self): # Basic test is to make sure setoutputsize doesn't blow up con = self._connect() try: cur = con.cursor() cur.setoutputsize(1000) cur.setoutputsize(2000,0) self._paraminsert(cur) # Make sure the cursor still works finally: con.close() def test_setoutputsize(self): # Real test for setoutputsize is driver dependant raise NotImplementedError,'Driver need to override this test' def test_None(self): con = self._connect() try: cur = con.cursor() self.executeDDL1(cur) cur.execute('insert into %sbooze values (NULL)' % self.table_prefix) cur.execute('select name from %sbooze' % self.table_prefix) r = cur.fetchall() self.assertEqual(len(r),1) self.assertEqual(len(r[0]),1) self.assertEqual(r[0][0],None,'NULL value not returned as None') finally: con.close() def test_Date(self): d1 = self.driver.Date(2002,12,25) d2 = self.driver.DateFromTicks(time.mktime((2002,12,25,0,0,0,0,0,0))) # Can we assume this? API doesn't specify, but it seems implied # self.assertEqual(str(d1),str(d2)) def test_Time(self): t1 = self.driver.Time(13,45,30) t2 = self.driver.TimeFromTicks(time.mktime((2001,1,1,13,45,30,0,0,0))) # Can we assume this? API doesn't specify, but it seems implied # self.assertEqual(str(t1),str(t2)) def test_Timestamp(self): t1 = self.driver.Timestamp(2002,12,25,13,45,30) t2 = self.driver.TimestampFromTicks( time.mktime((2002,12,25,13,45,30,0,0,0)) ) # Can we assume this? 
API doesn't specify, but it seems implied # self.assertEqual(str(t1),str(t2)) def test_Binary(self): b = self.driver.Binary('Something') b = self.driver.Binary('') def test_STRING(self): self.assertTrue(hasattr(self.driver,'STRING'), 'module.STRING must be defined' ) def test_BINARY(self): self.assertTrue(hasattr(self.driver,'BINARY'), 'module.BINARY must be defined.' ) def test_NUMBER(self): self.assertTrue(hasattr(self.driver,'NUMBER'), 'module.NUMBER must be defined.' ) def test_DATETIME(self): self.assertTrue(hasattr(self.driver,'DATETIME'), 'module.DATETIME must be defined.' ) def test_ROWID(self): self.assertTrue(hasattr(self.driver,'ROWID'), 'module.ROWID must be defined.' ) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1619054768.0 pyodbc-4.0.32/tests3/dbapitests.py0000664000175000017500000000267700000000000020400 0ustar00mkleehammermkleehammerimport sys import unittest from testutils import * import dbapi20 def main(): add_to_path() import pyodbc from optparse import OptionParser parser = OptionParser(usage="usage: %prog [options] connection_string") parser.add_option("-v", "--verbose", action="count", help="Increment test verbosity (can be used multiple times)") parser.add_option("-d", "--debug", action="store_true", default=False, help="Print debugging items") (options, args) = parser.parse_args() if len(args) > 1: parser.error('Only one argument is allowed. 
Do you need quotes around the connection string?') if not args: connection_string = load_setup_connection_string('dbapitests') if not connection_string: parser.print_help() raise SystemExit() else: connection_string = args[0] class test_pyodbc(dbapi20.DatabaseAPI20Test): driver = pyodbc connect_args = [ connection_string ] connect_kw_args = {} def test_nextset(self): pass def test_setoutputsize(self): pass def test_ExceptionsAsConnectionAttributes(self): pass suite = unittest.makeSuite(test_pyodbc, 'test') testRunner = unittest.TextTestRunner(verbosity=(options.verbose > 1) and 9 or 0) result = testRunner.run(suite) return result if __name__ == '__main__': sys.exit(0 if main().wasSuccessful() else 1) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1619054768.0 pyodbc-4.0.32/tests3/empty.accdb0000664000175000017500000114000000000000000017760 0ustar00mkleehammermkleehammerStandard ACE DBnb` Ugr@?~1y0̝cßFNa7:ޜ(t,`3{6߱nC53Sy[/|*|JQrf_Љ$g'DeFx -bT4.0=dv Y P SS  Y  l Y Y  Y Y  Y  Y  Y   Y  Y  Y  Y   Y 2Y  Y   Y   Y ConnectDatabaseDateCreateDateUpdate FlagsForeignNameIdLvLvExtraLvModule LvPropName OwnerParentIdRmtInfoLongRmtInfoShortType    YYIdParentIdName        OYabDCS  Y Y Y  Y 2ACMFInheritableObjectIdSIDX)_5 +_YObjectId YS  Y  Y bWY Y  Y 1 Y c8Y  Y ,AttributeExpressionFlagLvExtra Name1 Name2ObjectId Order,Cn,C ,nCY"ObjectIdAttribute   -YS  Y Y Y  Y  Y  Y  Y  Y ccolumn grbiticolumnszColumnszObject$szReferencedColumn$szReferencedObjectszRelationship Dede D YYYszObject$szReferencedObjectszRelationship v1b N  : k &     @   OJmJJMMQkkfJUQkOJmJLJkQkSdi`k`dOo^QkiQ^JmYdbkWYfkiQfdimkkMiYfmkkvkiQ^mJL^Qk`kvkJMMQkkkmdiJUQ`kvkJMQk`kvkMd`f^QuMd^o`bk`kvkMd`f^QumvfQ+JmmJMW`Qbm`kvkMd`f^QumvfQ+OQMY`J^`kvkMd`f^QumvfQ+UoYO`kvkMd`f^QumvfQ+YQQQOdoL^Q `kvkMd`f^QumvfQ+YQQQkYbU^Q `kvkMd`f^QumvfQ+^dbU `kvkMd`f^QumvfQ+kWdim `kvkMd`f^QumvfQ+mQum`kvkMd`f^QumvfQ+obkYUbQOLvmQ 
`kvkbJqfJbQUidofMJmQUdiYQk`kvkbJqfJbQUidofk`kvkbJqfJbQUidofmddL[QMmk`kvkbJqfJbQdL[QMmYOk`kvkdL[QMmk`kvkhoQiYQk`kvkiQ^JmYdbkWYfkobokQO+mJL^Q`kvkOLko``JivYbSdokQiOQSYbQO  @ @ @ @     !#%')049>Av1@P  @ @ @ @ @ @ @ @ @ @ @ @DDDDDDDDD D D D DDDDDDDDDDDDD$D%FFFD)D*FF F F F F D+D,D-DFDGDHD@DADBD=D>D?D:D;D<D7D8D9!D4!D5!D6#D1#D2#D3%DC%DD%DE'D.'D/'D0)D&)D')D(0DI0DJ0DK4DL4DM4DN9DO9F9F>F>F>FAFAFAFDDDD DD DDDD!D"D#v1 v1@    d _ Z  G q  0 =X5a&Aa&L@qL@unused_tablej@EFFF:::::::8 @>|L@|L@ MSysNavPaneObjectIDsjJJJJJJJJJJH 9ͬL@ͬL@ MSysNavPaneGroupToObjectsjTTTTTTTTTTR 4ͬL@ͬL@ MSysNavPaneGroupsjDDDDDDDDDDB 0ͬL@ͬL@ MSysNavPaneGroupCategoriesjVVVVVVVVVVT )dL@aIL@ MSysAccessStoragejDDDDDDDDDDB  ͬL@ͬL@UserDefinedj@EDDD88888886 @ ͬL@ͬL@SummaryInfoj@EDDD88888886 @ͬL@ͬL@SysRelj.........., ͬL@ͬL@Scriptsj0000000000. ͬL@ͬL@Reportsj0000000000. ͬL@ͬL@Modulesj0000000000. ͬL@ͬL@Formsj,,,,,,,,,,* ͬL@ͬL@DataAccessPagesj@@@@@@@@@@> 'L@L@MSysComplexType_AttachmentTTTTTTTTTTT %L@L@MSysComplexType_TextHHHHHHHHHHH #L@L@MSysComplexType_DecimalNNNNNNNNNNN !L@L@MSysComplexType_GUIDHHHHHHHHHHH L@L@MSysComplexType_IEEEDoubleTTTTTTTTTTT L@L@MSysComplexType_IEEESingleTTTTTTTTTTT L@L@MSysComplexType_LongHHHHHHHHHHH L@L@MSysComplexType_ShortJJJJJJJJJJJ L@L@MSysComplexType_UnsignedByteXXXXXXXXXXX L@L@MSysComplexColumnsDDDDDDDDDDD L@L@MSysRelationshipsk DDDDDDDDDDB L@L@MSysQueriesk 88888888886 L@L@MSysACEsk 22222222220 L@L@MSysObjectsk 88888888886 L@=EL@MSysDbj@E:::......., @L@L@Relationshipsk <<<<<<<<<<: L@L@Databasesk 44444444442 L@L@Tablesk .........., D YN Y Y  Y Y Y ColumnNameComplexID&ComplexTypeObjectID"ConceptualTableIDFlatTableIDYYY(IdxConceptualTableIDIdxFlatTableID IdxIDv1@    fYkNY  ValuedvfYkNY  ValuedvfYkNY  ValuedvfYkNY  ValuedvfYkN  Y  ValuedvfYkN""Y  ValuedvfYkN$$Yk ValuedvfYkN&& Y k ValuedvaYN(( Y aY  Y Y  Y  Y FileDataFileFlagsFileNameFileTimeStampFileTypeFileURL((((Hv1bV Y1N**  Y PY Y ` Y , Y ,Y CY 
CDateCreateDateUpdateIdLvNameParentIdType,C,C*-C,։,C,C*.C,ց,C,C*/C,ցYYYIdParentIdIdParentIdName**v1b@ )&@c$ o q ( =  ˥P ʼq(IHHeH&HJuL@uL@1PROJECT,($ wIuL@uL@0PROJECTwm0,( w@uL@uL@/dir$  wHuL@uL@._VBA_PROJECT62. wKuL@uL@-AcessVBAData62. wiL@iL@,@,PROJECT840" iL@iL@+PROJECTwm>:6$ iL@iL@*@,dir0,(" iL@iL@)@,_VBA_PROJECTB>:" iL@iL@( AcessVBADataNJF.  L@uL@VBA$  whL@uL@VBAProject2.* w L@ L@MSysDbDirData PLH8 ͬL@ͬL@ImExSpecs0,( wͬL@ͬL@CustomGroups62. wͬL@ͬL@ Blob &" wͬL@ͬL@ 0   wͬL@ L@ Databases0,( wͬL@ͬL@ DataAccessPages<84 wͬL@ͬL@ Cmdbars,($ wͬL@uL@VBA$  wͬL@ͬL@Scripts,($ wͬL@ͬL@Modules,($ wͬL@ͬL@Reports,($ wͬL@ͬL@Forms($  wͬL@bOL@ijPropDataPLH8 ͬL@ͬL@MSysAccessStorage_SCRATCHPLH wͬL@bOL@MSysAccessStorage_ROOTJFB wnLVAL aam  aID="{AB5155F2-F476-415A-B1FA-54F6FF3631D6}" Name="Database1" HelpContextID="0" VersionCompatible32="393222000" CMG="080AE3F1E7F1E7F1E7F1E7" DPB="1012FBFCFCFCFCFC" GC="181AF304F404F4FB" [Host Extender Info] &H00000001={3832D640-CF90-11CF-8E43-00A0C911005A};VBE;&H00000000 am  *\G{000204EF-0000-0000-C000-000000000046}#4.0#9#C:\PROGRA~1\COMMON~1\MICROS~1\VBA\VBA6\VBE6.DLL#Visual Basic For Applications*\G{4AFFC9A0-5F99-101B-AF4E-00AA003F0F07}#9.0#0#C:\Program Files\Microsoft Office\Office12\MSACC.OLB#Microsoft Access 12.0 Object Library*\G{00020430-0000-0000-C000-000000000046}#2.0#0#C:\Windows\system32\stdole2.tlb#OLE Automation:*\G{4AC9E1DA-5BAD-4AC7-86E3-24F4CDCECA28}#c.0#0#C:\PROGRA~1\COMMON~1\MICROS~1\OFFICE12\ACEDAO.DLL#Microsoft Office 12.0 Access database engine Object Library GbIzx $AccessVBAWin16~Win32MacVBA6# Database1Lstdole`DAO<   0* pHd DatabaseD10@ = p GbIJ J< rs@tdole>stdol@e h%^*\G{00020430-C 0046}#2.0#0#C:\Windows\system32\e2.tlb#OLE AutomatioFn`DAO>JDAjOA A4AC9E1DA- 5BAD-7-86E3-24F4CDCECA28}#cAPROGRA~1\COMMONMICROSOFFICE12\ACE9.DLL#Microsoft Office 1f Access d engine Object LibraryOz Q ) @ @ @++++++++ + +  +  +  + + ++++-+!.+"/+#0+$1+% )@@@@@@+++++++ + +  + + +++-+!  
+  +  + +0+$1+%.+"/+#5 )@@@@@M`OLJik+Mokmd`Uidofk+ OJmJJMMQkkfJUQk+ OJmJLJkQk+ Sdi`k+Y`QukfQMk+`dOo^Qk+`kvkJMMQkkkmdiJUQ+iddm+fidfOJmJ+iQfdimk+kMiYfmk+qLJ+`kvkJMMQkkkmdiJUQ+kMiJmMW+JMQkkqLJOJmJ+!qLJfid[QMm+ 6+  OYiOJmJ+ L^dL+ fid[QMm+%fid[QMms`+$qLJ++qLJ+fid[QMm+"OYi+#>YN11 Y Y PY  Y Y ,VY V`Y ` Filter FlagsIdNamePosition SelectedObjectIDType`V,`V,13,V`YIdv1m0Custom ^ V v0222 RY N55  Y Y Y  Y Y ,VY V`Y ` FlagsGroupCategoryIDIdName"Object Type GroupObjectIDPosition`V,`V,57,V``V,`V,58,V`YYGroupCategoryIDIdv1@@ 4 yLO"  A w  w Custom Group 162.*  w w w w w w w w 4 @66 666666666  4 @66666666 6 6  6  YN::Y Y Y Y  Y ,VY V`Y ` FlagsGroupIDIconIdNameObjectIDPositionV,`V,`V:<`,VV,`V,`V:=`,VYYGroupIDIdv1 9 A o ? o9 ; ;9;;RY eehaN??Y  Y  _Y _IdNameTypedv)uL@uL@/@ ,dir0,(" &YANBBY IDAABCAYPrimaryKeyv1 AfPvcP=*z @ -  } j C 0  m Z G p\I6"r_K8%taN:'9j 4k 4ThԖ_,GkSިtҀ5iOQH4&{OXR7\6·2Tz jFyܣŗWܨ$lOFּKweAm ٹB6TU*)p 4j 0k 0ThԖ_,GkSިtҀ5iOQH4&{OXR7\6·2Tz jFyܣŗWܨ$lOFּKweAm ٹB6TU*)p 0j k hԖ_,GkSިtҀ5iOQH4&{OXR7\6·2Tz jFyܣŗWܨ$lOFּKweAm ٹB6TU*)p j %k %hԖ_,GkSިtҀ5iOQH4&{OXR7\6·2Tz jFyܣŗWܨ$lOFּKweAm ٹB6TU*)p %j k hԖ_,GkSިtҀ5iOQH4&{OXR7\6·2Tz jFyܣŗWܨ$lOFּKweAm ٹB6TU*)p j k hԖ_,GkSިtҀ5iOQH4&{OXR7\6·2Tz jFyܣŗWܨ$lOFּKweAm ٹB6TU*)p j k hԖ_,GkSިtҀ5iOQH4&{OXR7\6·2Tz jFyܣŗWܨ$lOFּKweAm ٹB6TU*)p j k hԖ_,GkSިtҀ5iOQH4&{OXR7\6·2Tz jFyܣŗWܨ$lOFּKweAm ٹB6TU*)p j !k !hԖ_,GkSިtҀ5iOQH4&{OXR7\6·2Tz jFyܣŗWܨ$lOFּKweAm ٹB6TU*)p !j #k #hԖ_,GkSިtҀ5iOQH4&{OXR7\6·2Tz jFyܣŗWܨ$lOFּKweAm ٹB6TU*)p #j 'k 'hԖ_,GkSިtҀ5iOQH4&{OXR7\6·2Tz jFyܣŗWܨ$lOFּKweAm ٹB6TU*)p 'j k hԖ_,GkSިtҀ5iOQH4&{OXR7\6·2Tz jFyܣŗWܨ$lOFּKweAm ٹB6TU*)p j ThԖ_,GkSިtҀ5iOQH4&{OXR7\6·2Tz jFyܣŗWܨ$lOFּKweAm ٹB6TU*)p j )k )hԖ_,GkSިtҀ5iOQH4&{OXR7\6·2Tz jFyܣŗWܨ$lOFּKweAm ٹB6TU*)p )j k k hԖ_,GkSިtҀ5iOQH4&{OXR7\6·2Tz jFyܣŗWܨ$lOFּKweAm ٹB6TU*)p k j k hԖ_,GkSިtҀ5iOQH4&{OXR7\6·2Tz jFyܣŗWܨ$lOFּKweAm ٹB6TU*)p j k k j hԖ_,GkSިtҀ5iOQH4&{OXR7\6·2Tz jFyܣŗWܨ$lOFּKweAm ٹB6TU*)p k k j hԖ_,GkSިtҀ5iOQH4&{OXR7\6·2Tz jFyܣŗWܨ$lOFּKweAm ٹB6TU*)p k k j hԖ_,GkSިtҀ5iOQH4&{OXR7\6·2Tz 
jFyܣŗWܨ$lOFּKweAm ٹB6TU*)p k k hԖ_,GkSިtҀ5iOQH4&{OXR7\6·2Tz jFyܣŗWܨ$lOFּKweAm ٹB6TU*)p j k k j hԖ_,GkSިtҀ5iOQH4&{OXR7\6·2Tz jFyܣŗWܨ$lOFּKweAm ٹB6TU*)p k k j hԖ_,GkSިtҀ5iOQH4&{OXR7\6·2Tz jFyܣŗWܨ$lOFּKweAm ٹB6TU*)p k j k j hԖ_,GkSިtҀ5iOQH4&{OXR7\6·2Tz jFyܣŗWܨ$lOFּKweAm ٹB6TU*)p k LVAL` MR2ANSI Query Mode(Themed Form ControlsAccessVersion NavPane CategoryUseMDIMode ShowDocumentTabs BuildHasOfflineLists>Picture Property Storage Format.CheckTruncatedNumFieldsProjVerNavPane ClosedNavPane Width*NavPane Category NameNavPane View ByNavPane Sort By    09.50     F     W      MR2ANSI Query Mode(Themed Form ControlsAccessVersion NavPane CategoryUseMDIMode ShowDocumentTabs BuildHasOfflineLists>Picture Property Storage Format.CheckTruncatedNumFieldsProjVerNavPane ClosedNavPane Width*NavPane Category NameNavPane View ByNavPane Sort By    09.50     F     W      MR2 ValidationRuleValidationTextOrientation FilterOrderByOrderByOnNameMapDefaultViewGUID8DisplayViewsOnSharePointSiteTotalsRowFilterOnLoadOrderByOnLoadHideNewFieldColumnWidthColumnOrderColumnHiddenDescription FormatCaptionSmartTagsTextAlignAggregateType  6 . U   SHgCTt          F ID     MR2h ReplicateProject>DisplayAllViewsOnSharePointSite!  
MR20 Title AuthorCompanyl Database1, $Michael Kleehammer CheckFree5 vcO<)   x e Ak AhԖ_,GkSިtҀ5iOQH4&{OXR7\6·2Tz jFyܣŗWܨ$lOFּKweAm ٹB6TU*)p Aj k hԖ_,GkSިtҀ5iOQH4&{OXR7\6·2Tz jFyܣŗWܨ$lOFּKweAm ٹB6TU*)p j k hԖ_,GkSިtҀ5iOQH4&{OXR7\6·2Tz jFyܣŗWܨ$lOFּKweAm ٹB6TU*)p j k ThԖ_,GkSިtҀ5iOQH4&{OXR7\6·2Tz jFyܣŗWܨ$lOFּKweAm ٹB6TU*)p j >k >ThԖ_,GkSިtҀ5iOQH4&{OXR7\6·2Tz jFyܣŗWܨ$lOFּKweAm ٹB6TU*)p >j 9k 9ThԖ_,GkSިtҀ5iOQH4&{OXR7\6·2Tz jFyܣŗWܨ$lOFּKweAm ٹB6TU*)p 4 > h=}0 k " e  a @  j 3 Aunused_table">MSysNavPaneObjectIDs2.9MSysNavPaneGroupToObjects<84MSysNavPaneGroups,(0MSysNavPaneGroupCategories>:)MSysAccessStorage,(UserDefined SummaryInfo SysRelScriptsReportsModulesFormsDataAccessPages($'MSysComplexType_Attachment>:%MSysComplexType_Text2.#MSysComplexType_Decimal84!MSysComplexType_GUID2.MSysComplexType_IEEEDouble>:MSysComplexType_IEEESingle>:MSysComplexType_Long2.MSysComplexType_Short40MSysComplexType_UnsignedByteB>MSysComplexColumns.*MSysRelationships,(MSysQueries MSysACEsMSysObjects MSysDbRelationships$ DatabasesTables)uL@uL@.@ ,_VBA_PROJECTB>:" )uL@uL@0PROJECTwm>:6$ )uL@uL@1@ ,PROJECT840" )uL@uL@- AcessVBADataNJF. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1619054768.0 pyodbc-4.0.32/tests3/empty.mdb0000664000175000017500000056000000000000000017473 0ustar00mkleehammermkleehammerStandard Jet DBnb` Ugr@?~1y0̝cßFNa7:ޜ(t,`3{6߱nC53Sy[/|*| WL@WL@SysRelq(.........., WL@WL@Modulesq(0000000000. WL@WL@Scriptsq(0000000000. WL@WL@Reportsq(0000000000. 
WL@WL@Formsq(,,,,,,,,,,*  WL@WL@UserDefinedq(@DDD88888886 @ WL@WL@SummaryInfoq(@DDD88888886 @9L@9L@MSysAccessObjectsq(DDDDDDDDDDB sL@sL@MSysRelationshipsp*DDDDDDDDDDB sL@sL@MSysQueriesp*88888888886 sL@sL@MSysACEsp*22222222220 sL@sL@MSysObjectsp*88888888886 sL@T紐L@MSysDbq(H@:::......., @sL@sL@Relationshipsp*<<<<<<<<<<: sL@sL@Databasesp*44444444442 sL@sL@Tablesp*.........., C{hUB/A S @ ,  |  B /   l Y 2 o\I"r_K8+p( +TsXnTA!VXa\ΊOx53dY9Yߡ!ov_ sCWw'L KwPn|wboZ%%ɚoe*ltKep +q( !p( !TsXnTA!VXa\ΊOx53dY9Yߡ!ov_ sCWw'L KwPn|wboZ%%ɚoe*ltKep !q( p( TsXnTA!VXa\ΊOx53dY9Yߡ!ov_ sCWw'L KwPn|wboZ%%ɚoe*ltKep q( p( TsXnTA!VXa\ΊOx53dY9Yߡ!ov_ sCWw'L KwPn|wboZ%%ɚoe*ltKep q( p- q( sXnTA!VXa\ΊOx53dY9Yߡ!ov_ sCWw'L KwPn|wboZ%%ɚoe*ltKep p( p- q( sXnTA!VXa\ΊOx53dY9Yߡ!ov_ sCWw'L KwPn|wboZ%%ɚoe*ltKep p( p- q( sXnTA!VXa\ΊOx53dY9Yߡ!ov_ sCWw'L KwPn|wboZ%%ɚoe*ltKep p( p- q( sXnTA!VXa\ΊOx53dY9Yߡ!ov_ sCWw'L KwPn|wboZ%%ɚoe*ltKep p( p- q( sXnTA!VXa\ΊOx53dY9Yߡ!ov_ sCWw'L KwPn|wboZ%%ɚoe*ltKep p( p- q( sXnTA!VXa\ΊOx53dY9Yߡ!ov_ sCWw'L KwPn|wboZ%%ɚoe*ltKep p( p( p( p( sXnTA!VXa\ΊOx53dY9Yߡ!ov_ sCWw'L KwPn|wboZ%%ɚoe*ltKep q( p( sXnTA!VXa\ΊOx53dY9Yߡ!ov_ sCWw'L KwPn|wboZ%%ɚoe*ltKep p( sXnTA!VXa\ΊOx53dY9Yߡ!ov_ sCWw'L KwPn|wboZ%%ɚoe*ltKep p( p( sXnTA!VXa\ΊOx53dY9Yߡ!ov_ sCWw'L KwPn|wboZ%%ɚoe*ltKep p( sXnTA!VXa\ΊOx53dY9Yߡ!ov_ sCWw'L KwPn|wboZ%%ɚoe*ltKep TsXnTA!VXa\ΊOx53dY9Yߡ!ov_ sCWw'L KwPn|wboZ%%ɚoe*ltKep TsXnTA!VXa\ΊOx53dY9Yߡ!ov_ sCWw'L KwPn|wboZ%%ɚoe*ltKep sXnTA!VXa\ΊOx53dY9Yߡ!ov_ sCWw'L KwPn|wboZ%%ɚoe*ltKep p( q( q( q( p- q( p- q( q( q( q( a LVALhhΓ MR2ANSI Query Mode(Themed Form ControlsAccessVersion NavPane CategoryUseMDIMode ShowDocumentTabs Build.CheckTruncatedNumFieldsNavPane ClosedNavPane Width*NavPane Category NameNavPane View ByNavPane Sort By    08.50      MR2ANSI Query Mode(Themed Form ControlsAccessVersion NavPane CategoryUseMDIMode ShowDocumentTabs Build.CheckTruncatedNumFieldsNavPane ClosedNavPane Width*NavPane Category NameNavPane View ByNavPane Sort By    08.50            MR2h 
ReplicateProject>DisplayAllViewsOnSharePointSite!  MR20 Title AuthorCompanyd empty, $Michael Kleehammer CheckFree Y̯klNY Y kDataIDklklkl`kYAOIndexv1@K[fVwccessObjectsq(DDDDDDDDDDB K[ࡱ> Root Entry\8PropData Forms P/ꏊP/ꏊReportsP/ꏊP/ꏊiMSysDbModulesP/ꏊP/ꏊScriptsP/ꏊP/ꏊVBAP/ꏊP/ꏊCmdbarsP/ꏊP/ꏊDataAccessPages P/ꏊP/ꏊDatabases P/ꏊ\80 P/ꏊ\8Blob CustomGroups P/ꏊP/ꏊImExSpecsP/ꏊP/ꏊDirDataK[>YN Y Y fqY  Y Y @LY Y  Filter FlagsIdNamePosition SelectedObjectIDType,,,YIdv1 %%% RY N Y Y Y  Y Y ,Y Y  FlagsGroupCategoryIDIdName"Object Type GroupObjectIDPosition,,,,, ,YYGroupCategoryIDIdv1@@  @&&&&&&&& & &  &   @&&&&&&&&&& &  YN""Y Y Y Y @ Y ,Y Y  FlagsGroupIDIconIdNameObjectIDPosition,,"#,,,"$,YYGroupIDIdv1!*! *qCustomg e e  yLO"" ' }  } Custom Group 162.*&" } } } } } } } }&Y@{3N((Y ID(()YPrimaryKey '!'  wRYsL@N,,Y  Y LY IdNameTypedv  +h=T)  M  I +MSysNavPaneObjectIDs2.!MSysNavPaneGroupToObjects<8MSysNavPaneGroups,(MSysNavPaneGroupCategories>:DataAccessPages($SysRelModulesScriptsReportsFormsUserDefined SummaryInfo MSysAccessObjects,(MSysRelationships,(MSysQueries MSysACEsMSysObjects MSysDbRelationships$ DatabasesTables././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1619054768.0 pyodbc-4.0.32/tests3/exceltests.py0000664000175000017500000001112200000000000020402 0ustar00mkleehammermkleehammer#!/usr/bin/python # Tests for reading from Excel files. # # I have not been able to successfully create or modify Excel files. 
import sys, os, re import unittest from os.path import abspath from testutils import * CNXNSTRING = None class ExcelTestCase(unittest.TestCase): def __init__(self, method_name): unittest.TestCase.__init__(self, method_name) def setUp(self): self.cnxn = pyodbc.connect(CNXNSTRING, autocommit=True) self.cursor = self.cnxn.cursor() for i in range(3): try: self.cursor.execute("drop table t%d" % i) self.cnxn.commit() except: pass self.cnxn.rollback() def tearDown(self): try: self.cursor.close() self.cnxn.close() except: # If we've already closed the cursor or connection, exceptions are thrown. pass def test_getinfo_string(self): value = self.cnxn.getinfo(pyodbc.SQL_CATALOG_NAME_SEPARATOR) self.assertTrue(isinstance(value, str)) def test_getinfo_bool(self): value = self.cnxn.getinfo(pyodbc.SQL_ACCESSIBLE_TABLES) self.assertTrue(isinstance(value, bool)) def test_getinfo_int(self): value = self.cnxn.getinfo(pyodbc.SQL_DEFAULT_TXN_ISOLATION) self.assertTrue(isinstance(value, (int, long))) def test_getinfo_smallint(self): value = self.cnxn.getinfo(pyodbc.SQL_CONCAT_NULL_BEHAVIOR) self.assertTrue(isinstance(value, int)) def test_read_sheet(self): # The first method of reading data is to access worksheets by name in this format [name$]. # # Our second sheet is named Sheet2 and has two columns. The first has values 10, 20, 30, etc. rows = self.cursor.execute("select * from [Sheet2$]").fetchall() self.assertEqual(len(rows), 5) for index, row in enumerate(rows): self.assertEqual(row.s2num, float(index + 1) * 10) def test_read_range(self): # The second method of reading data is to assign a name to a range of cells and access that as a table. # # Our first worksheet has a section named Table1. The first column has values 1, 2, 3, etc. 
rows = self.cursor.execute("select * from Table1").fetchall() self.assertEqual(len(rows), 10) for index, row in enumerate(rows): self.assertEqual(row.num, float(index + 1)) self.assertEqual(row.val, chr(ord('a') + index)) def test_tables(self): # This is useful for figuring out what is available tables = [ row.table_name for row in self.cursor.tables() ] assert 'Sheet2$' in tables, 'tables: %s' % ' '.join(tables) # def test_append(self): # rows = self.cursor.execute("select s2num, s2val from [Sheet2$]").fetchall() # # print rows # # nextnum = max([ row.s2num for row in rows ]) + 10 # # self.cursor.execute("insert into [Sheet2$](s2num, s2val) values (?, 'z')", nextnum) # # row = self.cursor.execute("select s2num, s2val from [Sheet2$] where s2num=?", nextnum).fetchone() # self.assertTrue(row) # # print 'added:', nextnum, len(rows), 'rows' # # self.assertEqual(row.s2num, nextnum) # self.assertEqual(row.s2val, 'z') # # self.cnxn.commit() def main(): from optparse import OptionParser parser = OptionParser() #usage=usage) parser.add_option("-v", "--verbose", action="count", help="Increment test verbosity (can be used multiple times)") parser.add_option("-d", "--debug", action="store_true", default=False, help="Print debugging items") parser.add_option("-t", "--test", help="Run only the named test") (options, args) = parser.parse_args() if args: parser.error('no arguments expected') global CNXNSTRING path = dirname(abspath(__file__)) filename = join(path, 'test.xls') assert os.path.exists(filename) CNXNSTRING = 'Driver={Microsoft Excel Driver (*.xls)};DBQ=%s;READONLY=FALSE' % filename if options.verbose: cnxn = pyodbc.connect(CNXNSTRING, autocommit=True) print_library_info(cnxn) cnxn.close() suite = load_tests(ExcelTestCase, options.test) testRunner = unittest.TextTestRunner(verbosity=options.verbose) result = testRunner.run(suite) return result if __name__ == '__main__': # Add the build directory to the path so we're testing the latest build, not the installed version. 
add_to_path() import pyodbc sys.exit(0 if main().wasSuccessful() else 1) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1629143953.0 pyodbc-4.0.32/tests3/informixtests.py0000664000175000017500000013213400000000000021144 0ustar00mkleehammermkleehammer#!/usr/bin/python # -*- coding: latin-1 -*- usage = """\ usage: %prog [options] connection_string Unit tests for Informix DB. To use, pass a connection string as the parameter. The tests will create and drop tables t1 and t2 as necessary. These run using the version from the 'build' directory, not the version installed into the Python directories. You must run python setup.py build before running the tests. You can also put the connection string into a tmp/setup.cfg file like so: [informixtests] connection-string=DRIVER={IBM INFORMIX ODBC DRIVER (64-bit)};SERVER=localhost;UID=uid;PWD=pwd;DATABASE=db """ import sys, os, re import unittest from decimal import Decimal from datetime import datetime, date, time from os.path import join, getsize, dirname, abspath from testutils import * _TESTSTR = '0123456789-abcdefghijklmnopqrstuvwxyz-' def _generate_test_string(length): """ Returns a string of `length` characters, constructed by repeating _TESTSTR as necessary. To enhance performance, there are 3 ways data is read, based on the length of the value, so most data types are tested with 3 lengths. This function helps us generate the test data. We use a recognizable data set instead of a single character to make it less likely that "overlap" errors will be hidden and to help us manually identify where a break occurs. 
""" if length <= len(_TESTSTR): return _TESTSTR[:length] c = (length + len(_TESTSTR)-1) / len(_TESTSTR) v = _TESTSTR * c return v[:length] class InformixTestCase(unittest.TestCase): SMALL_FENCEPOST_SIZES = [ 0, 1, 255, 256, 510, 511, 512, 1023, 1024, 2047, 2048, 4000 ] LARGE_FENCEPOST_SIZES = [ 4095, 4096, 4097, 10 * 1024, 20 * 1024 ] ANSI_FENCEPOSTS = [ _generate_test_string(size) for size in SMALL_FENCEPOST_SIZES ] UNICODE_FENCEPOSTS = [ unicode(s) for s in ANSI_FENCEPOSTS ] IMAGE_FENCEPOSTS = ANSI_FENCEPOSTS + [ _generate_test_string(size) for size in LARGE_FENCEPOST_SIZES ] def __init__(self, method_name, connection_string): unittest.TestCase.__init__(self, method_name) self.connection_string = connection_string def setUp(self): self.cnxn = pyodbc.connect(self.connection_string) self.cursor = self.cnxn.cursor() for i in range(3): try: self.cursor.execute("drop table t%d" % i) self.cnxn.commit() except: pass for i in range(3): try: self.cursor.execute("drop procedure proc%d" % i) self.cnxn.commit() except: pass try: self.cursor.execute('drop function func1') self.cnxn.commit() except: pass self.cnxn.rollback() def tearDown(self): try: self.cursor.close() self.cnxn.close() except: # If we've already closed the cursor or connection, exceptions are thrown. 
pass def test_multiple_bindings(self): "More than one bind and select on a cursor" self.cursor.execute("create table t1(n int)") self.cursor.execute("insert into t1 values (?)", 1) self.cursor.execute("insert into t1 values (?)", 2) self.cursor.execute("insert into t1 values (?)", 3) for i in range(3): self.cursor.execute("select n from t1 where n < ?", 10) self.cursor.execute("select n from t1 where n < 3") def test_different_bindings(self): self.cursor.execute("create table t1(n int)") self.cursor.execute("create table t2(d datetime)") self.cursor.execute("insert into t1 values (?)", 1) self.cursor.execute("insert into t2 values (?)", datetime.now()) def test_drivers(self): p = pyodbc.drivers() self.assertTrue(isinstance(p, list)) def test_datasources(self): p = pyodbc.dataSources() self.assertTrue(isinstance(p, dict)) def test_getinfo_string(self): value = self.cnxn.getinfo(pyodbc.SQL_CATALOG_NAME_SEPARATOR) self.assertTrue(isinstance(value, str)) def test_getinfo_bool(self): value = self.cnxn.getinfo(pyodbc.SQL_ACCESSIBLE_TABLES) self.assertTrue(isinstance(value, bool)) def test_getinfo_int(self): value = self.cnxn.getinfo(pyodbc.SQL_DEFAULT_TXN_ISOLATION) self.assertTrue(isinstance(value, (int, long))) def test_getinfo_smallint(self): value = self.cnxn.getinfo(pyodbc.SQL_CONCAT_NULL_BEHAVIOR) self.assertTrue(isinstance(value, int)) def test_noscan(self): self.assertEqual(self.cursor.noscan, False) self.cursor.noscan = True self.assertEqual(self.cursor.noscan, True) def test_guid(self): self.cursor.execute("create table t1(g1 uniqueidentifier)") self.cursor.execute("insert into t1 values (newid())") v = self.cursor.execute("select * from t1").fetchone()[0] self.assertEqual(type(v), str) self.assertEqual(len(v), 36) def test_nextset(self): self.cursor.execute("create table t1(i int)") for i in range(4): self.cursor.execute("insert into t1(i) values(?)", i) self.cursor.execute("select i from t1 where i < 2 order by i; select i from t1 where i >= 2 order by i") 
for i, row in enumerate(self.cursor): self.assertEqual(i, row.i) self.assertEqual(self.cursor.nextset(), True) for i, row in enumerate(self.cursor): self.assertEqual(i + 2, row.i) def test_fixed_unicode(self): value = u"t\xebsting" self.cursor.execute("create table t1(s nchar(7))") self.cursor.execute("insert into t1 values(?)", u"t\xebsting") v = self.cursor.execute("select * from t1").fetchone()[0] self.assertEqual(type(v), unicode) self.assertEqual(len(v), len(value)) # If we alloc'd wrong, the test below might work because of an embedded NULL self.assertEqual(v, value) def _test_strtype(self, sqltype, value, colsize=None): """ The implementation for string, Unicode, and binary tests. """ assert colsize is None or (value is None or colsize >= len(value)) if colsize: sql = "create table t1(s %s(%s))" % (sqltype, colsize) else: sql = "create table t1(s %s)" % sqltype self.cursor.execute(sql) self.cursor.execute("insert into t1 values(?)", value) v = self.cursor.execute("select * from t1").fetchone()[0] self.assertEqual(type(v), type(value)) if value is not None: self.assertEqual(len(v), len(value)) self.assertEqual(v, value) # Reported by Andy Hochhaus in the pyodbc group: In 2.1.7 and earlier, a hardcoded length of 255 was used to # determine whether a parameter was bound as a SQL_VARCHAR or SQL_LONGVARCHAR. Apparently SQL Server chokes if # we bind as a SQL_LONGVARCHAR and the target column size is 8000 or less, which is considers just SQL_VARCHAR. # This means binding a 256 character value would cause problems if compared with a VARCHAR column under # 8001. We now use SQLGetTypeInfo to determine the time to switch. # # [42000] [Microsoft][SQL Server Native Client 10.0][SQL Server]The data types varchar and text are incompatible in the equal to operator. self.cursor.execute("select * from t1 where s=?", value) def _test_strliketype(self, sqltype, value, colsize=None): """ The implementation for text, image, ntext, and binary. 
These types do not support comparison operators. """ assert colsize is None or (value is None or colsize >= len(value)) if colsize: sql = "create table t1(s %s(%s))" % (sqltype, colsize) else: sql = "create table t1(s %s)" % sqltype self.cursor.execute(sql) self.cursor.execute("insert into t1 values(?)", value) v = self.cursor.execute("select * from t1").fetchone()[0] self.assertEqual(type(v), type(value)) if value is not None: self.assertEqual(len(v), len(value)) self.assertEqual(v, value) # # varchar # def test_varchar_null(self): self._test_strtype('varchar', None, 100) # Generate a test for each fencepost size: test_varchar_0, etc. def _maketest(value): def t(self): self._test_strtype('varchar', value, len(value)) return t for value in ANSI_FENCEPOSTS: locals()['test_varchar_%s' % len(value)] = _maketest(value) def test_varchar_many(self): self.cursor.execute("create table t1(c1 varchar(300), c2 varchar(300), c3 varchar(300))") v1 = 'ABCDEFGHIJ' * 30 v2 = '0123456789' * 30 v3 = '9876543210' * 30 self.cursor.execute("insert into t1(c1, c2, c3) values (?,?,?)", v1, v2, v3); row = self.cursor.execute("select c1, c2, c3, len(c1) as l1, len(c2) as l2, len(c3) as l3 from t1").fetchone() self.assertEqual(v1, row.c1) self.assertEqual(v2, row.c2) self.assertEqual(v3, row.c3) def test_varchar_upperlatin(self): self._test_strtype('varchar', '') # # unicode # def test_unicode_null(self): self._test_strtype('nvarchar', None, 100) # Generate a test for each fencepost size: test_unicode_0, etc. 
def _maketest(value): def t(self): self._test_strtype('nvarchar', value, len(value)) return t for value in UNICODE_FENCEPOSTS: locals()['test_unicode_%s' % len(value)] = _maketest(value) def test_unicode_upperlatin(self): self._test_strtype('varchar', '') # # binary # def test_null_binary(self): self._test_strtype('varbinary', None, 100) def test_large_null_binary(self): # Bug 1575064 self._test_strtype('varbinary', None, 4000) # Generate a test for each fencepost size: test_unicode_0, etc. def _maketest(value): def t(self): self._test_strtype('varbinary', buffer(value), len(value)) return t for value in ANSI_FENCEPOSTS: locals()['test_binary_%s' % len(value)] = _maketest(value) # # image # def test_image_null(self): self._test_strliketype('image', None) # Generate a test for each fencepost size: test_unicode_0, etc. def _maketest(value): def t(self): self._test_strliketype('image', buffer(value)) return t for value in IMAGE_FENCEPOSTS: locals()['test_image_%s' % len(value)] = _maketest(value) def test_image_upperlatin(self): self._test_strliketype('image', buffer('')) # # text # # def test_empty_text(self): # self._test_strliketype('text', buffer('')) def test_null_text(self): self._test_strliketype('text', None) # Generate a test for each fencepost size: test_unicode_0, etc. def _maketest(value): def t(self): self._test_strliketype('text', value) return t for value in ANSI_FENCEPOSTS: locals()['test_text_%s' % len(value)] = _maketest(value) def test_text_upperlatin(self): self._test_strliketype('text', '') # # bit # def test_bit(self): value = True self.cursor.execute("create table t1(b bit)") self.cursor.execute("insert into t1 values (?)", value) v = self.cursor.execute("select b from t1").fetchone()[0] self.assertEqual(type(v), bool) self.assertEqual(v, value) # # decimal # def _decimal(self, precision, scale, negative): # From test provided by planders (thanks!) 
in Issue 91 self.cursor.execute("create table t1(d decimal(%s, %s))" % (precision, scale)) # Construct a decimal that uses the maximum precision and scale. decStr = '9' * (precision - scale) if scale: decStr = decStr + "." + '9' * scale if negative: decStr = "-" + decStr value = Decimal(decStr) self.cursor.execute("insert into t1 values(?)", value) v = self.cursor.execute("select d from t1").fetchone()[0] self.assertEqual(v, value) def _maketest(p, s, n): def t(self): self._decimal(p, s, n) return t for (p, s, n) in [ (1, 0, False), (1, 0, True), (6, 0, False), (6, 2, False), (6, 4, True), (6, 6, True), (38, 0, False), (38, 10, False), (38, 38, False), (38, 0, True), (38, 10, True), (38, 38, True) ]: locals()['test_decimal_%s_%s_%s' % (p, s, n and 'n' or 'p')] = _maketest(p, s, n) def test_decimal_e(self): """Ensure exponential notation decimals are properly handled""" value = Decimal((0, (1, 2, 3), 5)) # prints as 1.23E+7 self.cursor.execute("create table t1(d decimal(10, 2))") self.cursor.execute("insert into t1 values (?)", value) result = self.cursor.execute("select * from t1").fetchone()[0] self.assertEqual(result, value) def test_subquery_params(self): """Ensure parameter markers work in a subquery""" self.cursor.execute("create table t1(id integer, s varchar(20))") self.cursor.execute("insert into t1 values (?,?)", 1, 'test') row = self.cursor.execute(""" select x.id from ( select id from t1 where s = ? and id between ? and ? ) x """, 'test', 1, 10).fetchone() self.assertNotEqual(row, None) self.assertEqual(row[0], 1) def _exec(self): self.cursor.execute(self.sql) def test_close_cnxn(self): """Make sure using a Cursor after closing its connection doesn't crash.""" self.cursor.execute("create table t1(id integer, s varchar(20))") self.cursor.execute("insert into t1 values (?,?)", 1, 'test') self.cursor.execute("select * from t1") self.cnxn.close() # Now that the connection is closed, we expect an exception. 
(If the code attempts to use # the HSTMT, we'll get an access violation instead.) self.sql = "select * from t1" self.assertRaises(pyodbc.ProgrammingError, self._exec) def test_empty_string(self): self.cursor.execute("create table t1(s varchar(20))") self.cursor.execute("insert into t1 values(?)", "") def test_fixed_str(self): value = "testing" self.cursor.execute("create table t1(s char(7))") self.cursor.execute("insert into t1 values(?)", "testing") v = self.cursor.execute("select * from t1").fetchone()[0] self.assertEqual(type(v), str) self.assertEqual(len(v), len(value)) # If we alloc'd wrong, the test below might work because of an embedded NULL self.assertEqual(v, value) def test_empty_unicode(self): self.cursor.execute("create table t1(s nvarchar(20))") self.cursor.execute("insert into t1 values(?)", u"") def test_unicode_query(self): self.cursor.execute(u"select 1") def test_negative_row_index(self): self.cursor.execute("create table t1(s varchar(20))") self.cursor.execute("insert into t1 values(?)", "1") row = self.cursor.execute("select * from t1").fetchone() self.assertEqual(row[0], "1") self.assertEqual(row[-1], "1") def test_version(self): self.assertEqual(3, len(pyodbc.version.split('.'))) # 1.3.1 etc. # # date, time, datetime # def test_datetime(self): value = datetime(2007, 1, 15, 3, 4, 5) self.cursor.execute("create table t1(dt datetime)") self.cursor.execute("insert into t1 values (?)", value) result = self.cursor.execute("select dt from t1").fetchone()[0] self.assertEqual(type(value), datetime) self.assertEqual(value, result) def test_datetime_fraction(self): # SQL Server supports milliseconds, but Python's datetime supports nanoseconds, so the most granular datetime # supported is xxx000. 
value = datetime(2007, 1, 15, 3, 4, 5, 123000) self.cursor.execute("create table t1(dt datetime)") self.cursor.execute("insert into t1 values (?)", value) result = self.cursor.execute("select dt from t1").fetchone()[0] self.assertEqual(type(value), datetime) self.assertEqual(result, value) def test_datetime_fraction_rounded(self): # SQL Server supports milliseconds, but Python's datetime supports nanoseconds. pyodbc rounds down to what the # database supports. full = datetime(2007, 1, 15, 3, 4, 5, 123456) rounded = datetime(2007, 1, 15, 3, 4, 5, 123000) self.cursor.execute("create table t1(dt datetime)") self.cursor.execute("insert into t1 values (?)", full) result = self.cursor.execute("select dt from t1").fetchone()[0] self.assertEqual(type(result), datetime) self.assertEqual(result, rounded) def test_date(self): value = date.today() self.cursor.execute("create table t1(d date)") self.cursor.execute("insert into t1 values (?)", value) result = self.cursor.execute("select d from t1").fetchone()[0] self.assertEqual(type(value), date) self.assertEqual(value, result) def test_time(self): value = datetime.now().time() # We aren't yet writing values using the new extended time type so the value written to the database is only # down to the second. 
value = value.replace(microsecond=0) self.cursor.execute("create table t1(t time)") self.cursor.execute("insert into t1 values (?)", value) result = self.cursor.execute("select t from t1").fetchone()[0] self.assertEqual(type(value), time) self.assertEqual(value, result) def test_datetime2(self): value = datetime(2007, 1, 15, 3, 4, 5) self.cursor.execute("create table t1(dt datetime2)") self.cursor.execute("insert into t1 values (?)", value) result = self.cursor.execute("select dt from t1").fetchone()[0] self.assertEqual(type(value), datetime) self.assertEqual(value, result) # # ints and floats # def test_int(self): value = 1234 self.cursor.execute("create table t1(n int)") self.cursor.execute("insert into t1 values (?)", value) result = self.cursor.execute("select n from t1").fetchone()[0] self.assertEqual(result, value) def test_negative_int(self): value = -1 self.cursor.execute("create table t1(n int)") self.cursor.execute("insert into t1 values (?)", value) result = self.cursor.execute("select n from t1").fetchone()[0] self.assertEqual(result, value) def test_bigint(self): input = 3000000000 self.cursor.execute("create table t1(d bigint)") self.cursor.execute("insert into t1 values (?)", input) result = self.cursor.execute("select d from t1").fetchone()[0] self.assertEqual(result, input) def test_float(self): value = 1234.567 self.cursor.execute("create table t1(n float)") self.cursor.execute("insert into t1 values (?)", value) result = self.cursor.execute("select n from t1").fetchone()[0] self.assertEqual(result, value) def test_negative_float(self): value = -200 self.cursor.execute("create table t1(n float)") self.cursor.execute("insert into t1 values (?)", value) result = self.cursor.execute("select n from t1").fetchone()[0] self.assertEqual(value, result) # # stored procedures # # def test_callproc(self): # "callproc with a simple input-only stored procedure" # pass def test_sp_results(self): self.cursor.execute( """ Create procedure proc1 AS select top 10 
name, id, xtype, refdate from sysobjects """) rows = self.cursor.execute("exec proc1").fetchall() self.assertEqual(type(rows), list) self.assertEqual(len(rows), 10) # there has to be at least 10 items in sysobjects self.assertEqual(type(rows[0].refdate), datetime) def test_sp_results_from_temp(self): # Note: I've used "set nocount on" so that we don't get the number of rows deleted from #tmptable. # If you don't do this, you'd need to call nextset() once to skip it. self.cursor.execute( """ Create procedure proc1 AS set nocount on select top 10 name, id, xtype, refdate into #tmptable from sysobjects select * from #tmptable """) self.cursor.execute("exec proc1") self.assertTrue(self.cursor.description is not None) self.assertTrue(len(self.cursor.description) == 4) rows = self.cursor.fetchall() self.assertEqual(type(rows), list) self.assertEqual(len(rows), 10) # there has to be at least 10 items in sysobjects self.assertEqual(type(rows[0].refdate), datetime) def test_sp_results_from_vartbl(self): self.cursor.execute( """ Create procedure proc1 AS set nocount on declare @tmptbl table(name varchar(100), id int, xtype varchar(4), refdate datetime) insert into @tmptbl select top 10 name, id, xtype, refdate from sysobjects select * from @tmptbl """) self.cursor.execute("exec proc1") rows = self.cursor.fetchall() self.assertEqual(type(rows), list) self.assertEqual(len(rows), 10) # there has to be at least 10 items in sysobjects self.assertEqual(type(rows[0].refdate), datetime) def test_sp_with_dates(self): # Reported in the forums that passing two datetimes to a stored procedure doesn't work. 
self.cursor.execute( """ if exists (select * from dbo.sysobjects where id = object_id(N'[test_sp]') and OBJECTPROPERTY(id, N'IsProcedure') = 1) drop procedure [dbo].[test_sp] """) self.cursor.execute( """ create procedure test_sp(@d1 datetime, @d2 datetime) AS declare @d as int set @d = datediff(year, @d1, @d2) select @d """) self.cursor.execute("exec test_sp ?, ?", datetime.now(), datetime.now()) rows = self.cursor.fetchall() self.assertTrue(rows is not None) self.assertTrue(rows[0][0] == 0) # 0 years apart def test_sp_with_none(self): # Reported in the forums that passing None caused an error. self.cursor.execute( """ if exists (select * from dbo.sysobjects where id = object_id(N'[test_sp]') and OBJECTPROPERTY(id, N'IsProcedure') = 1) drop procedure [dbo].[test_sp] """) self.cursor.execute( """ create procedure test_sp(@x varchar(20)) AS declare @y varchar(20) set @y = @x select @y """) self.cursor.execute("exec test_sp ?", None) rows = self.cursor.fetchall() self.assertTrue(rows is not None) self.assertTrue(rows[0][0] == None) # 0 years apart # # rowcount # def test_rowcount_delete(self): self.assertEqual(self.cursor.rowcount, -1) self.cursor.execute("create table t1(i int)") count = 4 for i in range(count): self.cursor.execute("insert into t1 values (?)", i) self.cursor.execute("delete from t1") self.assertEqual(self.cursor.rowcount, count) def test_rowcount_nodata(self): """ This represents a different code path than a delete that deleted something. The return value is SQL_NO_DATA and code after it was causing an error. We could use SQL_NO_DATA to step over the code that errors out and drop down to the same SQLRowCount code. On the other hand, we could hardcode a zero return value. """ self.cursor.execute("create table t1(i int)") # This is a different code path internally. self.cursor.execute("delete from t1") self.assertEqual(self.cursor.rowcount, 0) def test_rowcount_select(self): """ Ensure Cursor.rowcount is set properly after a select statement. 
pyodbc calls SQLRowCount after each execute and sets Cursor.rowcount, but SQL Server 2005 returns -1 after a select statement, so we'll test for that behavior. This is valid behavior according to the DB API specification, but people don't seem to like it. """ self.cursor.execute("create table t1(i int)") count = 4 for i in range(count): self.cursor.execute("insert into t1 values (?)", i) self.cursor.execute("select * from t1") self.assertEqual(self.cursor.rowcount, -1) rows = self.cursor.fetchall() self.assertEqual(len(rows), count) self.assertEqual(self.cursor.rowcount, -1) def test_rowcount_reset(self): "Ensure rowcount is reset to -1" self.cursor.execute("create table t1(i int)") count = 4 for i in range(count): self.cursor.execute("insert into t1 values (?)", i) self.assertEqual(self.cursor.rowcount, 1) self.cursor.execute("create table t2(i int)") self.assertEqual(self.cursor.rowcount, -1) # # always return Cursor # # In the 2.0.x branch, Cursor.execute sometimes returned the cursor and sometimes the rowcount. This proved very # confusing when things went wrong and added very little value even when things went right since users could always # use: cursor.execute("...").rowcount def test_retcursor_delete(self): self.cursor.execute("create table t1(i int)") self.cursor.execute("insert into t1 values (1)") v = self.cursor.execute("delete from t1") self.assertEqual(v, self.cursor) def test_retcursor_nodata(self): """ This represents a different code path than a delete that deleted something. The return value is SQL_NO_DATA and code after it was causing an error. We could use SQL_NO_DATA to step over the code that errors out and drop down to the same SQLRowCount code. """ self.cursor.execute("create table t1(i int)") # This is a different code path internally. 
v = self.cursor.execute("delete from t1") self.assertEqual(v, self.cursor) def test_retcursor_select(self): self.cursor.execute("create table t1(i int)") self.cursor.execute("insert into t1 values (1)") v = self.cursor.execute("select * from t1") self.assertEqual(v, self.cursor) # # misc # def test_lower_case(self): "Ensure pyodbc.lowercase forces returned column names to lowercase." # Has to be set before creating the cursor, so we must recreate self.cursor. pyodbc.lowercase = True self.cursor = self.cnxn.cursor() self.cursor.execute("create table t1(Abc int, dEf int)") self.cursor.execute("select * from t1") names = [ t[0] for t in self.cursor.description ] names.sort() self.assertEqual(names, [ "abc", "def" ]) # Put it back so other tests don't fail. pyodbc.lowercase = False def test_row_description(self): """ Ensure Cursor.description is accessible as Row.cursor_description. """ self.cursor = self.cnxn.cursor() self.cursor.execute("create table t1(a int, b char(3))") self.cnxn.commit() self.cursor.execute("insert into t1 values(1, 'abc')") row = self.cursor.execute("select * from t1").fetchone() self.assertEqual(self.cursor.description, row.cursor_description) def test_temp_select(self): # A project was failing to create temporary tables via select into. 
self.cursor.execute("create table t1(s char(7))") self.cursor.execute("insert into t1 values(?)", "testing") v = self.cursor.execute("select * from t1").fetchone()[0] self.assertEqual(type(v), str) self.assertEqual(v, "testing") self.cursor.execute("select s into t2 from t1") v = self.cursor.execute("select * from t1").fetchone()[0] self.assertEqual(type(v), str) self.assertEqual(v, "testing") def test_money(self): d = Decimal('123456.78') self.cursor.execute("create table t1(i int identity(1,1), m money)") self.cursor.execute("insert into t1(m) values (?)", d) v = self.cursor.execute("select m from t1").fetchone()[0] self.assertEqual(v, d) def test_executemany(self): self.cursor.execute("create table t1(a int, b varchar(10))") params = [ (i, str(i)) for i in range(1, 6) ] self.cursor.executemany("insert into t1(a, b) values (?,?)", params) count = self.cursor.execute("select count(*) from t1").fetchone()[0] self.assertEqual(count, len(params)) self.cursor.execute("select a, b from t1 order by a") rows = self.cursor.fetchall() self.assertEqual(count, len(rows)) for param, row in zip(params, rows): self.assertEqual(param[0], row[0]) self.assertEqual(param[1], row[1]) def test_executemany_one(self): "Pass executemany a single sequence" self.cursor.execute("create table t1(a int, b varchar(10))") params = [ (1, "test") ] self.cursor.executemany("insert into t1(a, b) values (?,?)", params) count = self.cursor.execute("select count(*) from t1").fetchone()[0] self.assertEqual(count, len(params)) self.cursor.execute("select a, b from t1 order by a") rows = self.cursor.fetchall() self.assertEqual(count, len(rows)) for param, row in zip(params, rows): self.assertEqual(param[0], row[0]) self.assertEqual(param[1], row[1]) def test_executemany_failure(self): """ Ensure that an exception is raised if one query in an executemany fails. 
""" self.cursor.execute("create table t1(a int, b varchar(10))") params = [ (1, 'good'), ('error', 'not an int'), (3, 'good') ] self.assertRaises(pyodbc.Error, self.cursor.executemany, "insert into t1(a, b) value (?, ?)", params) def test_row_slicing(self): self.cursor.execute("create table t1(a int, b int, c int, d int)"); self.cursor.execute("insert into t1 values(1,2,3,4)") row = self.cursor.execute("select * from t1").fetchone() result = row[:] self.assertTrue(result is row) result = row[:-1] self.assertEqual(result, (1,2,3)) result = row[0:4] self.assertTrue(result is row) def test_row_repr(self): self.cursor.execute("create table t1(a int, b int, c int, d int)"); self.cursor.execute("insert into t1 values(1,2,3,4)") row = self.cursor.execute("select * from t1").fetchone() result = str(row) self.assertEqual(result, "(1, 2, 3, 4)") result = str(row[:-1]) self.assertEqual(result, "(1, 2, 3)") result = str(row[:1]) self.assertEqual(result, "(1,)") def test_concatenation(self): v2 = '0123456789' * 30 v3 = '9876543210' * 30 self.cursor.execute("create table t1(c1 int identity(1, 1), c2 varchar(300), c3 varchar(300))") self.cursor.execute("insert into t1(c2, c3) values (?,?)", v2, v3) row = self.cursor.execute("select c2, c3, c2 + c3 as both from t1").fetchone() self.assertEqual(row.both, v2 + v3) def test_view_select(self): # Reported in forum: Can't select from a view? I think I do this a lot, but another test never hurts. # Create a table (t1) with 3 rows and a view (t2) into it. 
self.cursor.execute("create table t1(c1 int identity(1, 1), c2 varchar(50))") for i in range(3): self.cursor.execute("insert into t1(c2) values (?)", "string%s" % i) self.cursor.execute("create view t2 as select * from t1") # Select from the view self.cursor.execute("select * from t2") rows = self.cursor.fetchall() self.assertTrue(rows is not None) self.assertTrue(len(rows) == 3) def test_autocommit(self): self.assertEqual(self.cnxn.autocommit, False) othercnxn = pyodbc.connect(self.connection_string, autocommit=True) self.assertEqual(othercnxn.autocommit, True) othercnxn.autocommit = False self.assertEqual(othercnxn.autocommit, False) def test_unicode_results(self): "Ensure unicode_results forces Unicode" othercnxn = pyodbc.connect(self.connection_string, unicode_results=True) othercursor = othercnxn.cursor() # ANSI data in an ANSI column ... othercursor.execute("create table t1(s varchar(20))") othercursor.execute("insert into t1 values(?)", 'test') # ... should be returned as Unicode value = othercursor.execute("select s from t1").fetchone()[0] self.assertEqual(value, u'test') def test_informix_callproc(self): try: self.cursor.execute("drop procedure pyodbctest") self.cnxn.commit() except: pass self.cursor.execute("create table t1(s varchar(10))") self.cursor.execute("insert into t1 values(?)", "testing") self.cursor.execute(""" create procedure pyodbctest @var1 varchar(32) as begin select s from t1 return end """) self.cnxn.commit() # for row in self.cursor.procedureColumns('pyodbctest'): # print row.procedure_name, row.column_name, row.column_type, row.type_name self.cursor.execute("exec pyodbctest 'hi'") # print self.cursor.description # for row in self.cursor: # print row.s def test_skip(self): # Insert 1, 2, and 3. Fetch 1, skip 2, fetch 3. 
self.cursor.execute("create table t1(id int)"); for i in range(1, 5): self.cursor.execute("insert into t1 values(?)", i) self.cursor.execute("select id from t1 order by id") self.assertEqual(self.cursor.fetchone()[0], 1) self.cursor.skip(2) self.assertEqual(self.cursor.fetchone()[0], 4) def test_timeout(self): self.assertEqual(self.cnxn.timeout, 0) # defaults to zero (off) self.cnxn.timeout = 30 self.assertEqual(self.cnxn.timeout, 30) self.cnxn.timeout = 0 self.assertEqual(self.cnxn.timeout, 0) def test_sets_execute(self): # Only lists and tuples are allowed. def f(): self.cursor.execute("create table t1 (word varchar (100))") words = set (['a']) self.cursor.execute("insert into t1 (word) VALUES (?)", [words]) self.assertRaises(pyodbc.ProgrammingError, f) def test_sets_executemany(self): # Only lists and tuples are allowed. def f(): self.cursor.execute("create table t1 (word varchar (100))") words = set (['a']) self.cursor.executemany("insert into t1 (word) values (?)", [words]) self.assertRaises(TypeError, f) def test_row_execute(self): "Ensure we can use a Row object as a parameter to execute" self.cursor.execute("create table t1(n int, s varchar(10))") self.cursor.execute("insert into t1 values (1, 'a')") row = self.cursor.execute("select n, s from t1").fetchone() self.assertNotEqual(row, None) self.cursor.execute("create table t2(n int, s varchar(10))") self.cursor.execute("insert into t2 values (?, ?)", row) def test_row_executemany(self): "Ensure we can use a Row object as a parameter to executemany" self.cursor.execute("create table t1(n int, s varchar(10))") for i in range(3): self.cursor.execute("insert into t1 values (?, ?)", i, chr(ord('a')+i)) rows = self.cursor.execute("select n, s from t1").fetchall() self.assertNotEqual(len(rows), 0) self.cursor.execute("create table t2(n int, s varchar(10))") self.cursor.executemany("insert into t2 values (?, ?)", rows) def test_description(self): "Ensure cursor.description is correct" self.cursor.execute("create 
table t1(n int, s varchar(8), d decimal(5,2))") self.cursor.execute("insert into t1 values (1, 'abc', '1.23')") self.cursor.execute("select * from t1") # (I'm not sure the precision of an int is constant across different versions, bits, so I'm hand checking the # items I do know. # int t = self.cursor.description[0] self.assertEqual(t[0], 'n') self.assertEqual(t[1], int) self.assertEqual(t[5], 0) # scale self.assertEqual(t[6], True) # nullable # varchar(8) t = self.cursor.description[1] self.assertEqual(t[0], 's') self.assertEqual(t[1], str) self.assertEqual(t[4], 8) # precision self.assertEqual(t[5], 0) # scale self.assertEqual(t[6], True) # nullable # decimal(5, 2) t = self.cursor.description[2] self.assertEqual(t[0], 'd') self.assertEqual(t[1], Decimal) self.assertEqual(t[4], 5) # precision self.assertEqual(t[5], 2) # scale self.assertEqual(t[6], True) # nullable def test_none_param(self): "Ensure None can be used for params other than the first" # Some driver/db versions would fail if NULL was not the first parameter because SQLDescribeParam (only used # with NULL) could not be used after the first call to SQLBindParameter. This means None always worked for the # first column, but did not work for later columns. # # If SQLDescribeParam doesn't work, pyodbc would use VARCHAR which almost always worked. However, # binary/varbinary won't allow an implicit conversion. self.cursor.execute("create table t1(n int, blob varbinary(max))") self.cursor.execute("insert into t1 values (1, newid())") row = self.cursor.execute("select * from t1").fetchone() self.assertEqual(row.n, 1) self.assertEqual(type(row.blob), buffer) self.cursor.execute("update t1 set n=?, blob=?", 2, None) row = self.cursor.execute("select * from t1").fetchone() self.assertEqual(row.n, 2) self.assertEqual(row.blob, None) def test_output_conversion(self): def convert(value): # `value` will be a string. We'll simply add an X at the beginning at the end. 
return 'X' + value + 'X' self.cnxn.add_output_converter(pyodbc.SQL_VARCHAR, convert) self.cursor.execute("create table t1(n int, v varchar(10))") self.cursor.execute("insert into t1 values (1, '123.45')") value = self.cursor.execute("select v from t1").fetchone()[0] self.assertEqual(value, 'X123.45X') # Now clear the conversions and try again. There should be no Xs this time. self.cnxn.clear_output_converters() value = self.cursor.execute("select v from t1").fetchone()[0] self.assertEqual(value, '123.45') def test_too_large(self): """Ensure error raised if insert fails due to truncation""" value = 'x' * 1000 self.cursor.execute("create table t1(s varchar(800))") def test(): self.cursor.execute("insert into t1 values (?)", value) self.assertRaises(pyodbc.DataError, test) def test_geometry_null_insert(self): def convert(value): return value self.cnxn.add_output_converter(-151, convert) # -151 is SQL Server's geometry self.cursor.execute("create table t1(n int, v geometry)") self.cursor.execute("insert into t1 values (?, ?)", 1, None) value = self.cursor.execute("select v from t1").fetchone()[0] self.assertEqual(value, None) self.cnxn.clear_output_converters() def test_login_timeout(self): # This can only test setting since there isn't a way to cause it to block on the server side. 
cnxns = pyodbc.connect(self.connection_string, timeout=2) def test_row_equal(self): self.cursor.execute("create table t1(n int, s varchar(20))") self.cursor.execute("insert into t1 values (1, 'test')") row1 = self.cursor.execute("select n, s from t1").fetchone() row2 = self.cursor.execute("select n, s from t1").fetchone() b = (row1 == row2) self.assertEqual(b, True) def test_row_gtlt(self): self.cursor.execute("create table t1(n int, s varchar(20))") self.cursor.execute("insert into t1 values (1, 'test1')") self.cursor.execute("insert into t1 values (1, 'test2')") rows = self.cursor.execute("select n, s from t1 order by s").fetchall() self.assertTrue(rows[0] < rows[1]) self.assertTrue(rows[0] <= rows[1]) self.assertTrue(rows[1] > rows[0]) self.assertTrue(rows[1] >= rows[0]) self.assertTrue(rows[0] != rows[1]) rows = list(rows) rows.sort() # uses < def test_context_manager(self): with pyodbc.connect(self.connection_string) as cnxn: cnxn.getinfo(pyodbc.SQL_DEFAULT_TXN_ISOLATION) # The connection should be closed now. def test(): cnxn.getinfo(pyodbc.SQL_DEFAULT_TXN_ISOLATION) self.assertRaises(pyodbc.ProgrammingError, test) def test_untyped_none(self): # From issue 129 value = self.cursor.execute("select ?", None).fetchone()[0] self.assertEqual(value, None) def test_large_update_nodata(self): self.cursor.execute('create table t1(a varbinary(max))') hundredkb = buffer('x'*100*1024) self.cursor.execute('update t1 set a=? where 1=0', (hundredkb,)) def test_func_param(self): self.cursor.execute(''' create function func1 (@testparam varchar(4)) returns @rettest table (param varchar(4)) as begin insert @rettest select @testparam return end ''') self.cnxn.commit() value = self.cursor.execute("select * from func1(?)", 'test').fetchone()[0] self.assertEqual(value, 'test') def test_no_fetch(self): # Issue 89 with FreeTDS: Multiple selects (or catalog functions that issue selects) without fetches seem to # confuse the driver. 
self.cursor.execute('select 1') self.cursor.execute('select 1') self.cursor.execute('select 1') def test_drivers(self): drivers = pyodbc.drivers() self.assertEqual(list, type(drivers)) self.assertTrue(len(drivers) > 1) m = re.search('DRIVER={?([^}]+?)}?;', self.connection_string, re.IGNORECASE) current = m.group(1) self.assertTrue(current in drivers) def main(): from optparse import OptionParser parser = OptionParser(usage=usage) parser.add_option("-v", "--verbose", action="count", help="Increment test verbosity (can be used multiple times)") parser.add_option("-d", "--debug", action="store_true", default=False, help="Print debugging items") parser.add_option("-t", "--test", help="Run only the named test") (options, args) = parser.parse_args() if len(args) > 1: parser.error('Only one argument is allowed. Do you need quotes around the connection string?') if not args: connection_string = load_setup_connection_string('informixtests') if not connection_string: parser.print_help() raise SystemExit() else: connection_string = args[0] if options.verbose: cnxn = pyodbc.connect(connection_string) print_library_info(cnxn) cnxn.close() suite = load_tests(InformixTestCase, options.test, connection_string) testRunner = unittest.TextTestRunner(verbosity=options.verbose) result = testRunner.run(suite) return result if __name__ == '__main__': # Add the build directory to the path so we're testing the latest build, not the installed version. add_to_path() import pyodbc sys.exit(0 if main().wasSuccessful() else 1) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1619054768.0 pyodbc-4.0.32/tests3/issue802.py0000664000175000017500000000664300000000000017615 0ustar00mkleehammermkleehammer""" This tests ensures that there is no memory leakage when params.cpp:ExecuteMulti function does conversion of Unicode to Bytes. 
In ExecuteMulti function after DoExecute label SQLExecute returns One scenario where SQLParamData function will be used is when there is a varchar(max), a parameter with an unknown size in the INSERT INTO query. In this case, a unicode string is being added to a varchar(max) field. In order to execute the INSERT INTO query, SQLExecute is used. SQLExecute will return SQL_NEED_DATA (SQL_NEED_DATA = 99). Then SQLParamData will be used to create a SQL parameter and will return SQL_NEED_DATA too. When PyUnicode_Check(pInfo->cell) is true, a conversion of Unicode to Bytes is required before it can be used by SQLPutData. During this conversion a new PyObject, called bytes, is created and assigned to objCell. This object never gets Py_XDECREF, and the data will stay stuck in the memory without a reference. This memory leak is only visible when using varchar(max) because varchar(max) required additional allocation of memory that correspond to the size of the input while varchar(100) for example will not case another SQL_NEED_DATA status. To see how to reproduce the memory leak, look at https://github.com/mkleehammer/pyodbc/issues/802 """ import os import unittest import psutil from tests3.testutils import add_to_path, load_setup_connection_string add_to_path() import pyodbc KB = 1024 MB = KB * 1024 CONNECTION_STRING = None CONNECTION_STRING_ERROR_MESSAGE = ( r"Please create tmp\setup.cfg file or set a valid value to CONNECTION_STRING." 
) process = psutil.Process() def memory(): return process.memory_info().vms class SQLPutDataUnicodeToBytesMemoryLeakTestCase(unittest.TestCase): driver = pyodbc @classmethod def setUpClass(cls): filename = os.path.splitext(os.path.basename(__file__))[0] cls.connection_string = ( load_setup_connection_string(filename) or CONNECTION_STRING ) if not cls.connection_string: return ValueError(CONNECTION_STRING_ERROR_MESSAGE) def test__varchar_max__inserting_many_rows__same_memory_usage(self): varchar_limit = "max" num_rows = 50_000 data = [(i, f"col{i:06}", 3.14159265 * (i + 1)) for i in range(num_rows)] table_name = "pd_test" col_names = ["id", "txt_col", "float_col"] ins_sql = f"INSERT INTO {table_name} ({','.join(col_names)}) VALUES ({','.join('?' * len(col_names))})" with pyodbc.connect(self.connection_string, autocommit=True) as cnxn: # First time adds memory, not related to the test. self.action(cnxn, data, ins_sql, table_name, varchar_limit) for iteration in range(3): start_memory = memory() self.action(cnxn, data, ins_sql, table_name, varchar_limit) end_memory = memory() memory_diff = end_memory - start_memory self.assertLess(memory_diff, 100 * KB) def action(self, cnxn, data, ins_sql, table_name, varchar_limit): crsr = cnxn.cursor() crsr.execute(f"DROP TABLE IF EXISTS {table_name}") crsr.execute( f"CREATE TABLE {table_name} (id int, txt_col varchar({varchar_limit}), float_col float(53))" ) crsr.fast_executemany = True crsr.executemany(ins_sql, data) crsr.close() def main(): unittest.main() if __name__ == "__main__": main() ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1629143953.0 pyodbc-4.0.32/tests3/mysqltests.py0000775000175000017500000006724400000000000020472 0ustar00mkleehammermkleehammer#!/usr/bin/env python3 usage = """\ usage: %prog [options] connection_string Unit tests for MySQL. To use, pass a connection string as the parameter. The tests will create and drop tables t1 and t2 as necessary. 
These tests use the pyodbc library from the build directory, not the version installed in your Python directories. You must run `python setup.py build` before running these tests. You can also put the connection string into a tmp/setup.cfg file like so: [mysqltests] connection-string=DRIVER=MySQL ODBC 8.0 ANSI Driver;charset=utf8mb4;SERVER=localhost;DATABASE=pyodbc;UID=root;PWD=rootpw Note: Use the "ANSI" (not the "Unicode") driver and include charset=utf8mb4 in the connection string so the high-Unicode tests won't fail. """ import sys, os, re import unittest from decimal import Decimal from datetime import datetime, date, time from os.path import join, getsize, dirname, abspath, basename from testutils import * _TESTSTR = '0123456789-abcdefghijklmnopqrstuvwxyz-' def _generate_test_string(length): """ Returns a string of composed of `seed` to make a string `length` characters long. To enhance performance, there are 3 ways data is read, based on the length of the value, so most data types are tested with 3 lengths. This function helps us generate the test data. We use a recognizable data set instead of a single character to make it less likely that "overlap" errors will be hidden and to help us manually identify where a break occurs. 
""" if length <= len(_TESTSTR): return _TESTSTR[:length] c = (length + len(_TESTSTR)-1) // len(_TESTSTR) v = _TESTSTR * c return v[:length] class MySqlTestCase(unittest.TestCase): INTEGERS = [ -1, 0, 1, 0x7FFFFFFF ] BIGINTS = INTEGERS + [ 0xFFFFFFFF, 0x123456789 ] SMALL_FENCEPOST_SIZES = [ 0, 1, 255, 256, 510, 511, 512, 1023, 1024, 2047, 2048, 4000 ] LARGE_FENCEPOST_SIZES = [ 4095, 4096, 4097, 10 * 1024, 20 * 1024 ] STR_FENCEPOSTS = [ _generate_test_string(size) for size in SMALL_FENCEPOST_SIZES ] BLOB_FENCEPOSTS = STR_FENCEPOSTS + [ _generate_test_string(size) for size in LARGE_FENCEPOST_SIZES ] def __init__(self, method_name, connection_string): unittest.TestCase.__init__(self, method_name) self.connection_string = connection_string def setUp(self): self.cnxn = pyodbc.connect(self.connection_string) self.cursor = self.cnxn.cursor() # As of libmyodbc5w 5.3 SQLGetTypeInfo returns absurdly small sizes # leading to slow writes. Override them: self.cnxn.maxwrite = 1024 * 1024 * 1024 # My MySQL configuration (and I think the default) sends *everything* # in UTF-8. The pyodbc default is to send Unicode as UTF-16 and to # decode WCHAR via UTF-16. Change them both to UTF-8. self.cnxn.setdecoding(pyodbc.SQL_CHAR, encoding='utf-8') self.cnxn.setdecoding(pyodbc.SQL_WCHAR, encoding='utf-8') self.cnxn.setencoding(encoding='utf-8') for i in range(3): try: self.cursor.execute("drop table t%d" % i) self.cnxn.commit() except: pass for i in range(3): try: self.cursor.execute("drop procedure proc%d" % i) self.cnxn.commit() except: pass self.cnxn.rollback() def tearDown(self): try: self.cursor.close() self.cnxn.close() except: # If we've already closed the cursor or connection, exceptions are thrown. 
pass def test_multiple_bindings(self): "More than one bind and select on a cursor" self.cursor.execute("create table t1(n int)") self.cursor.execute("insert into t1 values (?)", 1) self.cursor.execute("insert into t1 values (?)", 2) self.cursor.execute("insert into t1 values (?)", 3) for i in range(3): self.cursor.execute("select n from t1 where n < ?", 10) self.cursor.execute("select n from t1 where n < 3") def test_different_bindings(self): self.cursor.execute("create table t1(n int)") self.cursor.execute("create table t2(d datetime)") self.cursor.execute("insert into t1 values (?)", 1) self.cursor.execute("insert into t2 values (?)", datetime.now()) def test_drivers(self): p = pyodbc.drivers() self.assertTrue(isinstance(p, list)) def test_datasources(self): p = pyodbc.dataSources() self.assertTrue(isinstance(p, dict)) def test_getinfo_string(self): value = self.cnxn.getinfo(pyodbc.SQL_CATALOG_NAME_SEPARATOR) self.assertTrue(isinstance(value, str)) def test_getinfo_bool(self): value = self.cnxn.getinfo(pyodbc.SQL_ACCESSIBLE_TABLES) self.assertTrue(isinstance(value, bool)) def test_getinfo_int(self): value = self.cnxn.getinfo(pyodbc.SQL_DEFAULT_TXN_ISOLATION) self.assertTrue(isinstance(value, int)) def test_getinfo_smallint(self): value = self.cnxn.getinfo(pyodbc.SQL_CONCAT_NULL_BEHAVIOR) self.assertTrue(isinstance(value, int)) def _test_strtype(self, sqltype, value, colsize=None): """ The implementation for string and binary tests. """ assert colsize is None or (value is None or colsize >= len(value)) if colsize: sql = "create table t1(s %s(%s))" % (sqltype, colsize) else: sql = "create table t1(s %s)" % sqltype try: self.cursor.execute(sql) except: print('>>>>', sql) self.cursor.execute("insert into t1 values(?)", value) v = self.cursor.execute("select * from t1").fetchone()[0] # Removing this check for now until I get the charset working properly. # If we use latin1, results are 'str' instead of 'unicode', which would be # correct. 
Setting charset to ucs-2 causes a crash in SQLGetTypeInfo(SQL_DATETIME). # self.assertEqual(type(v), type(value)) if value is not None: self.assertEqual(len(v), len(value)) self.assertEqual(v, value) # # varchar # def test_varchar_null(self): self._test_strtype('varchar', None, 100) # Generate a test for each fencepost size: test_varchar_0, etc. def _maketest(value): def t(self): self._test_strtype('varchar', value, max(1, len(value))) return t for value in STR_FENCEPOSTS: locals()['test_varchar_%s' % len(value)] = _maketest(value) def test_varchar_many(self): self.cursor.execute("create table t1(c1 varchar(300), c2 varchar(300), c3 varchar(300))") v1 = 'ABCDEFGHIJ' * 30 v2 = '0123456789' * 30 v3 = '9876543210' * 30 self.cursor.execute("insert into t1(c1, c2, c3) values (?,?,?)", v1, v2, v3); row = self.cursor.execute("select c1, c2, c3 from t1").fetchone() self.assertEqual(v1, row.c1) self.assertEqual(v2, row.c2) self.assertEqual(v3, row.c3) def test_varchar_upperlatin(self): self._test_strtype('varchar', u'á', colsize=3) def test_utf16(self): self.cursor.execute("create table t1(c1 varchar(100) character set utf16, c2 varchar(100))") self.cursor.execute("insert into t1 values ('test', 'test')") value = "test" row = self.cursor.execute("select c1,c2 from t1").fetchone() for v in row: self.assertEqual(type(v), str) self.assertEqual(v, value) # # binary # def test_null_binary(self): self._test_strtype('varbinary', None, 100) def test_large_null_binary(self): # Bug 1575064 self._test_strtype('varbinary', None, 4000) # Generate a test for each fencepost size: test_binary_0, etc. def _maketest(value): def t(self): self._test_strtype('varbinary', bytes(value, 'utf-8'), max(1, len(value))) return t for value in STR_FENCEPOSTS: locals()['test_binary_%s' % len(value)] = _maketest(value) # # blob # def test_blob_null(self): self._test_strtype('blob', None) # Generate a test for each fencepost size: test_blob_0, etc. 
def _maketest(value): def t(self): self._test_strtype('blob', bytes(value, 'utf-8')) return t for value in BLOB_FENCEPOSTS: locals()['test_blob_%s' % len(value)] = _maketest(value) def test_blob_upperlatin(self): self._test_strtype('blob', bytes('á', 'utf-8')) # # text # def test_null_text(self): self._test_strtype('text', None) # Generate a test for each fencepost size: test_text_0, etc. def _maketest(value): def t(self): self._test_strtype('text', value) return t for value in STR_FENCEPOSTS: locals()['test_text_%s' % len(value)] = _maketest(value) def test_text_upperlatin(self): self._test_strtype('text', 'á') # # unicode # def test_unicode_query(self): self.cursor.execute(u"select 1") # # bit # # The MySQL driver maps BIT colums to the ODBC bit data type, but they aren't behaving quite like a Boolean value # (which is what the ODBC bit data type really represents). The MySQL BOOL data type is just an alias for a small # integer, so pyodbc can't recognize it and map it back to True/False. # # You can use both BIT and BOOL and they will act as you expect if you treat them as integers. You can write 0 and # 1 to them and they will work. 
# def test_bit(self): # value = True # self.cursor.execute("create table t1(b bit)") # self.cursor.execute("insert into t1 values (?)", value) # v = self.cursor.execute("select b from t1").fetchone()[0] # self.assertEqual(type(v), bool) # self.assertEqual(v, value) # # def test_bit_string_true(self): # self.cursor.execute("create table t1(b bit)") # self.cursor.execute("insert into t1 values (?)", "xyzzy") # v = self.cursor.execute("select b from t1").fetchone()[0] # self.assertEqual(type(v), bool) # self.assertEqual(v, True) # # def test_bit_string_false(self): # self.cursor.execute("create table t1(b bit)") # self.cursor.execute("insert into t1 values (?)", "") # v = self.cursor.execute("select b from t1").fetchone()[0] # self.assertEqual(type(v), bool) # self.assertEqual(v, False) # # decimal # def test_small_decimal(self): # value = Decimal('1234567890987654321') value = Decimal('100010') # (I use this because the ODBC docs tell us how the bytes should look in the C struct) self.cursor.execute("create table t1(d numeric(19))") self.cursor.execute("insert into t1 values(?)", value) v = self.cursor.execute("select * from t1").fetchone()[0] self.assertEqual(type(v), Decimal) self.assertEqual(v, value) def test_small_decimal_scale(self): # The same as small_decimal, except with a different scale. This value exactly matches the ODBC documentation # example in the C Data Types appendix. 
value = '1000.10' value = Decimal(value) self.cursor.execute("create table t1(d numeric(20,6))") self.cursor.execute("insert into t1 values(?)", value) v = self.cursor.execute("select * from t1").fetchone()[0] self.assertEqual(type(v), Decimal) self.assertEqual(v, value) def test_negative_decimal_scale(self): value = Decimal('-10.0010') self.cursor.execute("create table t1(d numeric(19,4))") self.cursor.execute("insert into t1 values(?)", value) v = self.cursor.execute("select * from t1").fetchone()[0] self.assertEqual(type(v), Decimal) self.assertEqual(v, value) def _test_inttype(self, datatype, n): self.cursor.execute('create table t1(n %s)' % datatype) self.cursor.execute('insert into t1 values (?)', n) result = self.cursor.execute("select n from t1").fetchone()[0] self.assertEqual(result, n) def _maketest(datatype, value): def t(self): self._test_inttype(datatype, value) return t for value in INTEGERS: name = str(abs(value)) if value < 0: name = 'neg_' + name locals()['test_int_%s' % name] = _maketest('int', value) for value in BIGINTS: name = str(abs(value)) if value < 0: name = 'neg_' + name locals()['test_bigint_%s' % name] = _maketest('bigint', value) def test_subquery_params(self): """Ensure parameter markers work in a subquery""" self.cursor.execute("create table t1(id integer, s varchar(20))") self.cursor.execute("insert into t1 values (?,?)", 1, 'test') row = self.cursor.execute(""" select x.id from ( select id from t1 where s = ? and id between ? and ? ) x """, 'test', 1, 10).fetchone() self.assertNotEqual(row, None) self.assertEqual(row[0], 1) def _exec(self): self.cursor.execute(self.sql) def test_close_cnxn(self): """Make sure using a Cursor after closing its connection doesn't crash.""" self.cursor.execute("create table t1(id integer, s varchar(20))") self.cursor.execute("insert into t1 values (?,?)", 1, 'test') self.cursor.execute("select * from t1") self.cnxn.close() # Now that the connection is closed, we expect an exception. 
(If the code attempts to use # the HSTMT, we'll get an access violation instead.) self.sql = "select * from t1" self.assertRaises(pyodbc.ProgrammingError, self._exec) def test_empty_string(self): self.cursor.execute("create table t1(s varchar(20))") self.cursor.execute("insert into t1 values(?)", "") def test_fixed_str(self): value = "testing" self.cursor.execute("create table t1(s char(7))") self.cursor.execute("insert into t1 values(?)", "testing") v = self.cursor.execute("select * from t1").fetchone()[0] self.assertEqual(v, value) def test_negative_row_index(self): self.cursor.execute("create table t1(s varchar(20))") self.cursor.execute("insert into t1 values(?)", "1") row = self.cursor.execute("select * from t1").fetchone() self.assertEqual(row[0], "1") self.assertEqual(row[-1], "1") def test_version(self): self.assertEqual(3, len(pyodbc.version.split('.'))) # 1.3.1 etc. # # date, time, datetime # def test_datetime(self): value = datetime(2007, 1, 15, 3, 4, 5) self.cursor.execute("create table t1(dt datetime)") self.cursor.execute("insert into t1 values (?)", value) result = self.cursor.execute("select dt from t1").fetchone()[0] self.assertEqual(value, result) def test_date(self): value = date(2001, 1, 1) self.cursor.execute("create table t1(dt date)") self.cursor.execute("insert into t1 values (?)", value) result = self.cursor.execute("select dt from t1").fetchone()[0] self.assertEqual(type(result), type(value)) self.assertEqual(result, value) # # ints and floats # def test_int(self): value = 1234 self.cursor.execute("create table t1(n int)") self.cursor.execute("insert into t1 values (?)", value) result = self.cursor.execute("select n from t1").fetchone()[0] self.assertEqual(result, value) def test_negative_int(self): value = -1 self.cursor.execute("create table t1(n int)") self.cursor.execute("insert into t1 values (?)", value) result = self.cursor.execute("select n from t1").fetchone()[0] self.assertEqual(result, value) def test_bigint(self): # This fails 
on 64-bit Fedora with 5.1. # Should return 0x0123456789 # Does return 0x0000000000 # # Top 4 bytes are returned as 0x00 00 00 00. If the input is high enough, they are returned as 0xFF FF FF FF. input = 0x123456789 self.cursor.execute("create table t1(d bigint)") self.cursor.execute("insert into t1 values (?)", input) result = self.cursor.execute("select d from t1").fetchone()[0] self.assertEqual(result, input) def test_float(self): value = 1234.5 self.cursor.execute("create table t1(n float)") self.cursor.execute("insert into t1 values (?)", value) result = self.cursor.execute("select n from t1").fetchone()[0] self.assertEqual(result, value) def test_negative_float(self): value = -200 self.cursor.execute("create table t1(n float)") self.cursor.execute("insert into t1 values (?)", value) result = self.cursor.execute("select n from t1").fetchone()[0] self.assertEqual(value, result) def test_date(self): value = date.today() self.cursor.execute("create table t1(d date)") self.cursor.execute("insert into t1 values (?)", value) result = self.cursor.execute("select d from t1").fetchone()[0] self.assertEqual(value, result) def test_time(self): value = datetime.now().time() # We aren't yet writing values using the new extended time type so the value written to the database is only # down to the second. value = value.replace(microsecond=0) self.cursor.execute("create table t1(t time)") self.cursor.execute("insert into t1 values (?)", value) result = self.cursor.execute("select t from t1").fetchone()[0] self.assertEqual(value, result) # # misc # def test_rowcount_delete(self): self.assertEqual(self.cursor.rowcount, -1) self.cursor.execute("create table t1(i int)") count = 4 for i in range(count): self.cursor.execute("insert into t1 values (?)", i) self.cursor.execute("delete from t1") self.assertEqual(self.cursor.rowcount, count) def test_rowcount_nodata(self): """ This represents a different code path than a delete that deleted something. 
The return value is SQL_NO_DATA and code after it was causing an error. We could use SQL_NO_DATA to step over the code that errors out and drop down to the same SQLRowCount code. On the other hand, we could hardcode a zero return value. """ self.cursor.execute("create table t1(i int)") # This is a different code path internally. self.cursor.execute("delete from t1") self.assertEqual(self.cursor.rowcount, 0) def test_rowcount_select(self): """ Ensure Cursor.rowcount is set properly after a select statement. pyodbc calls SQLRowCount after each execute and sets Cursor.rowcount. Databases can return the actual rowcount or they can return -1 if it would help performance. MySQL seems to always return the correct rowcount. """ self.cursor.execute("create table t1(i int)") count = 4 for i in range(count): self.cursor.execute("insert into t1 values (?)", i) self.cursor.execute("select * from t1") self.assertEqual(self.cursor.rowcount, count) rows = self.cursor.fetchall() self.assertEqual(len(rows), count) self.assertEqual(self.cursor.rowcount, count) def test_rowcount_reset(self): "Ensure rowcount is reset to -1" # The Python DB API says that rowcount should be set to -1 and most ODBC drivers let us know there are no # records. MySQL always returns 0, however. Without parsing the SQL (which we are not going to do), I'm not # sure how we can tell the difference and set the value to -1. For now, I'll have this test check for 0. self.cursor.execute("create table t1(i int)") count = 4 for i in range(count): self.cursor.execute("insert into t1 values (?)", i) self.assertEqual(self.cursor.rowcount, 1) self.cursor.execute("create table t2(i int)") self.assertEqual(self.cursor.rowcount, 0) def test_lower_case(self): "Ensure pyodbc.lowercase forces returned column names to lowercase." # Has to be set before creating the cursor, so we must recreate self.cursor. 
pyodbc.lowercase = True self.cursor = self.cnxn.cursor() self.cursor.execute("create table t1(Abc int, dEf int)") self.cursor.execute("select * from t1") names = [ t[0] for t in self.cursor.description ] names.sort() self.assertEqual(names, [ "abc", "def" ]) # Put it back so other tests don't fail. pyodbc.lowercase = False def test_row_description(self): """ Ensure Cursor.description is accessible as Row.cursor_description. """ self.cursor = self.cnxn.cursor() self.cursor.execute("create table t1(a int, b char(3))") self.cnxn.commit() self.cursor.execute("insert into t1 values(1, 'abc')") row = self.cursor.execute("select * from t1").fetchone() self.assertEqual(self.cursor.description, row.cursor_description) def test_executemany(self): self.cursor.execute("create table t1(a int, b varchar(10))") params = [(i, str(i)) for i in range(1, 6)] self.cursor.executemany("insert into t1(a, b) values (?,?)", params) count = self.cursor.execute("select count(*) from t1").fetchone()[0] self.assertEqual(count, len(params)) self.cursor.execute("select a, b from t1 order by a") rows = self.cursor.fetchall() self.assertEqual(count, len(rows)) for param, row in zip(params, rows): self.assertEqual(param[0], row[0]) self.assertEqual(param[1], row[1]) def test_fast_executemany(self): driver_name = self.cnxn.getinfo(pyodbc.SQL_DRIVER_NAME) if driver_name.lower().endswith('a.dll') or driver_name.lower().endswith('a.so'): # skip this test for the ANSI driver # on Windows, it crashes CPython # on Linux, it simply fails return self.cursor.fast_executemany = True self.cursor.execute("create table t1(a int, b varchar(10))") params = [(i, str(i)) for i in range(1, 6)] self.cursor.executemany("insert into t1(a, b) values (?,?)", params) count = self.cursor.execute("select count(*) from t1").fetchone()[0] self.assertEqual(count, len(params)) self.cursor.execute("select a, b from t1 order by a") rows = self.cursor.fetchall() self.assertEqual(count, len(rows)) for param, row in zip(params, 
rows): self.assertEqual(param[0], row[0]) self.assertEqual(param[1], row[1]) def test_executemany_one(self): "Pass executemany a single sequence" self.cursor.execute("create table t1(a int, b varchar(10))") params = [ (1, "test") ] self.cursor.executemany("insert into t1(a, b) values (?,?)", params) count = self.cursor.execute("select count(*) from t1").fetchone()[0] self.assertEqual(count, len(params)) self.cursor.execute("select a, b from t1 order by a") rows = self.cursor.fetchall() self.assertEqual(count, len(rows)) for param, row in zip(params, rows): self.assertEqual(param[0], row[0]) self.assertEqual(param[1], row[1]) # REVIEW: The following fails. Research. # def test_executemany_failure(self): # """ # Ensure that an exception is raised if one query in an executemany fails. # """ # self.cursor.execute("create table t1(a int, b varchar(10))") # # params = [ (1, 'good'), # ('error', 'not an int'), # (3, 'good') ] # # self.assertRaises(pyodbc.Error, self.cursor.executemany, "insert into t1(a, b) value (?, ?)", params) def test_row_slicing(self): self.cursor.execute("create table t1(a int, b int, c int, d int)"); self.cursor.execute("insert into t1 values(1,2,3,4)") row = self.cursor.execute("select * from t1").fetchone() result = row[:] self.assertTrue(result is row) result = row[:-1] self.assertEqual(result, (1,2,3)) result = row[0:4] self.assertTrue(result is row) def test_row_repr(self): self.cursor.execute("create table t1(a int, b int, c int, d int)"); self.cursor.execute("insert into t1 values(1,2,3,4)") row = self.cursor.execute("select * from t1").fetchone() result = str(row) self.assertEqual(result, "(1, 2, 3, 4)") result = str(row[:-1]) self.assertEqual(result, "(1, 2, 3)") result = str(row[:1]) self.assertEqual(result, "(1,)") def test_autocommit(self): self.assertEqual(self.cnxn.autocommit, False) othercnxn = pyodbc.connect(self.connection_string, autocommit=True) self.assertEqual(othercnxn.autocommit, True) othercnxn.autocommit = False 
self.assertEqual(othercnxn.autocommit, False) def test_emoticons_as_parameter(self): # https://github.com/mkleehammer/pyodbc/issues/423 # # When sending a varchar parameter, pyodbc is supposed to set ColumnSize to the number # of characters. Ensure it works even with 4-byte characters. # # http://www.fileformat.info/info/unicode/char/1f31c/index.htm v = "x \U0001F31C z" self.cursor.execute("CREATE TABLE t1(s varchar(100)) DEFAULT CHARSET=utf8mb4") self.cursor.execute("insert into t1 values (?)", v) result = self.cursor.execute("select s from t1").fetchone()[0] self.assertEqual(result, v) def test_emoticons_as_literal(self): # https://github.com/mkleehammer/pyodbc/issues/630 v = "x \U0001F31C z" self.cursor.execute("CREATE TABLE t1(s varchar(100)) DEFAULT CHARSET=utf8mb4") self.cursor.execute("insert into t1 values ('%s')" % v) result = self.cursor.execute("select s from t1").fetchone()[0] self.assertEqual(result, v) def main(): from optparse import OptionParser parser = OptionParser(usage=usage) parser.add_option("-v", "--verbose", action="count", default=0, help="Increment test verbosity (can be used multiple times)") parser.add_option("-d", "--debug", action="store_true", default=False, help="Print debugging items") parser.add_option("-t", "--test", help="Run only the named test") (options, args) = parser.parse_args() if len(args) > 1: parser.error('Only one argument is allowed. 
Do you need quotes around the connection string?') if not args: filename = basename(sys.argv[0]) assert filename.endswith('.py') connection_string = load_setup_connection_string(filename[:-3]) if not connection_string: parser.print_help() raise SystemExit() else: connection_string = args[0] if options.verbose: cnxn = pyodbc.connect(connection_string) print_library_info(cnxn) cnxn.close() suite = load_tests(MySqlTestCase, options.test, connection_string) testRunner = unittest.TextTestRunner(verbosity=options.verbose) result = testRunner.run(suite) return result if __name__ == '__main__': # Add the build directory to the path so we're testing the latest build, not the installed version. add_to_path() import pyodbc sys.exit(0 if main().wasSuccessful() else 1) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1629144457.0 pyodbc-4.0.32/tests3/pgtests.py0000775000175000017500000006753000000000000017731 0ustar00mkleehammermkleehammer#!/usr/bin/env python # -*- coding: utf-8 -*- usage = """\ usage: %prog [options] connection_string Unit tests for PostgreSQL. To use, pass a connection string as the parameter. The tests will create and drop tables t1 and t2 as necessary. These run using the version from the 'build' directory, not the version installed into the Python directories. You must run python setup.py build before running the tests. You can also put the connection string into a tmp/setup.cfg file like so: [pgtests] connection-string=DSN=PostgreSQL35W Note: Be sure to use the "Unicode" (not the "ANSI") version of the PostgreSQL ODBC driver. """ import sys import uuid import unittest from decimal import Decimal from testutils import * _TESTSTR = '0123456789-abcdefghijklmnopqrstuvwxyz-' def _generate_test_string(length): """ Returns a string of composed of `seed` to make a string `length` characters long. To enhance performance, there are 3 ways data is read, based on the length of the value, so most data types are tested with 3 lengths. 
This function helps us generate the test data. We use a recognizable data set instead of a single character to make it less likely that "overlap" errors will be hidden and to help us manually identify where a break occurs. """ if length <= len(_TESTSTR): return _TESTSTR[:length] c = int((length + len(_TESTSTR) - 1) / len(_TESTSTR)) v = _TESTSTR * c return v[:length] class PGTestCase(unittest.TestCase): INTEGERS = [ -1, 0, 1, 0x7FFFFFFF ] BIGINTS = INTEGERS + [ 0xFFFFFFFF, 0x123456789 ] SMALL_READ = 100 LARGE_READ = 4000 SMALL_STRING = _generate_test_string(SMALL_READ) LARGE_STRING = _generate_test_string(LARGE_READ) SMALL_BYTES = bytes(SMALL_STRING, 'utf-8') LARGE_BYTES = bytes(LARGE_STRING, 'utf-8') def __init__(self, connection_string, ansi, method_name): unittest.TestCase.__init__(self, method_name) self.connection_string = connection_string self.ansi = ansi def setUp(self): self.cnxn = pyodbc.connect(self.connection_string, ansi=self.ansi) self.cursor = self.cnxn.cursor() # I've set my test database to use UTF-8 which seems most popular. self.cnxn.setdecoding(pyodbc.SQL_WCHAR, encoding='utf-8') self.cnxn.setencoding(encoding='utf-8') # As of psql 9.5.04 SQLGetTypeInfo returns absurdly small sizes leading # to slow writes. Override them: self.cnxn.maxwrite = 1024 * 1024 * 1024 for i in range(3): try: self.cursor.execute("drop table t%d" % i) self.cnxn.commit() except: pass self.cnxn.rollback() def tearDown(self): try: self.cursor.close() self.cnxn.close() except: # If we've already closed the cursor or connection, exceptions are thrown. pass def _simpletest(datatype, value): # A simple test that can be used for any data type where the Python # type we write is also what we expect to receive. 
def _t(self): self.cursor.execute('create table t1(value %s)' % datatype) self.cursor.execute('insert into t1 values (?)', value) result = self.cursor.execute("select value from t1").fetchone()[0] self.assertEqual(result, value) return _t def test_drivers(self): p = pyodbc.drivers() self.assertTrue(isinstance(p, list)) def test_datasources(self): p = pyodbc.dataSources() self.assertTrue(isinstance(p, dict)) # def test_gettypeinfo(self): # self.cursor.getTypeInfo(pyodbc.SQL_VARCHAR) # cols = [t[0] for t in self.cursor.description] # print('cols:', cols) # for row in self.cursor: # for col,val in zip(cols, row): # print(' ', col, val) def test_getinfo_string(self): value = self.cnxn.getinfo(pyodbc.SQL_CATALOG_NAME_SEPARATOR) self.assertTrue(isinstance(value, str)) def test_getinfo_bool(self): value = self.cnxn.getinfo(pyodbc.SQL_ACCESSIBLE_TABLES) self.assertTrue(isinstance(value, bool)) def test_getinfo_int(self): value = self.cnxn.getinfo(pyodbc.SQL_DEFAULT_TXN_ISOLATION) self.assertTrue(isinstance(value, int)) def test_getinfo_smallint(self): value = self.cnxn.getinfo(pyodbc.SQL_CONCAT_NULL_BEHAVIOR) self.assertTrue(isinstance(value, int)) def test_negative_float(self): value = -200 self.cursor.execute("create table t1(n float)") self.cursor.execute("insert into t1 values (?)", value) result = self.cursor.execute("select n from t1").fetchone()[0] self.assertEqual(value, result) def _test_strtype(self, sqltype, value, colsize=None, resulttype=None): """ The implementation for string, Unicode, and binary tests. 
""" assert colsize is None or (value is None or colsize >= len(value)) if colsize: sql = "create table t1(s %s(%s))" % (sqltype, colsize) else: sql = "create table t1(s %s)" % sqltype self.cursor.execute(sql) self.cursor.execute("insert into t1 values(?)", value) result = self.cursor.execute("select * from t1").fetchone()[0] if resulttype and type(value) is not resulttype: value = resulttype(value) self.assertEqual(result, value) def test_maxwrite(self): # If we write more than `maxwrite` bytes, pyodbc will switch from # binding the data all at once to providing it at execute time with # SQLPutData. The default maxwrite is 1GB so this is rarely needed in # PostgreSQL but I need to test the functionality somewhere. self.cnxn.maxwrite = 300 self._test_strtype('varchar', _generate_test_string(400)) # # VARCHAR # def test_empty_varchar(self): self._test_strtype('varchar', '', self.SMALL_READ) def test_null_varchar(self): self._test_strtype('varchar', None, self.SMALL_READ) def test_large_null_varchar(self): # There should not be a difference, but why not find out? 
self._test_strtype('varchar', None, self.LARGE_READ) def test_small_varchar(self): self._test_strtype('varchar', self.SMALL_STRING, self.SMALL_READ) def test_large_varchar(self): self._test_strtype('varchar', self.LARGE_STRING, self.LARGE_READ) def test_varchar_many(self): self.cursor.execute("create table t1(c1 varchar(300), c2 varchar(300), c3 varchar(300))") v1 = 'ABCDEFGHIJ' * 30 v2 = '0123456789' * 30 v3 = '9876543210' * 30 self.cursor.execute("insert into t1(c1, c2, c3) values (?,?,?)", v1, v2, v3) row = self.cursor.execute("select c1, c2, c3 from t1").fetchone() self.assertEqual(v1, row.c1) self.assertEqual(v2, row.c2) self.assertEqual(v3, row.c3) def test_chinese(self): v = '我的' self.cursor.execute("SELECT N'我的' AS name") row = self.cursor.fetchone() self.assertEqual(row[0], v) self.cursor.execute("SELECT N'我的' AS name") rows = self.cursor.fetchall() self.assertEqual(rows[0][0], v) # # bytea # def test_null_bytea(self): self._test_strtype('bytea', None) def test_small_bytea(self): self._test_strtype('bytea', self.SMALL_BYTES) def test_large_bytea(self): self._test_strtype('bytea', self.LARGE_BYTES) # Now test with bytearray def test_large_bytea_array(self): self._test_strtype('bytea', bytearray(self.LARGE_BYTES), resulttype=bytes) for value in INTEGERS: name = str(value).replace('.', '_').replace('-', 'neg_') locals()['test_int_%s' % name] = _simpletest('int', value) for value in BIGINTS: name = str(value).replace('.', '_').replace('-', 'neg_') locals()['test_bigint_%s' % name] = _simpletest('bigint', value) for value in [-1234.56, -1, 0, 1, 1234.56, 123456789.21]: name = str(value).replace('.', '_').replace('-', 'neg_') locals()['test_money_%s' % name] = _simpletest('money', value) for value in "-1234.56 -1 0 1 1234.56 123456789.21".split(): name = value.replace('.', '_').replace('-', 'neg_') locals()['test_decimal_%s' % name] = _simpletest('decimal(20,6)', Decimal(value)) for value in "-1234.56 -1 0 1 1234.56 123456789.21".split(): name = 
value.replace('.', '_').replace('-', 'neg_') locals()['test_numeric_%s' % name] = _simpletest('numeric(20,6)', Decimal(value)) def test_small_decimal(self): value = Decimal('100010') # (I use this because the ODBC docs tell us how the bytes should look in the C struct) self.cursor.execute("create table t1(d numeric(19))") self.cursor.execute("insert into t1 values(?)", value) v = self.cursor.execute("select * from t1").fetchone()[0] self.assertEqual(type(v), Decimal) self.assertEqual(v, value) def test_small_decimal_scale(self): # The same as small_decimal, except with a different scale. This value exactly matches the ODBC documentation # example in the C Data Types appendix. value = '1000.10' value = Decimal(value) self.cursor.execute("create table t1(d numeric(20,6))") self.cursor.execute("insert into t1 values(?)", value) v = self.cursor.execute("select * from t1").fetchone()[0] self.assertEqual(type(v), Decimal) self.assertEqual(v, value) def test_negative_decimal_scale(self): value = Decimal('-10.0010') self.cursor.execute("create table t1(d numeric(19,4))") self.cursor.execute("insert into t1 values(?)", value) v = self.cursor.execute("select * from t1").fetchone()[0] self.assertEqual(type(v), Decimal) self.assertEqual(v, value) def test_nonnative_uuid(self): # The default is False meaning we should return a string. Note that # SQL Server seems to always return uppercase. value = uuid.uuid4() self.cursor.execute("create table t1(n uuid)") self.cursor.execute("insert into t1 values (?)", value) pyodbc.native_uuid = False result = self.cursor.execute("select n from t1").fetchval() self.assertEqual(type(result), str) self.assertEqual(result, str(value).upper()) def test_native_uuid(self): # When true, we should return a uuid.UUID object. 
value = uuid.uuid4() self.cursor.execute("create table t1(n uuid)") self.cursor.execute("insert into t1 values (?)", value) pyodbc.native_uuid = True result = self.cursor.execute("select n from t1").fetchval() self.assertIsInstance(result, uuid.UUID) self.assertEqual(value, result) def _exec(self): self.cursor.execute(self.sql) def test_close_cnxn(self): """Make sure using a Cursor after closing its connection doesn't crash.""" self.cursor.execute("create table t1(id integer, s varchar(20))") self.cursor.execute("insert into t1 values (?,?)", 1, 'test') self.cursor.execute("select * from t1") self.cnxn.close() # Now that the connection is closed, we expect an exception. (If the code attempts to use # the HSTMT, we'll get an access violation instead.) self.sql = "select * from t1" self.assertRaises(pyodbc.ProgrammingError, self._exec) def test_empty_string(self): self.cursor.execute("create table t1(s varchar(20))") self.cursor.execute("insert into t1 values(?)", "") def test_fixed_str(self): value = "testing" self.cursor.execute("create table t1(s char(7))") self.cursor.execute("insert into t1 values(?)", "testing") v = self.cursor.execute("select * from t1").fetchone()[0] self.assertEqual(type(v), str) self.assertEqual(len(v), len(value)) # If we alloc'd wrong, the test below might work because of an embedded NULL self.assertEqual(v, value) def test_fetchval(self): expected = "test" self.cursor.execute("create table t1(s varchar(20))") self.cursor.execute("insert into t1 values(?)", expected) result = self.cursor.execute("select * from t1").fetchval() self.assertEqual(result, expected) def test_negative_row_index(self): self.cursor.execute("create table t1(s varchar(20))") self.cursor.execute("insert into t1 values(?)", "1") row = self.cursor.execute("select * from t1").fetchone() self.assertEqual(row[0], "1") self.assertEqual(row[-1], "1") def test_version(self): self.assertEqual(3, len(pyodbc.version.split('.'))) # 1.3.1 etc. 
    def test_rowcount_delete(self):
        # rowcount is -1 before any statement has executed.
        self.assertEqual(self.cursor.rowcount, -1)
        self.cursor.execute("create table t1(i int)")
        count = 4
        for i in range(count):
            self.cursor.execute("insert into t1 values (?)", i)
        self.cursor.execute("delete from t1")
        self.assertEqual(self.cursor.rowcount, count)

    def test_rowcount_nodata(self):
        """
        This represents a different code path than a delete that deleted something.

        The return value is SQL_NO_DATA and code after it was causing an error.  We could use
        SQL_NO_DATA to step over the code that errors out and drop down to the same SQLRowCount
        code.  On the other hand, we could hardcode a zero return value.
        """
        self.cursor.execute("create table t1(i int)")
        # This is a different code path internally.
        self.cursor.execute("delete from t1")
        self.assertEqual(self.cursor.rowcount, 0)

    def test_rowcount_select(self):
        self.cursor.execute("create table t1(i int)")
        count = 4
        for i in range(count):
            self.cursor.execute("insert into t1 values (?)", i)
        self.cursor.execute("select * from t1")
        self.assertEqual(self.cursor.rowcount, 4)

    # PostgreSQL driver fails here?
    # def test_rowcount_reset(self):
    #     "Ensure rowcount is reset to -1"
    #
    #     self.cursor.execute("create table t1(i int)")
    #     count = 4
    #     for i in range(count):
    #         self.cursor.execute("insert into t1 values (?)", i)
    #     self.assertEqual(self.cursor.rowcount, 1)
    #
    #     self.cursor.execute("create table t2(i int)")
    #     self.assertEqual(self.cursor.rowcount, -1)

    def test_lower_case(self):
        "Ensure pyodbc.lowercase forces returned column names to lowercase."

        # Has to be set before creating the cursor, so we must recreate self.cursor.
        pyodbc.lowercase = True
        self.cursor = self.cnxn.cursor()

        self.cursor.execute("create table t1(Abc int, dEf int)")
        self.cursor.execute("select * from t1")

        names = [ t[0] for t in self.cursor.description ]
        names.sort()

        self.assertEqual(names, [ "abc", "def" ])

        # Put it back so other tests don't fail.
pyodbc.lowercase = False def test_row_description(self): """ Ensure Cursor.description is accessible as Row.cursor_description. """ self.cursor = self.cnxn.cursor() self.cursor.execute("create table t1(a int, b char(3))") self.cnxn.commit() self.cursor.execute("insert into t1 values(1, 'abc')") row = self.cursor.execute("select * from t1").fetchone() self.assertEqual(self.cursor.description, row.cursor_description) def test_executemany(self): self.cursor.execute("create table t1(a int, b varchar(10))") params = [ (i, str(i)) for i in range(1, 6) ] self.cursor.executemany("insert into t1(a, b) values (?,?)", params) # REVIEW: Without the cast, we get the following error: # [07006] [unixODBC]Received an unsupported type from Postgres.;\nERROR: table "t2" does not exist (14) count = self.cursor.execute("select cast(count(*) as int) from t1").fetchone()[0] self.assertEqual(count, len(params)) self.cursor.execute("select a, b from t1 order by a") rows = self.cursor.fetchall() self.assertEqual(count, len(rows)) for param, row in zip(params, rows): self.assertEqual(param[0], row[0]) self.assertEqual(param[1], row[1]) def test_fast_executemany(self): self.fast_executemany = True self.cursor.execute("create table t1(a int, b varchar(10))") params = [(i, str(i)) for i in range(1, 6)] self.cursor.executemany("insert into t1(a, b) values (?,?)", params) # REVIEW: Without the cast, we get the following error: [07006] [unixODBC]Received an # unsupported type from Postgres.;\nERROR: table "t2" does not exist (14) count = self.cursor.execute("select cast(count(*) as int) from t1").fetchone()[0] self.assertEqual(count, len(params)) self.cursor.execute("select a, b from t1 order by a") rows = self.cursor.fetchall() self.assertEqual(count, len(rows)) for param, row in zip(params, rows): self.assertEqual(param[0], row[0]) self.assertEqual(param[1], row[1]) def test_executemany_failure(self): """ Ensure that an exception is raised if one query in an executemany fails. 
""" self.cursor.execute("create table t1(a int, b varchar(10))") params = [ (1, 'good'), ('error', 'not an int'), (3, 'good') ] self.assertRaises(pyodbc.Error, self.cursor.executemany, "insert into t1(a, b) value (?, ?)", params) def test_row_slicing(self): self.cursor.execute("create table t1(a int, b int, c int, d int)"); self.cursor.execute("insert into t1 values(1,2,3,4)") row = self.cursor.execute("select * from t1").fetchone() result = row[:] self.assertTrue(result is row) result = row[:-1] self.assertEqual(result, (1,2,3)) result = row[0:4] self.assertTrue(result is row) def test_cnxn_execute_error(self): """ Make sure that Connection.execute (not Cursor) errors are not "eaten". GitHub issue #74 """ self.cursor.execute("create table t1(a int primary key)") self.cursor.execute("insert into t1 values (1)") self.assertRaises(pyodbc.Error, self.cnxn.execute, "insert into t1 values (1)") def test_row_repr(self): self.cursor.execute("create table t1(a int, b int, c int, d int)") self.cursor.execute("insert into t1 values(1,2,3,4)") row = self.cursor.execute("select * from t1").fetchone() result = str(row) self.assertEqual(result, "(1, 2, 3, 4)") result = str(row[:-1]) self.assertEqual(result, "(1, 2, 3)") result = str(row[:1]) self.assertEqual(result, "(1,)") def test_autocommit(self): self.assertEqual(self.cnxn.autocommit, False) othercnxn = pyodbc.connect(self.connection_string, autocommit=True) self.assertEqual(othercnxn.autocommit, True) othercnxn.autocommit = False self.assertEqual(othercnxn.autocommit, False) def test_exc_integrity(self): "Make sure an IntegretyError is raised" # This is really making sure we are properly encoding and comparing the SQLSTATEs. 
        # (continuation of test_exc_integrity) A duplicate primary-key insert must raise
        # pyodbc.IntegrityError, which maps from SQLSTATE class 23.
        self.cursor.execute("create table t1(s1 varchar(10) primary key)")
        self.cursor.execute("insert into t1 values ('one')")
        self.assertRaises(pyodbc.IntegrityError, self.cursor.execute, "insert into t1 values ('one')")

    def test_cnxn_set_attr_before(self):
        # I don't have a getattr right now since I don't have a table telling me what kind of
        # value to expect.  For now just make sure it doesn't crash.
        # From the unixODBC sqlext.h header file.
        SQL_ATTR_PACKET_SIZE = 112
        othercnxn = pyodbc.connect(self.connection_string,
                                   attrs_before={ SQL_ATTR_PACKET_SIZE : 1024 * 32 })

    def test_cnxn_set_attr(self):
        # I don't have a getattr right now since I don't have a table telling me what kind of
        # value to expect.  For now just make sure it doesn't crash.
        # From the unixODBC sqlext.h header file.
        SQL_ATTR_ACCESS_MODE = 101
        SQL_MODE_READ_ONLY   = 1
        self.cnxn.set_attr(SQL_ATTR_ACCESS_MODE, SQL_MODE_READ_ONLY)

    def test_columns(self):
        # When using aiohttp, `await cursor.primaryKeys('t1')` was raising the error
        #
        #   Error: TypeError: argument 2 must be str, not None
        #
        # I'm not sure why, but PyArg_ParseTupleAndKeywords fails if you use "|s" for an
        # optional string keyword when calling indirectly.

        # Include a non-ASCII column name to verify catalog results are decoded correctly.
        self.cursor.execute("create table t1(a int, b varchar(3), xΏz varchar(4))")

        self.cursor.columns('t1')
        results = {row.column_name: row for row in self.cursor}
        row = results['a']
        assert row.type_name == 'int4', row.type_name
        row = results['b']
        assert row.type_name == 'varchar'
        assert row.precision == 3, row.precision
        row = results['xΏz']
        assert row.type_name == 'varchar'
        assert row.precision == 4, row.precision

        # Now do the same, but specifically pass in None to one of the keywords.  Old versions
        # were parsing arguments incorrectly and would raise an error.  (This crops up when
        # calling indirectly like columns(*args, **kwargs) which aiodbc does.)
        # (continuation of test_columns) Passing schema/catalog as explicit None must behave
        # the same as omitting them.
        self.cursor.columns('t1', schema=None, catalog=None)
        results = {row.column_name: row for row in self.cursor}
        row = results['a']
        assert row.type_name == 'int4', row.type_name
        row = results['b']
        assert row.type_name == 'varchar'
        assert row.precision == 3

    def test_cancel(self):
        # I'm not sure how to reliably cause a hang to cancel, so for now we'll settle with
        # making sure SQLCancel is called correctly.
        self.cursor.execute("select 1")
        self.cursor.cancel()

    def test_emoticons_as_parameter(self):
        # https://github.com/mkleehammer/pyodbc/issues/423
        #
        # When sending a varchar parameter, pyodbc is supposed to set ColumnSize to the number
        # of characters.  Ensure it works even with 4-byte characters.
        #
        # http://www.fileformat.info/info/unicode/char/1f31c/index.htm
        v = "x \U0001F31C z"

        self.cursor.execute("CREATE TABLE t1(s varchar(100))")
        self.cursor.execute("insert into t1 values (?)", v)

        result = self.cursor.execute("select s from t1").fetchone()[0]

        self.assertEqual(result, v)

    def test_emoticons_as_literal(self):
        # https://github.com/mkleehammer/pyodbc/issues/630
        v = "x \U0001F31C z"

        self.cursor.execute("CREATE TABLE t1(s varchar(100))")
        self.cursor.execute("insert into t1 values ('%s')" % v)

        result = self.cursor.execute("select s from t1").fetchone()[0]

        self.assertEqual(result, v)

    def test_cursor_messages(self):
        """
        Test the Cursor.messages attribute.
        """
        # self.cursor is used in setUp, hence is not brand new at this point
        brand_new_cursor = self.cnxn.cursor()
        self.assertIsNone(brand_new_cursor.messages)

        # using INFO message level because they are always sent to the client regardless of
        # client_min_messages: https://www.postgresql.org/docs/11/runtime-config-client.html
        for msg in ('hello world', 'ABCDEFGHIJ' * 800):
            # NOTE(review): the original statement layout inside this SQL string was lost in
            # extraction; reconstructed as a multi-line literal -- confirm against upstream.
            self.cursor.execute("""
                CREATE OR REPLACE PROCEDURE test_cursor_messages()
                LANGUAGE plpgsql
                AS $$
                BEGIN
                    RAISE INFO '{}' USING ERRCODE = '01000';
                END;
                $$;
            """.format(msg))
            self.cursor.execute("CALL test_cursor_messages();")
            messages = self.cursor.messages
            self.assertTrue(type(messages) is list)
            self.assertTrue(len(messages) > 0)
            self.assertTrue(all(type(m) is tuple for m in messages))
            self.assertTrue(all(len(m) == 2 for m in messages))
            self.assertTrue(all(type(m[0]) is str for m in messages))
            self.assertTrue(all(type(m[1]) is str for m in messages))
            self.assertTrue(all(m[0] == '[01000] (-1)' for m in messages))
            self.assertTrue(''.join(m[1] for m in messages).endswith(msg))

    def test_output_conversion(self):
        # Note the use of SQL_WVARCHAR, not SQL_VARCHAR.

        def convert(value):
            # The value is the raw bytes (as a bytes object) read from the
            # database.  We'll simply add an X at the beginning at the end.
            return 'X' + value.decode('latin1') + 'X'

        self.cursor.execute("create table t1(n int, v varchar(10))")
        self.cursor.execute("insert into t1 values (1, '123.45')")

        self.cnxn.add_output_converter(pyodbc.SQL_WVARCHAR, convert)
        value = self.cursor.execute("select v from t1").fetchone()[0]
        self.assertEqual(value, 'X123.45X')

        # Clear all conversions and try again.  There should be no Xs this time.
        self.cnxn.clear_output_converters()
        value = self.cursor.execute("select v from t1").fetchone()[0]
        self.assertEqual(value, '123.45')

        # Same but clear using remove_output_converter.
self.cnxn.add_output_converter(pyodbc.SQL_WVARCHAR, convert) value = self.cursor.execute("select v from t1").fetchone()[0] self.assertEqual(value, 'X123.45X') self.cnxn.remove_output_converter(pyodbc.SQL_WVARCHAR) value = self.cursor.execute("select v from t1").fetchone()[0] self.assertEqual(value, '123.45') # And lastly, clear by passing None for the converter. self.cnxn.add_output_converter(pyodbc.SQL_WVARCHAR, convert) value = self.cursor.execute("select v from t1").fetchone()[0] self.assertEqual(value, 'X123.45X') self.cnxn.add_output_converter(pyodbc.SQL_WVARCHAR, None) value = self.cursor.execute("select v from t1").fetchone()[0] self.assertEqual(value, '123.45') def main(): from optparse import OptionParser parser = OptionParser(usage="usage: %prog [options] connection_string") parser.add_option("-v", "--verbose", default=0, action="count", help="Increment test verbosity (can be used multiple times)") parser.add_option("-d", "--debug", action="store_true", default=False, help="Print debugging items") parser.add_option("-t", "--test", help="Run only the named test") parser.add_option('-a', '--ansi', help='ANSI only', default=False, action='store_true') (options, args) = parser.parse_args() if len(args) > 1: parser.error('Only one argument is allowed. 
Do you need quotes around the connection string?') if not args: connection_string = load_setup_connection_string('pgtests') if not connection_string: parser.print_help() raise SystemExit() else: connection_string = args[0] if options.verbose: cnxn = pyodbc.connect(connection_string, ansi=options.ansi) print_library_info(cnxn) cnxn.close() if options.test: # Run a single test if not options.test.startswith('test_'): options.test = 'test_%s' % (options.test) s = unittest.TestSuite([ PGTestCase(connection_string, options.ansi, options.test) ]) else: # Run all tests in the class methods = [ m for m in dir(PGTestCase) if m.startswith('test_') ] methods.sort() s = unittest.TestSuite([ PGTestCase(connection_string, options.ansi, m) for m in methods ]) testRunner = unittest.TextTestRunner(verbosity=options.verbose) result = testRunner.run(s) return result if __name__ == '__main__': # Add the build directory to the path so we're testing the latest build, not the installed version. add_to_path() import pyodbc sys.exit(0 if main().wasSuccessful() else 1) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1629144457.0 pyodbc-4.0.32/tests3/sparktests.py0000664000175000017500000004643000000000000020434 0ustar00mkleehammermkleehammer#!/usr/bin/env python # -*- coding: utf-8 -*- usage = """\ usage: %prog [options] connection_string Unit tests for Apache Spark. To use, pass a connection string as the parameter. The tests will create and drop tables t1 and t2 as necessary. These run using the version from the 'build' directory, not the version installed into the Python directories. You must run python setup.py build before running the tests. You can also put the connection string into a tmp/setup.cfg file like so: [sparktests] connection-string=DSN=Spark These tests use Simba Spark ODBC driver. The DSN should be configured with UseNativeQuery=0 to pass the tests. 
"""

import sys
import uuid
import unittest
from decimal import Decimal

from testutils import *

# Repeating seed pattern used to build recognizable test strings.
_TESTSTR = '0123456789-abcdefghijklmnopqrstuvwxyz-'

def _generate_test_string(length):
    """
    Returns a string of composed of `seed` to make a string `length` characters long.

    To enhance performance, there are 3 ways data is read, based on the length of the value,
    so most data types are tested with 3 lengths.  This function helps us generate the test
    data.

    We use a recognizable data set instead of a single character to make it less likely that
    "overlap" errors will be hidden and to help us manually identify where a break occurs.
    """
    if length <= len(_TESTSTR):
        return _TESTSTR[:length]

    c = int((length + len(_TESTSTR) - 1) / len(_TESTSTR))
    v = _TESTSTR * c
    return v[:length]

class SparkTestCase(unittest.TestCase):

    INTEGERS = [ -1, 0, 1, 0x7FFFFFFF ]
    BIGINTS  = INTEGERS + [ 0xFFFFFFFF, 0x123456789 ]

    SMALL_READ = 100
    LARGE_READ = 4000

    SMALL_STRING = _generate_test_string(SMALL_READ)
    LARGE_STRING = _generate_test_string(LARGE_READ)
    SMALL_BYTES  = bytes(SMALL_STRING, 'utf-8')
    LARGE_BYTES  = bytes(LARGE_STRING, 'utf-8')

    def __init__(self, connection_string, ansi, method_name):
        unittest.TestCase.__init__(self, method_name)
        self.connection_string = connection_string
        self.ansi = ansi

    def setUp(self):
        self.cnxn = pyodbc.connect(self.connection_string, ansi=self.ansi, autocommit=True)
        self.cursor = self.cnxn.cursor()

        # I've set my test database to use UTF-8 which seems most popular.
        self.cnxn.setdecoding(pyodbc.SQL_WCHAR, encoding='utf-8')
        self.cnxn.setencoding(encoding='utf-8')

        # As of psql 9.5.04 SQLGetTypeInfo returns absurdly small sizes leading
        # to slow writes.  Override them:
        self.cnxn.maxwrite = 1024 * 1024 * 1024

        # Best-effort cleanup of tables left behind by a previous run.
        for i in range(3):
            try:
                self.cursor.execute("drop table if exists t%d" % i)
            except:
                pass

    def tearDown(self):
        try:
            self.cursor.close()
            self.cnxn.close()
        except:
            # If we've already closed the cursor or connection, exceptions are thrown.
            pass

    def _simpletest(datatype, value):
        # A simple test that can be used for any data type where the Python
        # type we write is also what we expect to receive.
        def _t(self):
            self.cursor.execute('create table t1(value %s)' % datatype)
            self.cursor.execute('insert into t1 values (?)', value)
            result = self.cursor.execute("select value from t1").fetchone()[0]
            self.assertEqual(result, value)
        return _t

    def test_drivers(self):
        p = pyodbc.drivers()
        self.assertTrue(isinstance(p, list))

    def test_datasources(self):
        p = pyodbc.dataSources()
        self.assertTrue(isinstance(p, dict))

    # def test_gettypeinfo(self):
    #     self.cursor.getTypeInfo(pyodbc.SQL_VARCHAR)
    #     cols = [t[0] for t in self.cursor.description]
    #     print('cols:', cols)
    #     for row in self.cursor:
    #         for col,val in zip(cols, row):
    #             print(' ', col, val)

    def test_getinfo_string(self):
        value = self.cnxn.getinfo(pyodbc.SQL_CATALOG_NAME_SEPARATOR)
        self.assertTrue(isinstance(value, str))

    def test_getinfo_bool(self):
        value = self.cnxn.getinfo(pyodbc.SQL_ACCESSIBLE_TABLES)
        self.assertTrue(isinstance(value, bool))

    def test_getinfo_int(self):
        value = self.cnxn.getinfo(pyodbc.SQL_DEFAULT_TXN_ISOLATION)
        self.assertTrue(isinstance(value, int))

    def test_getinfo_smallint(self):
        value = self.cnxn.getinfo(pyodbc.SQL_CONCAT_NULL_BEHAVIOR)
        self.assertTrue(isinstance(value, int))

    def test_negative_float(self):
        value = -200
        self.cursor.execute("create table t1(n float)")
        self.cursor.execute("insert into t1 values (?)", value)
        result = self.cursor.execute("select n from t1").fetchone()[0]
        self.assertEqual(value, result)

    def _test_strtype(self, sqltype, value, colsize=None, resulttype=None):
        """
        The implementation for string, Unicode, and binary tests.
        """
        assert colsize is None or (value is None or colsize >= len(value))

        if colsize:
            sql = "create table t1(s %s(%s))" % (sqltype, colsize)
        else:
            sql = "create table t1(s %s)" % sqltype

        self.cursor.execute(sql)
        self.cursor.execute("insert into t1 values(?)", value)

        result = self.cursor.execute("select * from t1").fetchone()[0]

        if resulttype and type(value) is not resulttype:
            value = resulttype(value)

        self.assertEqual(result, value)

    #
    # VARCHAR
    #

    def test_empty_varchar(self):
        self._test_strtype('varchar', '', self.SMALL_READ)

    def test_null_varchar(self):
        self._test_strtype('varchar', None, self.SMALL_READ)

    def test_large_null_varchar(self):
        # There should not be a difference, but why not find out?
        self._test_strtype('varchar', None, self.LARGE_READ)

    def test_small_varchar(self):
        self._test_strtype('varchar', self.SMALL_STRING, self.SMALL_READ)

    def test_large_varchar(self):
        self._test_strtype('varchar', self.LARGE_STRING, self.LARGE_READ)

    def test_varchar_many(self):
        self.cursor.execute("create table t1(c1 varchar(300), c2 varchar(300), c3 varchar(300))")

        v1 = 'ABCDEFGHIJ' * 30
        v2 = '0123456789' * 30
        v3 = '9876543210' * 30

        self.cursor.execute("insert into t1(c1, c2, c3) values (?,?,?)", v1, v2, v3)
        row = self.cursor.execute("select c1, c2, c3 from t1").fetchone()

        self.assertEqual(v1, row.c1)
        self.assertEqual(v2, row.c2)
        self.assertEqual(v3, row.c3)

    def test_chinese(self):
        v = '我的'
        self.cursor.execute("SELECT '我的' AS name")
        row = self.cursor.fetchone()
        self.assertEqual(row[0], v)

        self.cursor.execute("SELECT '我的' AS name")
        rows = self.cursor.fetchall()
        self.assertEqual(rows[0][0], v)

    # Generate one test per value; these run in class scope so plain names work.
    for value in INTEGERS:
        name = str(value).replace('.', '_').replace('-', 'neg_')
        locals()['test_int_%s' % name] = _simpletest('int', value)

    for value in BIGINTS:
        name = str(value).replace('.', '_').replace('-', 'neg_')
        locals()['test_bigint_%s' % name] = _simpletest('bigint', value)

    for value in "-1234.56 -1 0 1 1234.56 123456789.21".split():
        name = value.replace('.', '_').replace('-', 'neg_')
        locals()['test_decimal_%s' % name] = _simpletest('decimal(20,6)', Decimal(value))

    for value in "-1234.56 -1 0 1 1234.56 123456789.21".split():
        name = value.replace('.', '_').replace('-', 'neg_')
        locals()['test_numeric_%s' % name] = _simpletest('numeric(20,6)', Decimal(value))

    def test_small_decimal(self):
        value = Decimal('100010')  # (I use this because the ODBC docs tell us how the bytes should look in the C struct)
        self.cursor.execute("create table t1(d numeric(19))")
        self.cursor.execute("insert into t1 values(?)", value)
        v = self.cursor.execute("select * from t1").fetchone()[0]
        self.assertEqual(type(v), Decimal)
        self.assertEqual(v, value)

    def test_small_decimal_scale(self):
        # The same as small_decimal, except with a different scale.  This value exactly matches
        # the ODBC documentation example in the C Data Types appendix.
        value = '1000.10'
        value = Decimal(value)
        self.cursor.execute("create table t1(d numeric(20,6))")
        self.cursor.execute("insert into t1 values(?)", value)
        v = self.cursor.execute("select * from t1").fetchone()[0]
        self.assertEqual(type(v), Decimal)
        self.assertEqual(v, value)

    def test_negative_decimal_scale(self):
        value = Decimal('-10.0010')
        self.cursor.execute("create table t1(d numeric(19,4))")
        self.cursor.execute("insert into t1 values(?)", value)
        v = self.cursor.execute("select * from t1").fetchone()[0]
        self.assertEqual(type(v), Decimal)
        self.assertEqual(v, value)

    def _exec(self):
        # Helper for assertRaises: executes whatever SQL was stashed in self.sql.
        self.cursor.execute(self.sql)

    def test_close_cnxn(self):
        """Make sure using a Cursor after closing its connection doesn't crash."""

        self.cursor.execute("create table t1(id integer, s varchar(20))")
        self.cursor.execute("insert into t1 values (?,?)", 1, 'test')
        self.cursor.execute("select * from t1")

        self.cnxn.close()

        # Now that the connection is closed, we expect an exception.  (If the code attempts to use
        # the HSTMT, we'll get an access violation instead.)
        # (continuation of test_close_cnxn)
        self.sql = "select * from t1"
        self.assertRaises(pyodbc.ProgrammingError, self._exec)

    def test_empty_string(self):
        # An empty string must round-trip without being turned into NULL.
        self.cursor.execute("create table t1(s varchar(20))")
        self.cursor.execute("insert into t1 values(?)", "")

    def test_fixed_str(self):
        value = "testing"
        self.cursor.execute("create table t1(s char(7))")
        self.cursor.execute("insert into t1 values(?)", "testing")
        v = self.cursor.execute("select * from t1").fetchone()[0]
        self.assertEqual(type(v), str)
        self.assertEqual(len(v), len(value))
        # If we alloc'd wrong, the test below might work because of an embedded NULL
        self.assertEqual(v, value)

    def test_fetchval(self):
        expected = "test"
        self.cursor.execute("create table t1(s varchar(20))")
        self.cursor.execute("insert into t1 values(?)", expected)
        result = self.cursor.execute("select * from t1").fetchval()
        self.assertEqual(result, expected)

    def test_negative_row_index(self):
        self.cursor.execute("create table t1(s varchar(20))")
        self.cursor.execute("insert into t1 values(?)", "1")
        row = self.cursor.execute("select * from t1").fetchone()
        self.assertEqual(row[0], "1")
        self.assertEqual(row[-1], "1")

    def test_version(self):
        self.assertEqual(3, len(pyodbc.version.split('.')))  # 1.3.1 etc.

    def test_lower_case(self):
        "Ensure pyodbc.lowercase forces returned column names to lowercase."

        # Has to be set before creating the cursor, so we must recreate self.cursor.
        pyodbc.lowercase = True
        self.cursor = self.cnxn.cursor()

        self.cursor.execute("create table t1(Abc int, dEf int)")
        self.cursor.execute("select * from t1")

        names = [ t[0] for t in self.cursor.description ]
        names.sort()

        self.assertEqual(names, [ "abc", "def" ])

        # Put it back so other tests don't fail.
        pyodbc.lowercase = False

    def test_long_column_name(self):
        "ensure super long column names are handled correctly."
c1 = 'abcdefghij' * 50 c2 = 'klmnopqrst' * 60 self.cursor = self.cnxn.cursor() self.cursor.execute("create table t1(c1 int, c2 int)") self.cursor.execute("select c1 as {}, c2 as {} from t1".format(c1, c2)) names = [ t[0] for t in self.cursor.description ] names.sort() self.assertEqual(names, [ c1, c2 ]) def test_row_description(self): """ Ensure Cursor.description is accessible as Row.cursor_description. """ self.cursor = self.cnxn.cursor() self.cursor.execute("create table t1(a int, b char(3))") self.cursor.execute("insert into t1 values(1, 'abc')") row = self.cursor.execute("select * from t1").fetchone() self.assertEqual(self.cursor.description, row.cursor_description) def test_executemany(self): self.cursor.execute("create table t1(a int, b varchar(10))") params = [ (i, str(i)) for i in range(1, 6) ] self.cursor.executemany("insert into t1(a, b) values (?,?)", params) count = self.cursor.execute("select count(*) from t1").fetchone()[0] self.assertEqual(count, len(params)) self.cursor.execute("select a, b from t1 order by a") rows = self.cursor.fetchall() self.assertEqual(count, len(rows)) for param, row in zip(params, rows): self.assertEqual(param[0], row[0]) self.assertEqual(param[1], row[1]) def test_fast_executemany(self): self.fast_executemany = True self.cursor.execute("create table t1(a int, b varchar(10))") params = [(i, str(i)) for i in range(1, 6)] self.cursor.executemany("insert into t1(a, b) values (?,?)", params) count = self.cursor.execute("select count(*) from t1").fetchone()[0] self.assertEqual(count, len(params)) self.cursor.execute("select a, b from t1 order by a") rows = self.cursor.fetchall() self.assertEqual(count, len(rows)) for param, row in zip(params, rows): self.assertEqual(param[0], row[0]) self.assertEqual(param[1], row[1]) def test_executemany_failure(self): """ Ensure that an exception is raised if one query in an executemany fails. 
""" self.cursor.execute("create table t1(a int, b varchar(10))") params = [ (1, 'good'), ('error', 'not an int'), (3, 'good') ] self.assertRaises(pyodbc.Error, self.cursor.executemany, "insert into t1(a, b) value (?, ?)", params) def test_row_slicing(self): self.cursor.execute("create table t1(a int, b int, c int, d int)"); self.cursor.execute("insert into t1 values(1,2,3,4)") row = self.cursor.execute("select * from t1").fetchone() result = row[:] self.assertTrue(result is row) result = row[:-1] self.assertEqual(result, (1,2,3)) result = row[0:4] self.assertTrue(result is row) def test_row_repr(self): self.cursor.execute("create table t1(a int, b int, c int, d int)") self.cursor.execute("insert into t1 values(1,2,3,4)") row = self.cursor.execute("select * from t1").fetchone() result = str(row) self.assertEqual(result, "(1, 2, 3, 4)") result = str(row[:-1]) self.assertEqual(result, "(1, 2, 3)") result = str(row[:1]) self.assertEqual(result, "(1,)") def test_cnxn_set_attr_before(self): # I don't have a getattr right now since I don't have a table telling me what kind of # value to expect. For now just make sure it doesn't crash. # From the unixODBC sqlext.h header file. SQL_ATTR_PACKET_SIZE = 112 othercnxn = pyodbc.connect(self.connection_string, attrs_before={ SQL_ATTR_PACKET_SIZE : 1024 * 32 }, autocommit=True) def test_cnxn_set_attr(self): # I don't have a getattr right now since I don't have a table telling me what kind of # value to expect. For now just make sure it doesn't crash. # From the unixODBC sqlext.h header file. 
        # (continuation of test_cnxn_set_attr) Constants from unixODBC's sqlext.h.
        SQL_ATTR_ACCESS_MODE = 101
        SQL_MODE_READ_ONLY   = 1
        self.cnxn.set_attr(SQL_ATTR_ACCESS_MODE, SQL_MODE_READ_ONLY)

    def test_columns(self):
        # Spark requires backticks around identifiers with non-ASCII characters.
        self.cursor.execute("create table t1(a int, b varchar(3), `xΏz` decimal(8,2))")

        self.cursor.columns('t1')
        results = {row.column_name: row for row in self.cursor}
        row = results['a']
        assert row.type_name == 'INT', row.type_name
        row = results['b']
        assert row.type_name == 'VARCHAR'
        row = results['xΏz']
        assert row.type_name == 'DECIMAL'

        # Now do the same, but specifically pass in None to one of the keywords.  Old versions
        # were parsing arguments incorrectly and would raise an error.  (This crops up when
        # calling indirectly like columns(*args, **kwargs) which aiodbc does.)
        self.cursor.columns('t1', schema=None, catalog=None)
        results = {row.column_name: row for row in self.cursor}
        row = results['a']
        assert row.type_name == 'INT', row.type_name
        row = results['b']
        assert row.type_name == 'VARCHAR'

    def test_cancel(self):
        # I'm not sure how to reliably cause a hang to cancel, so for now we'll settle with
        # making sure SQLCancel is called correctly.
        self.cursor.execute("select 1")
        self.cursor.cancel()

    def test_emoticons_as_parameter(self):
        # https://github.com/mkleehammer/pyodbc/issues/423
        #
        # When sending a varchar parameter, pyodbc is supposed to set ColumnSize to the number
        # of characters.  Ensure it works even with 4-byte characters.
# # http://www.fileformat.info/info/unicode/char/1f31c/index.htm v = "x \U0001F31C z" self.cursor.execute("CREATE TABLE t1(s varchar(100))") self.cursor.execute("insert into t1 values (?)", v) result = self.cursor.execute("select s from t1").fetchone()[0] self.assertEqual(result, v) def main(): from optparse import OptionParser parser = OptionParser(usage="usage: %prog [options] connection_string") parser.add_option("-v", "--verbose", default=0, action="count", help="Increment test verbosity (can be used multiple times)") parser.add_option("-d", "--debug", action="store_true", default=False, help="Print debugging items") parser.add_option("-t", "--test", help="Run only the named test") parser.add_option('-a', '--ansi', help='ANSI only', default=False, action='store_true') (options, args) = parser.parse_args() if len(args) > 1: parser.error('Only one argument is allowed. Do you need quotes around the connection string?') if not args: connection_string = load_setup_connection_string('sparktests') if not connection_string: parser.print_help() raise SystemExit() else: connection_string = args[0] if options.verbose: cnxn = pyodbc.connect(connection_string, ansi=options.ansi) print_library_info(cnxn) cnxn.close() if options.test: # Run a single test if not options.test.startswith('test_'): options.test = 'test_%s' % (options.test) s = unittest.TestSuite([ SparkTestCase(connection_string, options.ansi, options.test) ]) else: # Run all tests in the class methods = [ m for m in dir(SparkTestCase) if m.startswith('test_') ] methods.sort() s = unittest.TestSuite([ SparkTestCase(connection_string, options.ansi, m) for m in methods ]) testRunner = unittest.TextTestRunner(verbosity=options.verbose) result = testRunner.run(s) return result if __name__ == '__main__': # Add the build directory to the path so we're testing the latest build, not the installed version. 
add_to_path() import pyodbc sys.exit(0 if main().wasSuccessful() else 1) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1629143953.0 pyodbc-4.0.32/tests3/sqldwtests.py0000664000175000017500000015126300000000000020447 0ustar00mkleehammermkleehammer#!/usr/bin/python # -*- coding: utf-8 -*- x = 1 # Getting an error if starting with usage for some reason. usage = """\ usage: %prog [options] connection_string Unit tests for Azure SQL DW. To use, pass a connection string as the parameter. The tests will create and drop tables t1 and t2 as necessary. These run using the version from the 'build' directory, not the version installed into the Python directories. You must run python setup.py build before running the tests. You can also put the connection string into a tmp/setup.cfg file like so: [sqldwtests] connection-string=DRIVER={SQL Server};SERVER=localhost;UID=uid;PWD=pwd;DATABASE=db The connection string above will use the 2000/2005 driver, even if SQL Server 2008 is installed: 2000: DRIVER={SQL Server} 2005: DRIVER={SQL Server} 2008: DRIVER={SQL Server Native Client 10.0} If using FreeTDS ODBC, be sure to use version 1.00.97 or newer. """ import sys, os, re, uuid import unittest from decimal import Decimal from datetime import datetime, date, time from os.path import join, getsize, dirname, abspath from warnings import warn from testutils import * _TESTSTR = '0123456789-abcdefghijklmnopqrstuvwxyz-' def _generate_test_string(length): """ Returns a string of `length` characters, constructed by repeating _TESTSTR as necessary. To enhance performance, there are 3 ways data is read, based on the length of the value, so most data types are tested with 3 lengths. This function helps us generate the test data. We use a recognizable data set instead of a single character to make it less likely that "overlap" errors will be hidden and to help us manually identify where a break occurs. 
    """
    if length <= len(_TESTSTR):
        return _TESTSTR[:length]

    c = int((length + len(_TESTSTR)-1) / len(_TESTSTR))
    v = _TESTSTR * c
    return v[:length]

class SqlServerTestCase(unittest.TestCase):

    # Sizes chosen to straddle the internal read-strategy thresholds.
    SMALL_FENCEPOST_SIZES = [ 0, 1, 255, 256, 510, 511, 512, 1023, 1024, 2047, 2048, 4000 ]
    LARGE_FENCEPOST_SIZES = [ 4095, 4096, 4097, 10 * 1024, 20 * 1024 ]

    STR_FENCEPOSTS = [ _generate_test_string(size) for size in SMALL_FENCEPOST_SIZES ]
    BYTE_FENCEPOSTS = [ bytes(s, 'ascii') for s in STR_FENCEPOSTS ]
    IMAGE_FENCEPOSTS = BYTE_FENCEPOSTS + [ bytes(_generate_test_string(size), 'ascii') for size in LARGE_FENCEPOST_SIZES ]

    def __init__(self, method_name, connection_string):
        unittest.TestCase.__init__(self, method_name)
        self.connection_string = connection_string

    def driver_type_is(self, type_name):
        # Returns True when the connected driver matches the requested family
        # ('msodbcsql' or 'freetds'); raises KeyError for anything else.
        recognized_types = {
            'msodbcsql': '(Microsoft) ODBC Driver xx for SQL Server',
            'freetds': 'FreeTDS ODBC',
        }
        if not type_name in recognized_types.keys():
            raise KeyError('"{0}" is not a recognized driver type: {1}'.format(type_name, list(recognized_types.keys())))
        driver_name = self.cnxn.getinfo(pyodbc.SQL_DRIVER_NAME).lower()
        if type_name == 'msodbcsql':
            return ('msodbcsql' in driver_name) or ('sqlncli' in driver_name) or ('sqlsrv32.dll' == driver_name)
        elif type_name == 'freetds':
            return ('tdsodbc' in driver_name)

    def get_sqlserver_version(self):
        """
        Returns the major version: 8-->2000, 9-->2005, 10-->2008
        """
        self.cursor.execute("SELECT CAST(SERVERPROPERTY('ProductVersion') AS VARCHAR(255))")
        row = self.cursor.fetchone()
        return int(row[0].split('.', 1)[0])

    def setUp(self):
        self.cnxn = pyodbc.connect(self.connection_string)
        self.cursor = self.cnxn.cursor()

        # I (Kleehammer) have been using a latin1 collation.  If you have a
        # different collation, you'll need to update this.  If someone knows of
        # a good way for this to be dynamic, please update.  (I suppose we
        # could maintain a map from collation to encoding?)
self.cnxn.setdecoding(pyodbc.SQL_CHAR, 'latin1') for i in range(3): try: self.cursor.execute("drop table t%d" % i) except: pass for i in range(3): try: self.cursor.execute("drop procedure proc%d" % i) except: pass try: self.cursor.execute('drop function func1') except: pass def tearDown(self): try: self.cursor.close() self.cnxn.close() except: # If we've already closed the cursor or connection, exceptions are thrown. pass def _simpletest(datatype, value): # A simple test that can be used for any data type where the Python # type we write is also what we expect to receive. def _t(self): self.cursor.execute('create table t1(value %s)' % datatype) self.cursor.execute('insert into t1 values (?)', value) result = self.cursor.execute("select value from t1").fetchone()[0] self.assertEqual(result, value) return _t def test_multiple_bindings(self): "More than one bind and select on a cursor" self.cursor.execute("create table t1(n int)") self.cursor.execute("insert into t1 values (?)", 1) self.cursor.execute("insert into t1 values (?)", 2) self.cursor.execute("insert into t1 values (?)", 3) for i in range(3): self.cursor.execute("select n from t1 where n < ?", 10) self.cursor.execute("select n from t1 where n < 3") def test_different_bindings(self): self.cursor.execute("create table t1(n int)") self.cursor.execute("create table t2(d datetime)") self.cursor.execute("insert into t1 values (?)", 1) self.cursor.execute("insert into t2 values (?)", datetime.now()) def test_drivers(self): p = pyodbc.drivers() self.assertTrue(isinstance(p, list)) def test_datasources(self): p = pyodbc.dataSources() self.assertTrue(isinstance(p, dict)) def test_getinfo_string(self): value = self.cnxn.getinfo(pyodbc.SQL_CATALOG_NAME_SEPARATOR) self.assertTrue(isinstance(value, str)) def test_getinfo_bool(self): value = self.cnxn.getinfo(pyodbc.SQL_ACCESSIBLE_TABLES) self.assertTrue(isinstance(value, bool)) def test_getinfo_int(self): value = self.cnxn.getinfo(pyodbc.SQL_DEFAULT_TXN_ISOLATION) 
self.assertTrue(isinstance(value, (int, int))) def test_getinfo_smallint(self): value = self.cnxn.getinfo(pyodbc.SQL_CONCAT_NULL_BEHAVIOR) self.assertTrue(isinstance(value, int)) def test_noscan(self): self.assertEqual(self.cursor.noscan, False) self.cursor.noscan = True self.assertEqual(self.cursor.noscan, True) def test_nextset(self): self.cursor.execute("create table t1(i int)") for i in range(4): self.cursor.execute("insert into t1(i) values(?)", i) self.cursor.execute("select i from t1 where i < 2 order by i; select i from t1 where i >= 2 order by i") for i, row in enumerate(self.cursor): self.assertEqual(i, row.i) self.assertEqual(self.cursor.nextset(), True) for i, row in enumerate(self.cursor): self.assertEqual(i + 2, row.i) def test_nextset_with_raiserror(self): self.cursor.execute("select i = 1; RAISERROR('c', 16, 1);") row = next(self.cursor) self.assertEqual(1, row.i) if self.driver_type_is('freetds'): warn('FREETDS_KNOWN_ISSUE - test_nextset_with_raiserror: test cancelled.') # AssertionError: ProgrammingError not raised by nextset # https://github.com/FreeTDS/freetds/issues/230 return # for now self.assertRaises(pyodbc.ProgrammingError, self.cursor.nextset) def test_fixed_unicode(self): value = "t\xebsting" self.cursor.execute("create table t1(s nchar(7))") self.cursor.execute("insert into t1 values(?)", "t\xebsting") v = self.cursor.execute("select * from t1").fetchone()[0] self.assertEqual(type(v), str) self.assertEqual(len(v), len(value)) # If we alloc'd wrong, the test below might work because of an embedded NULL self.assertEqual(v, value) def _test_strtype(self, sqltype, value, resulttype=None, colsize=None): """ The implementation for string, Unicode, and binary tests. 
""" assert colsize is None or isinstance(colsize, int), colsize assert colsize is None or (value is None or colsize >= len(value)) if colsize: sql = "create table t1(s %s(%s))" % (sqltype, colsize) else: sql = "create table t1(s %s)" % sqltype if colsize >= 2000 and (sqltype == 'nvarchar' or sqltype == 'varchar'): self.cursor.setinputsizes([(pyodbc.SQL_WVARCHAR, 0, 0)]) self.cursor.execute(sql) if resulttype is None: resulttype = type(value) sql = "insert into t1 values(?)" try: self.cursor.execute(sql, value) except pyodbc.DataError: if self.driver_type_is('freetds'): # FREETDS_KNOWN_ISSUE # # cnxn.getinfo(pyodbc.SQL_DESCRIBE_PARAMETER) returns False for FreeTDS, so # pyodbc can't call SQLDescribeParam to get the correct parameter type. # This can lead to errors being returned from SQL Server when sp_prepexec is called, # e.g., "Implicit conversion from data type varchar to varbinary is not allowed." # for test_binary_null # # So at least verify that the user can manually specify the parameter type if sqltype == 'varbinary': sql_param_type = pyodbc.SQL_VARBINARY # (add elif blocks for other cases as required) self.cursor.setinputsizes([(sql_param_type, colsize, 0)]) self.cursor.execute(sql, value) else: raise v = self.cursor.execute("select * from t1").fetchone()[0] self.assertEqual(type(v), resulttype) if value is not None: self.assertEqual(len(v), len(value)) # To allow buffer --> db --> bytearray tests, always convert the input to the expected result type before # comparing. if type(value) is not resulttype: value = resulttype(value) self.assertEqual(v, value) def _test_strliketype(self, sqltype, value, resulttype=None, colsize=None): """ The implementation for text, image, ntext, and binary. These types do not support comparison operators. 
""" assert colsize is None or isinstance(colsize, int), colsize assert colsize is None or (value is None or colsize >= len(value)) if colsize: sql = "create table t1(s %s(%s))" % (sqltype, colsize) else: sql = "create table t1(s %s)" % sqltype if resulttype is None: resulttype = type(value) self.cursor.execute(sql) self.cursor.execute("insert into t1 values(?)", value) result = self.cursor.execute("select * from t1").fetchone()[0] self.assertEqual(type(result), resulttype) # To allow buffer --> db --> bytearray tests, always convert the input to the expected result type before # comparing. if type(value) is not resulttype: value = resulttype(value) self.assertEqual(result, value) # # varchar # def test_varchar_null(self): self._test_strtype('varchar', None, colsize=100) # Generate a test for each fencepost size: test_varchar_0, etc. def _maketest(value): def t(self): self._test_strtype('varchar', value, colsize=len(value)) return t for value in STR_FENCEPOSTS: locals()['test_varchar_%s' % len(value)] = _maketest(value) def test_varchar_many(self): self.cursor.execute("create table t1(c1 varchar(300), c2 varchar(300), c3 varchar(300))") v1 = 'ABCDEFGHIJ' * 30 v2 = '0123456789' * 30 v3 = '9876543210' * 30 self.cursor.execute("insert into t1(c1, c2, c3) values (?,?,?)", v1, v2, v3); row = self.cursor.execute("select c1, c2, c3, len(c1) as l1, len(c2) as l2, len(c3) as l3 from t1").fetchone() self.assertEqual(v1, row.c1) self.assertEqual(v2, row.c2) self.assertEqual(v3, row.c3) # # nvarchar # def test_unicode_null(self): self._test_strtype('nvarchar', None, colsize=100) # Generate a test for each fencepost size: test_unicode_0, etc. 
def _maketest(value): def t(self): self._test_strtype('nvarchar', value, colsize=len(value)) return t for value in STR_FENCEPOSTS: locals()['test_unicode_%s' % len(value)] = _maketest(value) def test_unicode_longmax(self): # Issue 188: Segfault when fetching NVARCHAR(MAX) data over 511 bytes ver = self.get_sqlserver_version() if ver < 9: # 2005+ return # so pass / ignore self.cursor.execute("select cast(replicate(N'x', 512) as nvarchar(max))") # From issue #206 def _maketest(value): def t(self): self._test_strtype('nvarchar', value, colsize=len(value)) return t locals()['test_chinese_param'] = _maketest('我的') def test_chinese(self): v = '我的' self.cursor.execute(u"SELECT N'我的' AS [Name]") row = self.cursor.fetchone() self.assertEqual(row[0], v) self.cursor.execute(u"SELECT N'我的' AS [Name]") rows = self.cursor.fetchall() self.assertEqual(rows[0][0], v) def test_fast_executemany_to_local_temp_table(self): if self.driver_type_is('freetds'): warn('FREETDS_KNOWN_ISSUE - test_fast_executemany_to_local_temp_table: test cancelled.') return v = 'Ώπα' self.cursor.execute("CREATE TABLE #issue295 (id INT, txt NVARCHAR(50))") sql = "INSERT INTO #issue295 (txt) VALUES (?)" params = [(v,)] self.cursor.setinputsizes([(pyodbc.SQL_WVARCHAR, 50, 0)]) self.cursor.fast_executemany = True self.cursor.executemany(sql, params) self.assertEqual(self.cursor.execute("SELECT txt FROM #issue295").fetchval(), v) # # binary # # def test_binary_null(self): # self._test_strtype('varbinary', None, colsize=100) # bytearray def _maketest(value): def t(self): self._test_strtype('varbinary', bytearray(value), colsize=len(value), resulttype=bytes) return t for value in BYTE_FENCEPOSTS: locals()['test_binary_bytearray_%s' % len(value)] = _maketest(value) # bytes def _maketest(value): def t(self): self._test_strtype('varbinary', bytes(value), colsize=len(value)) return t for value in BYTE_FENCEPOSTS: locals()['test_binary_bytes_%s' % len(value)] = _maketest(value) # bytearray # bytes # # text # # # bit # 
def test_bit(self): value = True self.cursor.execute("create table t1(b bit)") self.cursor.execute("insert into t1 values (?)", value) v = self.cursor.execute("select b from t1").fetchone()[0] self.assertEqual(type(v), bool) self.assertEqual(v, value) # # decimal # def _decimal(self, precision, scale, negative): # From test provided by planders (thanks!) in Issue 91 self.cursor.execute("create table t1(d decimal(%s, %s))" % (precision, scale)) # Construct a decimal that uses the maximum precision and scale. decStr = '9' * (precision - scale) if scale: decStr = decStr + "." + '9' * scale if negative: decStr = "-" + decStr value = Decimal(decStr) self.cursor.execute("insert into t1 values(?)", value) v = self.cursor.execute("select d from t1").fetchone()[0] self.assertEqual(v, value) def _maketest(p, s, n): def t(self): self._decimal(p, s, n) return t for (p, s, n) in [ (1, 0, False), (1, 0, True), (6, 0, False), (6, 2, False), (6, 4, True), (6, 6, True), (38, 0, False), (38, 10, False), (38, 38, False), (38, 0, True), (38, 10, True), (38, 38, True) ]: locals()['test_decimal_%s_%s_%s' % (p, s, n and 'n' or 'p')] = _maketest(p, s, n) def test_decimal_e(self): """Ensure exponential notation decimals are properly handled""" value = Decimal((0, (1, 2, 3), 5)) # prints as 1.23E+7 self.cursor.execute("create table t1(d decimal(10, 2))") self.cursor.execute("insert into t1 values (?)", value) result = self.cursor.execute("select * from t1").fetchone()[0] self.assertEqual(result, value) def test_subquery_params(self): """Ensure parameter markers work in a subquery""" self.cursor.execute("create table t1(id integer, s varchar(20))") self.cursor.execute("insert into t1 values (?,?)", 1, 'test') row = self.cursor.execute(""" select x.id from ( select id from t1 where s = ? and id between ? and ? 
) x """, 'test', 1, 10).fetchone() self.assertNotEqual(row, None) self.assertEqual(row[0], 1) def _exec(self): self.cursor.execute(self.sql) def test_close_cnxn(self): """Make sure using a Cursor after closing its connection doesn't crash.""" self.cursor.execute("create table t1(id integer, s varchar(20))") self.cursor.execute("insert into t1 values (?,?)", 1, 'test') self.cursor.execute("select * from t1") self.cnxn.close() # Now that the connection is closed, we expect an exception. (If the code attempts to use # the HSTMT, we'll get an access violation instead.) self.sql = "select * from t1" self.assertRaises(pyodbc.ProgrammingError, self._exec) def test_empty_string(self): self.cursor.execute("create table t1(s varchar(20))") self.cursor.execute("insert into t1 values(?)", "") def test_empty_string_encoding(self): self.cnxn.setdecoding(pyodbc.SQL_CHAR, encoding='shift_jis') value = "" self.cursor.execute("create table t1(s varchar(20))") self.cursor.execute("insert into t1 values(?)", value) v = self.cursor.execute("select * from t1").fetchone()[0] self.assertEqual(v, value) def test_fixed_str(self): value = "testing" self.cursor.execute("create table t1(s char(7))") self.cursor.execute("insert into t1 values(?)", value) v = self.cursor.execute("select * from t1").fetchone()[0] self.assertEqual(type(v), str) self.assertEqual(len(v), len(value)) # If we alloc'd wrong, the test below might work because of an embedded NULL self.assertEqual(v, value) def test_empty_unicode(self): self.cursor.execute("create table t1(s nvarchar(20))") self.cursor.execute("insert into t1 values(?)", "") def test_empty_unicode_encoding(self): self.cnxn.setdecoding(pyodbc.SQL_CHAR, encoding='shift_jis') value = "" self.cursor.execute("create table t1(s nvarchar(20))") self.cursor.execute("insert into t1 values(?)", value) v = self.cursor.execute("select * from t1").fetchone()[0] self.assertEqual(v, value) def test_negative_row_index(self): self.cursor.execute("create table t1(s 
varchar(20))") self.cursor.execute("insert into t1 values(?)", "1") row = self.cursor.execute("select * from t1").fetchone() self.assertEqual(row[0], "1") self.assertEqual(row[-1], "1") def test_version(self): self.assertEqual(3, len(pyodbc.version.split('.'))) # 1.3.1 etc. # # date, time, datetime # def test_datetime(self): value = datetime(2007, 1, 15, 3, 4, 5) self.cursor.execute("create table t1(dt datetime)") self.cursor.execute("insert into t1 values (?)", value) result = self.cursor.execute("select dt from t1").fetchone()[0] self.assertEqual(type(result), datetime) self.assertEqual(value, result) def test_datetime_fraction(self): # SQL Server supports milliseconds, but Python's datetime supports nanoseconds, so the most granular datetime # supported is xxx000. value = datetime(2007, 1, 15, 3, 4, 5, 123000) self.cursor.execute("create table t1(dt datetime)") self.cursor.execute("insert into t1 values (?)", value) result = self.cursor.execute("select dt from t1").fetchone()[0] self.assertEqual(type(result), datetime) self.assertEqual(value, result) def test_datetime_fraction_rounded(self): # SQL Server supports milliseconds, but Python's datetime supports nanoseconds. pyodbc rounds down to what the # database supports. 
full = datetime(2007, 1, 15, 3, 4, 5, 123456) rounded = datetime(2007, 1, 15, 3, 4, 5, 123000) self.cursor.execute("create table t1(dt datetime)") self.cursor.execute("insert into t1 values (?)", full) result = self.cursor.execute("select dt from t1").fetchone()[0] self.assertEqual(type(result), datetime) self.assertEqual(rounded, result) def test_date(self): ver = self.get_sqlserver_version() if ver < 10: # 2008 only return # so pass / ignore value = date.today() self.cursor.execute("create table t1(d date)") self.cursor.execute("insert into t1 values (?)", value) result = self.cursor.execute("select d from t1").fetchone()[0] self.assertEqual(type(result), date) self.assertEqual(value, result) def test_time(self): ver = self.get_sqlserver_version() if ver < 10: # 2008 only return # so pass / ignore value = datetime.now().time() # We aren't yet writing values using the new extended time type so the value written to the database is only # down to the second. value = value.replace(microsecond=0) self.cursor.execute("create table t1(t time)") self.cursor.execute("insert into t1 values (?)", value) result = self.cursor.execute("select t from t1").fetchone()[0] self.assertEqual(type(result), time) self.assertEqual(value, result) def test_datetime2(self): value = datetime(2007, 1, 15, 3, 4, 5) self.cursor.execute("create table t1(dt datetime2)") self.cursor.execute("insert into t1 values (?)", value) result = self.cursor.execute("select dt from t1").fetchone()[0] self.assertEqual(type(result), datetime) self.assertEqual(value, result) # # ints and floats # def test_int(self): value = 1234 self.cursor.execute("create table t1(n int)") self.cursor.execute("insert into t1 values (?)", value) result = self.cursor.execute("select n from t1").fetchone()[0] self.assertEqual(result, value) def test_negative_int(self): value = -1 self.cursor.execute("create table t1(n int)") self.cursor.execute("insert into t1 values (?)", value) result = self.cursor.execute("select n from 
t1").fetchone()[0] self.assertEqual(result, value) def test_bigint(self): input = 3000000000 self.cursor.execute("create table t1(d bigint)") self.cursor.execute("insert into t1 values (?)", input) result = self.cursor.execute("select d from t1").fetchone()[0] self.assertEqual(result, input) def test_float(self): value = 1234.567 self.cursor.execute("create table t1(n float)") self.cursor.execute("insert into t1 values (?)", value) result = self.cursor.execute("select n from t1").fetchone()[0] self.assertEqual(result, value) def test_negative_float(self): value = -200 self.cursor.execute("create table t1(n float)") self.cursor.execute("insert into t1 values (?)", value) result = self.cursor.execute("select n from t1").fetchone()[0] self.assertEqual(value, result) # # stored procedures # # def test_callproc(self): # "callproc with a simple input-only stored procedure" # pass def test_sp_results(self): self.cursor.execute( """ Create procedure proc1 AS select top 10 name, id, xtype, refdate from sysobjects """) rows = self.cursor.execute("exec proc1").fetchall() self.assertEqual(type(rows), list) self.assertEqual(len(rows), 10) # there has to be at least 10 items in sysobjects self.assertEqual(type(rows[0].refdate), datetime) def test_sp_results_from_temp(self): # Note: I've used "set nocount on" so that we don't get the number of rows deleted from #tmptable. # If you don't do this, you'd need to call nextset() once to skip it. 
self.cursor.execute( """ Create procedure proc1 AS set nocount on select top 10 name, id, xtype, refdate into #tmptable from sysobjects select * from #tmptable """) self.cursor.execute("exec proc1") self.assertTrue(self.cursor.description is not None) self.assertTrue(len(self.cursor.description) == 4) rows = self.cursor.fetchall() self.assertEqual(type(rows), list) self.assertEqual(len(rows), 10) # there has to be at least 10 items in sysobjects self.assertEqual(type(rows[0].refdate), datetime) def test_sp_with_dates(self): # Reported in the forums that passing two datetimes to a stored procedure doesn't work. self.cursor.execute( """ if exists (select * from dbo.sysobjects where id = object_id(N'[test_sp]') and OBJECTPROPERTY(id, N'IsProcedure') = 1) drop procedure [dbo].[test_sp] """) self.cursor.execute( """ create procedure test_sp(@d1 datetime, @d2 datetime) AS declare @d as int set @d = datediff(year, @d1, @d2) select @d """) self.cursor.execute("exec test_sp ?, ?", datetime.now(), datetime.now()) rows = self.cursor.fetchall() self.assertTrue(rows is not None) self.assertTrue(rows[0][0] == 0) # 0 years apart def test_sp_with_none(self): # Reported in the forums that passing None caused an error. 
self.cursor.execute( """ if exists (select * from dbo.sysobjects where id = object_id(N'[test_sp]') and OBJECTPROPERTY(id, N'IsProcedure') = 1) drop procedure [dbo].[test_sp] """) self.cursor.execute( """ create procedure test_sp(@x varchar(20)) AS declare @y varchar(20) set @y = @x select @y """) self.cursor.execute("exec test_sp ?", None) rows = self.cursor.fetchall() self.assertTrue(rows is not None) self.assertTrue(rows[0][0] == None) # 0 years apart # # rowcount # def test_rowcount_delete(self): self.assertEqual(self.cursor.rowcount, -1) self.cursor.execute("create table t1(i int)") count = 4 for i in range(count): self.cursor.execute("insert into t1 values (?)", i) self.cursor.execute("delete from t1") self.assertEqual(self.cursor.rowcount, count) def test_rowcount_nodata(self): """ This represents a different code path than a delete that deleted something. The return value is SQL_NO_DATA and code after it was causing an error. We could use SQL_NO_DATA to step over the code that errors out and drop down to the same SQLRowCount code. On the other hand, we could hardcode a zero return value. """ self.cursor.execute("create table t1(i int)") # This is a different code path internally. self.cursor.execute("delete from t1") self.assertEqual(self.cursor.rowcount, 0) def test_rowcount_select(self): """ Ensure Cursor.rowcount is set properly after a select statement. pyodbc calls SQLRowCount after each execute and sets Cursor.rowcount, but SQL Server 2005 returns -1 after a select statement, so we'll test for that behavior. This is valid behavior according to the DB API specification, but people don't seem to like it. 
""" self.cursor.execute("create table t1(i int)") count = 4 for i in range(count): self.cursor.execute("insert into t1 values (?)", i) self.cursor.execute("select * from t1") self.assertEqual(self.cursor.rowcount, -1) rows = self.cursor.fetchall() self.assertEqual(len(rows), count) self.assertEqual(self.cursor.rowcount, -1) def test_rowcount_reset(self): "Ensure rowcount is reset after DDL" ddl_rowcount = 0 if self.driver_type_is('freetds') else -1 self.cursor.execute("create table t1(i int)") count = 4 for i in range(count): self.cursor.execute("insert into t1 values (?)", i) self.assertEqual(self.cursor.rowcount, 1) self.cursor.execute("create table t2(i int)") self.assertEqual(self.cursor.rowcount, ddl_rowcount) # # always return Cursor # # In the 2.0.x branch, Cursor.execute sometimes returned the cursor and sometimes the rowcount. This proved very # confusing when things went wrong and added very little value even when things went right since users could always # use: cursor.execute("...").rowcount def test_retcursor_delete(self): self.cursor.execute("create table t1(i int)") self.cursor.execute("insert into t1 values (1)") v = self.cursor.execute("delete from t1") self.assertEqual(v, self.cursor) def test_retcursor_nodata(self): """ This represents a different code path than a delete that deleted something. The return value is SQL_NO_DATA and code after it was causing an error. We could use SQL_NO_DATA to step over the code that errors out and drop down to the same SQLRowCount code. """ self.cursor.execute("create table t1(i int)") # This is a different code path internally. 
v = self.cursor.execute("delete from t1") self.assertEqual(v, self.cursor) def test_retcursor_select(self): self.cursor.execute("create table t1(i int)") self.cursor.execute("insert into t1 values (1)") v = self.cursor.execute("select * from t1") self.assertEqual(v, self.cursor) # # misc # def table_with_spaces(self): "Ensure we can select using [x z] syntax" try: self.cursor.execute("create table [test one](int n)") self.cursor.execute("insert into [test one] values(1)") self.cursor.execute("select * from [test one]") v = self.cursor.fetchone()[0] self.assertEqual(v, 1) finally: self.cnxn.rollback() def test_lower_case(self): "Ensure pyodbc.lowercase forces returned column names to lowercase." # Has to be set before creating the cursor, so we must recreate self.cursor. pyodbc.lowercase = True self.cursor = self.cnxn.cursor() self.cursor.execute("create table t1(Abc int, dEf int)") self.cursor.execute("select * from t1") names = [ t[0] for t in self.cursor.description ] names.sort() self.assertEqual(names, [ "abc", "def" ]) # Put it back so other tests don't fail. pyodbc.lowercase = False def test_row_description(self): """ Ensure Cursor.description is accessible as Row.cursor_description. """ self.cursor = self.cnxn.cursor() self.cursor.execute("create table t1(a int, b char(3))") self.cursor.execute("insert into t1 values(1, 'abc')") row = self.cursor.execute("select * from t1").fetchone() self.assertEqual(self.cursor.description, row.cursor_description) def test_temp_select(self): # A project was failing to create temporary tables via select into. 
self.cursor.execute("create table t1(s char(7))") self.cursor.execute("insert into t1 values(?)", "testing") v = self.cursor.execute("select * from t1").fetchone()[0] self.assertEqual(type(v), str) self.assertEqual(v, "testing") self.cursor.execute("select s into t2 from t1") v = self.cursor.execute("select * from t1").fetchone()[0] self.assertEqual(type(v), str) self.assertEqual(v, "testing") # Money # # The inputs are strings so we don't have to deal with floating point rounding. for value in "-1234.56 -1 0 1 1234.56 123456789.21".split(): name = str(value).replace('.', '_').replace('-', 'neg_') locals()['test_money_%s' % name] = _simpletest('money', Decimal(str(value))) def test_executemany(self): self.cursor.execute("create table t1(a int, b varchar(10))") params = [ (i, str(i)) for i in range(1, 6) ] self.cursor.executemany("insert into t1(a, b) values (?,?)", params) count = self.cursor.execute("select count(*) from t1").fetchone()[0] self.assertEqual(count, len(params)) self.cursor.execute("select a, b from t1 order by a") rows = self.cursor.fetchall() self.assertEqual(count, len(rows)) for param, row in zip(params, rows): self.assertEqual(param[0], row[0]) self.assertEqual(param[1], row[1]) def test_executemany_one(self): "Pass executemany a single sequence" self.cursor.execute("create table t1(a int, b varchar(10))") params = [ (1, "test") ] self.cursor.executemany("insert into t1(a, b) values (?,?)", params) count = self.cursor.execute("select count(*) from t1").fetchone()[0] self.assertEqual(count, len(params)) self.cursor.execute("select a, b from t1 order by a") rows = self.cursor.fetchall() self.assertEqual(count, len(rows)) for param, row in zip(params, rows): self.assertEqual(param[0], row[0]) self.assertEqual(param[1], row[1]) def test_executemany_dae_0(self): """ DAE for 0-length value """ self.cursor.execute("create table t1(a nvarchar(max)) with (heap)") self.cursor.fast_executemany = True self.cursor.executemany("insert into t1(a) values(?)", 
[['']]) self.assertEqual(self.cursor.execute("select a from t1").fetchone()[0], '') self.cursor.fast_executemany = False def test_executemany_failure(self): """ Ensure that an exception is raised if one query in an executemany fails. """ self.cursor.execute("create table t1(a int, b varchar(10))") params = [ (1, 'good'), ('error', 'not an int'), (3, 'good') ] self.assertRaises(pyodbc.Error, self.cursor.executemany, "insert into t1(a, b) value (?, ?)", params) def test_row_slicing(self): self.cursor.execute("create table t1(a int, b int, c int, d int)"); self.cursor.execute("insert into t1 values(1,2,3,4)") row = self.cursor.execute("select * from t1").fetchone() result = row[:] self.assertTrue(result is row) result = row[:-1] self.assertEqual(result, (1,2,3)) result = row[0:4] self.assertTrue(result is row) def test_row_repr(self): self.cursor.execute("create table t1(a int, b int, c int, d int)"); self.cursor.execute("insert into t1 values(1,2,3,4)") row = self.cursor.execute("select * from t1").fetchone() result = str(row) self.assertEqual(result, "(1, 2, 3, 4)") result = str(row[:-1]) self.assertEqual(result, "(1, 2, 3)") result = str(row[:1]) self.assertEqual(result, "(1,)") def test_concatenation(self): v2 = '0123456789' * 30 v3 = '9876543210' * 30 self.cursor.execute("create table t1(c1 int identity(1, 1), c2 varchar(300), c3 varchar(300))") self.cursor.execute("insert into t1(c2, c3) values (?,?)", v2, v3) row = self.cursor.execute("select c2, c3, c2 + c3 as both from t1").fetchone() self.assertEqual(row.both, v2 + v3) def test_view_select(self): # Reported in forum: Can't select from a view? I think I do this a lot, but another test never hurts. # Create a table (t1) with 3 rows and a view (t2) into it. 
self.cursor.execute("create table t1(c1 int identity(1, 1), c2 varchar(50))") for i in range(3): self.cursor.execute("insert into t1(c2) values (?)", "string%s" % i) self.cursor.execute("create view t2 as select * from t1") # Select from the view self.cursor.execute("select * from t2") rows = self.cursor.fetchall() self.assertTrue(rows is not None) self.assertTrue(len(rows) == 3) self.cursor.execute("drop view t2") def test_autocommit(self): self.assertEqual(self.cnxn.autocommit, False) othercnxn = pyodbc.connect(self.connection_string, autocommit=True) self.assertEqual(othercnxn.autocommit, True) othercnxn.autocommit = False self.assertEqual(othercnxn.autocommit, False) def test_skip(self): # Insert 1, 2, and 3. Fetch 1, skip 2, fetch 3. self.cursor.execute("create table t1(id int)"); for i in range(1, 5): self.cursor.execute("insert into t1 values(?)", i) self.cursor.execute("select id from t1 order by id") self.assertEqual(self.cursor.fetchone()[0], 1) self.cursor.skip(2) self.assertEqual(self.cursor.fetchone()[0], 4) def test_timeout(self): self.assertEqual(self.cnxn.timeout, 0) # defaults to zero (off) self.cnxn.timeout = 30 self.assertEqual(self.cnxn.timeout, 30) self.cnxn.timeout = 0 self.assertEqual(self.cnxn.timeout, 0) def test_sets_execute(self): # Only lists and tuples are allowed. def f(): self.cursor.execute("create table t1 (word varchar (100))") words = set (['a']) self.cursor.execute("insert into t1 (word) VALUES (?)", [words]) self.assertRaises(pyodbc.ProgrammingError, f) def test_sets_executemany(self): # Only lists and tuples are allowed. 
def f(): self.cursor.execute("create table t1 (word varchar (100))") words = set (['a']) self.cursor.executemany("insert into t1 (word) values (?)", [words]) self.assertRaises(TypeError, f) def test_row_execute(self): "Ensure we can use a Row object as a parameter to execute" self.cursor.execute("create table t1(n int, s varchar(10))") self.cursor.execute("insert into t1 values (1, 'a')") row = self.cursor.execute("select n, s from t1").fetchone() self.assertNotEqual(row, None) self.cursor.execute("create table t2(n int, s varchar(10))") self.cursor.execute("insert into t2 values (?, ?)", row) def test_row_executemany(self): "Ensure we can use a Row object as a parameter to executemany" self.cursor.execute("create table t1(n int, s varchar(10))") for i in range(3): self.cursor.execute("insert into t1 values (?, ?)", i, chr(ord('a')+i)) rows = self.cursor.execute("select n, s from t1").fetchall() self.assertNotEqual(len(rows), 0) self.cursor.execute("create table t2(n int, s varchar(10))") self.cursor.executemany("insert into t2 values (?, ?)", rows) def test_description(self): "Ensure cursor.description is correct" self.cursor.execute("create table t1(n int, s varchar(8), d decimal(5,2))") self.cursor.execute("insert into t1 values (1, 'abc', '1.23')") self.cursor.execute("select * from t1") # (I'm not sure the precision of an int is constant across different versions, bits, so I'm hand checking the # items I do know. 
# int t = self.cursor.description[0] self.assertEqual(t[0], 'n') self.assertEqual(t[1], int) self.assertEqual(t[5], 0) # scale self.assertEqual(t[6], True) # nullable # varchar(8) t = self.cursor.description[1] self.assertEqual(t[0], 's') self.assertEqual(t[1], str) self.assertEqual(t[4], 8) # precision self.assertEqual(t[5], 0) # scale self.assertEqual(t[6], True) # nullable # decimal(5, 2) t = self.cursor.description[2] self.assertEqual(t[0], 'd') self.assertEqual(t[1], Decimal) self.assertEqual(t[4], 5) # precision self.assertEqual(t[5], 2) # scale self.assertEqual(t[6], True) # nullable def test_none_param(self): "Ensure None can be used for params other than the first" # Some driver/db versions would fail if NULL was not the first parameter because SQLDescribeParam (only used # with NULL) could not be used after the first call to SQLBindParameter. This means None always worked for the # first column, but did not work for later columns. # # If SQLDescribeParam doesn't work, pyodbc would use VARCHAR which almost always worked. However, # binary/varbinary won't allow an implicit conversion. self.cursor.execute("create table t1(n int, blob varbinary(max)) with(heap)") self.cursor.execute("insert into t1 values (1, 0x1234)") row = self.cursor.execute("select * from t1").fetchone() self.assertEqual(row.n, 1) self.assertEqual(type(row.blob), bytes) sql = "update t1 set n=?, blob=?" try: self.cursor.setinputsizes([(), (pyodbc.SQL_VARBINARY, None, None)]) self.cursor.execute(sql, 2, None) except pyodbc.DataError: if self.driver_type_is('freetds'): # FREETDS_KNOWN_ISSUE # # cnxn.getinfo(pyodbc.SQL_DESCRIBE_PARAMETER) returns False for FreeTDS, so # pyodbc can't call SQLDescribeParam to get the correct parameter type. # This can lead to errors being returned from SQL Server when sp_prepexec is called, # e.g., "Implicit conversion from data type varchar to varbinary(max) is not allowed." 
# # So at least verify that the user can manually specify the parameter type self.cursor.setinputsizes([(), (pyodbc.SQL_VARBINARY, None, None)]) self.cursor.execute(sql, 2, None) else: raise row = self.cursor.execute("select * from t1").fetchone() self.assertEqual(row.n, 2) self.assertEqual(row.blob, None) def test_output_conversion(self): def convert(value): # The value is the raw bytes (as a bytes object) read from the # database. We'll simply add an X at the beginning at the end. return 'X' + value.decode('latin1') + 'X' self.cursor.execute("create table t1(n int, v varchar(10))") self.cursor.execute("insert into t1 values (1, '123.45')") self.cnxn.add_output_converter(pyodbc.SQL_VARCHAR, convert) value = self.cursor.execute("select v from t1").fetchone()[0] self.assertEqual(value, 'X123.45X') # Clear all conversions and try again. There should be no Xs this time. self.cnxn.clear_output_converters() value = self.cursor.execute("select v from t1").fetchone()[0] self.assertEqual(value, '123.45') # Same but clear using remove_output_converter. self.cnxn.add_output_converter(pyodbc.SQL_VARCHAR, convert) value = self.cursor.execute("select v from t1").fetchone()[0] self.assertEqual(value, 'X123.45X') self.cnxn.remove_output_converter(pyodbc.SQL_VARCHAR) value = self.cursor.execute("select v from t1").fetchone()[0] self.assertEqual(value, '123.45') # And lastly, clear by passing None for the converter. 
self.cnxn.add_output_converter(pyodbc.SQL_VARCHAR, convert) value = self.cursor.execute("select v from t1").fetchone()[0] self.assertEqual(value, 'X123.45X') self.cnxn.add_output_converter(pyodbc.SQL_VARCHAR, None) value = self.cursor.execute("select v from t1").fetchone()[0] self.assertEqual(value, '123.45') def test_too_large(self): """Ensure error raised if insert fails due to truncation""" value = 'x' * 1000 self.cursor.execute("create table t1(s varchar(800))") def test(): self.cursor.execute("insert into t1 values (?)", value) self.assertRaises(pyodbc.DataError, test) def test_login_timeout(self): # This can only test setting since there isn't a way to cause it to block on the server side. cnxns = pyodbc.connect(self.connection_string, timeout=2) def test_row_equal(self): self.cursor.execute("create table t1(n int, s varchar(20))") self.cursor.execute("insert into t1 values (1, 'test')") row1 = self.cursor.execute("select n, s from t1").fetchone() row2 = self.cursor.execute("select n, s from t1").fetchone() b = (row1 == row2) self.assertEqual(b, True) def test_row_gtlt(self): self.cursor.execute("create table t1(n int, s varchar(20))") self.cursor.execute("insert into t1 values (1, 'test1')") self.cursor.execute("insert into t1 values (1, 'test2')") rows = self.cursor.execute("select n, s from t1 order by s").fetchall() self.assertTrue(rows[0] < rows[1]) self.assertTrue(rows[0] <= rows[1]) self.assertTrue(rows[1] > rows[0]) self.assertTrue(rows[1] >= rows[0]) self.assertTrue(rows[0] != rows[1]) rows = list(rows) rows.sort() # uses < def test_context_manager_success(self): "Ensure `with` commits if an exception is not raised" self.cursor.execute("create table t1(n int)") with pyodbc.connect(self.connection_string) as cnxn: cursor = cnxn.cursor() cursor.execute("insert into t1 values (1)") cnxn = None cursor = None rows = self.cursor.execute("select n from t1").fetchall() self.assertEqual(len(rows), 1) self.assertEqual(rows[0][0], 1) def 
test_context_manager_failure(self): "Ensure `with` rolls back if an exception is raised" # We'll insert a row and commit it. Then we'll insert another row followed by an # exception. self.cursor.execute("create table t1(n int)") self.cursor.execute("insert into t1 values (1)") def _fail(): with pyodbc.connect(self.connection_string) as cnxn: cursor = cnxn.cursor() cursor.execute("insert into t1 values (2)") cursor.execute("delete from bogus") self.assertRaises(pyodbc.Error, _fail) self.cursor.execute("select max(n) from t1") val = self.cursor.fetchval() self.assertEqual(val, 1) def test_untyped_none(self): # From issue 129 value = self.cursor.execute("select ?", None).fetchone()[0] self.assertEqual(value, None) def test_large_update_nodata(self): self.cursor.execute('create table t1(a varbinary(max)) with(heap)') hundredkb = b'x'*100*1024 self.cursor.setinputsizes([(pyodbc.SQL_VARBINARY,0,0)]) self.cursor.execute('update t1 set a=? where 1=0', (hundredkb,)) def test_no_fetch(self): # Issue 89 with FreeTDS: Multiple selects (or catalog functions that issue selects) without fetches seem to # confuse the driver. self.cursor.execute('select 1') self.cursor.execute('select 1') self.cursor.execute('select 1') def test_drivers(self): drivers = pyodbc.drivers() self.assertEqual(list, type(drivers)) self.assertTrue(len(drivers) > 0) m = re.search('DRIVER={?([^}]+?)}?;', self.connection_string, re.IGNORECASE) current = m.group(1) self.assertTrue(current in drivers) def test_decode_meta(self): """ Ensure column names with non-ASCII characters are converted using the configured encodings. 
""" # This is from GitHub issue #190 self.cursor.execute("create table t1(a int)") self.cursor.execute("insert into t1 values (1)") self.cursor.execute('select a as "Tipología" from t1') self.assertEqual(self.cursor.description[0][0], "Tipología") def test_columns(self): # When using aiohttp, `await cursor.primaryKeys('t1')` was raising the error # # Error: TypeError: argument 2 must be str, not None # # I'm not sure why, but PyArg_ParseTupleAndKeywords fails if you use "|s" for an # optional string keyword when calling indirectly. self.cursor.execute("create table t1(a int, b varchar(3), xΏz varchar(4))") self.cursor.columns('t1') results = {row.column_name: row for row in self.cursor} row = results['a'] assert row.type_name == 'int', row.type_name row = results['b'] assert row.type_name == 'varchar' assert row.column_size == 3 # Now do the same, but specifically pass in None to one of the keywords. Old versions # were parsing arguments incorrectly and would raise an error. (This crops up when # calling indirectly like columns(*args, **kwargs) which aiodbc does.) self.cursor.columns('t1', schema=None, catalog=None) results = {row.column_name: row for row in self.cursor} row = results['a'] assert row.type_name == 'int', row.type_name row = results['b'] assert row.type_name == 'varchar' assert row.column_size == 3 row = results['xΏz'] assert row.type_name == 'varchar' assert row.column_size == 4, row.column_size def test_cancel(self): # I'm not sure how to reliably cause a hang to cancel, so for now we'll settle with # making sure SQLCancel is called correctly. self.cursor.execute("select 1") self.cursor.cancel() def test_emoticons(self): # https://github.com/mkleehammer/pyodbc/issues/423 # # When sending a varchar parameter, pyodbc is supposed to set ColumnSize to the number # of characters. Ensure it works even with 4-byte characters. 
# # http://www.fileformat.info/info/unicode/char/1f31c/index.htm v = "x \U0001F31C z" self.cursor.execute("create table t1(s nvarchar(100))") self.cursor.execute("insert into t1 values (?)", v) result = self.cursor.execute("select s from t1").fetchone()[0] self.assertEqual(result, v) def main(): from optparse import OptionParser parser = OptionParser(usage=usage) parser.add_option("-v", "--verbose", action="count", default=0, help="Increment test verbosity (can be used multiple times)") parser.add_option("-d", "--debug", action="store_true", default=False, help="Print debugging items") parser.add_option("-t", "--test", help="Run only the named test") (options, args) = parser.parse_args() if len(args) > 1: parser.error('Only one argument is allowed. Do you need quotes around the connection string?') if not args: connection_string = load_setup_connection_string('sqldwtests') if not connection_string: parser.print_help() raise SystemExit() else: connection_string = args[0] if options.verbose: cnxn = pyodbc.connect(connection_string) print_library_info(cnxn) cnxn.close() suite = load_tests(SqlServerTestCase, options.test, connection_string) testRunner = unittest.TextTestRunner(verbosity=options.verbose) result = testRunner.run(suite) return result if __name__ == '__main__': # Add the build directory to the path so we're testing the latest build, not the installed version. add_to_path() import pyodbc sys.exit(0 if main().wasSuccessful() else 1) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1629143953.0 pyodbc-4.0.32/tests3/sqlitetests.py0000664000175000017500000006223700000000000020620 0ustar00mkleehammermkleehammer#!/usr/bin/python usage = """\ usage: %prog [options] connection_string Unit tests for SQLite using the ODBC driver from http://www.ch-werner.de/sqliteodbc To use, pass a connection string as the parameter. The tests will create and drop tables t1 and t2 as necessary. 
On Windows, use the 32-bit driver with 32-bit Python and the 64-bit driver with 64-bit Python (regardless of your operating system bitness). These run using the version from the 'build' directory, not the version installed into the Python directories. You must run python setup.py build before running the tests. You can also put the connection string into a tmp/setup.cfg file like so: [sqlitetests] connection-string=Driver=SQLite3 ODBC Driver;Database=sqlite.db """ import sys, os, re import unittest from decimal import Decimal from datetime import datetime, date, time from os.path import join, getsize, dirname, abspath from testutils import * _TESTSTR = '0123456789-abcdefghijklmnopqrstuvwxyz-' def _generate_test_string(length): """ Returns a string of `length` characters, constructed by repeating _TESTSTR as necessary. To enhance performance, there are 3 ways data is read, based on the length of the value, so most data types are tested with 3 lengths. This function helps us generate the test data. We use a recognizable data set instead of a single character to make it less likely that "overlap" errors will be hidden and to help us manually identify where a break occurs. 
""" if length <= len(_TESTSTR): return _TESTSTR[:length] c = (length + len(_TESTSTR)-1) // len(_TESTSTR) v = _TESTSTR * c return v[:length] class SqliteTestCase(unittest.TestCase): SMALL_FENCEPOST_SIZES = [ 0, 1, 255, 256, 510, 511, 512, 1023, 1024, 2047, 2048, 4000 ] LARGE_FENCEPOST_SIZES = [ 4095, 4096, 4097, 10 * 1024, 20 * 1024 ] STR_FENCEPOSTS = [ _generate_test_string(size) for size in SMALL_FENCEPOST_SIZES ] BYTE_FENCEPOSTS = [ bytes(s, 'ascii') for s in STR_FENCEPOSTS ] IMAGE_FENCEPOSTS = BYTE_FENCEPOSTS + [ bytes(_generate_test_string(size), 'ascii') for size in LARGE_FENCEPOST_SIZES ] def __init__(self, method_name, connection_string): unittest.TestCase.__init__(self, method_name) self.connection_string = connection_string def setUp(self): self.cnxn = pyodbc.connect(self.connection_string) self.cursor = self.cnxn.cursor() for i in range(3): try: self.cursor.execute("drop table t%d" % i) self.cnxn.commit() except: pass self.cnxn.rollback() def tearDown(self): try: self.cursor.close() self.cnxn.close() except: # If we've already closed the cursor or connection, exceptions are thrown. 
pass def test_multiple_bindings(self): "More than one bind and select on a cursor" self.cursor.execute("create table t1(n int)") self.cursor.execute("insert into t1 values (?)", 1) self.cursor.execute("insert into t1 values (?)", 2) self.cursor.execute("insert into t1 values (?)", 3) for i in range(3): self.cursor.execute("select n from t1 where n < ?", 10) self.cursor.execute("select n from t1 where n < 3") def test_different_bindings(self): self.cursor.execute("create table t1(n int)") self.cursor.execute("create table t2(d datetime)") self.cursor.execute("insert into t1 values (?)", 1) self.cursor.execute("insert into t2 values (?)", datetime.now()) def test_drivers(self): p = pyodbc.drivers() self.assertTrue(isinstance(p, list)) def test_datasources(self): p = pyodbc.dataSources() self.assertTrue(isinstance(p, dict)) def test_getinfo_string(self): value = self.cnxn.getinfo(pyodbc.SQL_CATALOG_NAME_SEPARATOR) self.assertTrue(isinstance(value, str)) def test_getinfo_bool(self): value = self.cnxn.getinfo(pyodbc.SQL_ACCESSIBLE_TABLES) self.assertTrue(isinstance(value, bool)) def test_getinfo_int(self): value = self.cnxn.getinfo(pyodbc.SQL_DEFAULT_TXN_ISOLATION) self.assertTrue(isinstance(value, int)) def test_getinfo_smallint(self): value = self.cnxn.getinfo(pyodbc.SQL_CONCAT_NULL_BEHAVIOR) self.assertTrue(isinstance(value, int)) def _test_strtype(self, sqltype, value, colsize=None): """ The implementation for string, Unicode, and binary tests. 
""" assert colsize is None or (value is None or colsize >= len(value)) if colsize: sql = "create table t1(s %s(%s))" % (sqltype, colsize) else: sql = "create table t1(s %s)" % sqltype self.cursor.execute(sql) self.cursor.execute("insert into t1 values(?)", value) v = self.cursor.execute("select * from t1").fetchone()[0] self.assertEqual(type(v), type(value)) if value is not None: self.assertEqual(len(v), len(value)) self.assertEqual(v, value) # Reported by Andy Hochhaus in the pyodbc group: In 2.1.7 and earlier, a hardcoded length of 255 was used to # determine whether a parameter was bound as a SQL_VARCHAR or SQL_LONGVARCHAR. Apparently SQL Server chokes if # we bind as a SQL_LONGVARCHAR and the target column size is 8000 or less, which is considers just SQL_VARCHAR. # This means binding a 256 character value would cause problems if compared with a VARCHAR column under # 8001. We now use SQLGetTypeInfo to determine the time to switch. # # [42000] [Microsoft][SQL Server Native Client 10.0][SQL Server]The data types varchar and text are incompatible in the equal to operator. self.cursor.execute("select * from t1 where s=?", value) def _test_strliketype(self, sqltype, value, colsize=None): """ The implementation for text, image, ntext, and binary. These types do not support comparison operators. """ assert colsize is None or (value is None or colsize >= len(value)) if colsize: sql = "create table t1(s %s(%s))" % (sqltype, colsize) else: sql = "create table t1(s %s)" % sqltype self.cursor.execute(sql) self.cursor.execute("insert into t1 values(?)", value) v = self.cursor.execute("select * from t1").fetchone()[0] self.assertEqual(type(v), type(value)) if value is not None: self.assertEqual(len(v), len(value)) self.assertEqual(v, value) # # text # def test_text_null(self): self._test_strtype('text', None, 100) # Generate a test for each fencepost size: test_text_0, etc. 
def _maketest(value): def t(self): self._test_strtype('text', value, len(value)) return t for value in STR_FENCEPOSTS: locals()['test_text_%s' % len(value)] = _maketest(value) def test_text_upperlatin(self): self._test_strtype('varchar', 'á') # # blob # def test_null_blob(self): self._test_strtype('blob', None, 100) def test_large_null_blob(self): # Bug 1575064 self._test_strtype('blob', None, 4000) # Generate a test for each fencepost size: test_unicode_0, etc. def _maketest(value): def t(self): self._test_strtype('blob', value, len(value)) return t for value in BYTE_FENCEPOSTS: locals()['test_blob_%s' % len(value)] = _maketest(value) def test_subquery_params(self): """Ensure parameter markers work in a subquery""" self.cursor.execute("create table t1(id integer, s varchar(20))") self.cursor.execute("insert into t1 values (?,?)", 1, 'test') row = self.cursor.execute(""" select x.id from ( select id from t1 where s = ? and id between ? and ? ) x """, 'test', 1, 10).fetchone() self.assertNotEqual(row, None) self.assertEqual(row[0], 1) def _exec(self): self.cursor.execute(self.sql) def test_close_cnxn(self): """Make sure using a Cursor after closing its connection doesn't crash.""" self.cursor.execute("create table t1(id integer, s varchar(20))") self.cursor.execute("insert into t1 values (?,?)", 1, 'test') self.cursor.execute("select * from t1") self.cnxn.close() # Now that the connection is closed, we expect an exception. (If the code attempts to use # the HSTMT, we'll get an access violation instead.) self.sql = "select * from t1" self.assertRaises(pyodbc.ProgrammingError, self._exec) def test_negative_row_index(self): self.cursor.execute("create table t1(s varchar(20))") self.cursor.execute("insert into t1 values(?)", "1") row = self.cursor.execute("select * from t1").fetchone() self.assertEqual(row[0], "1") self.assertEqual(row[-1], "1") def test_version(self): self.assertEqual(3, len(pyodbc.version.split('.'))) # 1.3.1 etc. 
# # ints and floats # def test_int(self): value = 1234 self.cursor.execute("create table t1(n int)") self.cursor.execute("insert into t1 values (?)", value) result = self.cursor.execute("select n from t1").fetchone()[0] self.assertEqual(result, value) def test_negative_int(self): value = -1 self.cursor.execute("create table t1(n int)") self.cursor.execute("insert into t1 values (?)", value) result = self.cursor.execute("select n from t1").fetchone()[0] self.assertEqual(result, value) def test_bigint(self): input = 3000000000 self.cursor.execute("create table t1(d bigint)") self.cursor.execute("insert into t1 values (?)", input) result = self.cursor.execute("select d from t1").fetchone()[0] self.assertEqual(result, input) def test_negative_bigint(self): # Issue 186: BIGINT problem on 32-bit architeture input = -430000000 self.cursor.execute("create table t1(d bigint)") self.cursor.execute("insert into t1 values (?)", input) result = self.cursor.execute("select d from t1").fetchone()[0] self.assertEqual(result, input) def test_float(self): value = 1234.567 self.cursor.execute("create table t1(n float)") self.cursor.execute("insert into t1 values (?)", value) result = self.cursor.execute("select n from t1").fetchone()[0] self.assertEqual(result, value) def test_negative_float(self): value = -200 self.cursor.execute("create table t1(n float)") self.cursor.execute("insert into t1 values (?)", value) result = self.cursor.execute("select n from t1").fetchone()[0] self.assertEqual(value, result) # # rowcount # # Note: SQLRowCount does not define what the driver must return after a select statement # and says that its value should not be relied upon. The sqliteodbc driver is hardcoded to # return 0 so I've deleted the test. 
def test_rowcount_delete(self): self.assertEqual(self.cursor.rowcount, -1) self.cursor.execute("create table t1(i int)") count = 4 for i in range(count): self.cursor.execute("insert into t1 values (?)", i) self.cursor.execute("delete from t1") self.assertEqual(self.cursor.rowcount, count) def test_rowcount_nodata(self): """ This represents a different code path than a delete that deleted something. The return value is SQL_NO_DATA and code after it was causing an error. We could use SQL_NO_DATA to step over the code that errors out and drop down to the same SQLRowCount code. On the other hand, we could hardcode a zero return value. """ self.cursor.execute("create table t1(i int)") # This is a different code path internally. self.cursor.execute("delete from t1") self.assertEqual(self.cursor.rowcount, 0) # In the 2.0.x branch, Cursor.execute sometimes returned the cursor and sometimes the rowcount. This proved very # confusing when things went wrong and added very little value even when things went right since users could always # use: cursor.execute("...").rowcount def test_retcursor_delete(self): self.cursor.execute("create table t1(i int)") self.cursor.execute("insert into t1 values (1)") v = self.cursor.execute("delete from t1") self.assertEqual(v, self.cursor) def test_retcursor_nodata(self): """ This represents a different code path than a delete that deleted something. The return value is SQL_NO_DATA and code after it was causing an error. We could use SQL_NO_DATA to step over the code that errors out and drop down to the same SQLRowCount code. """ self.cursor.execute("create table t1(i int)") # This is a different code path internally. 
v = self.cursor.execute("delete from t1") self.assertEqual(v, self.cursor) def test_retcursor_select(self): self.cursor.execute("create table t1(i int)") self.cursor.execute("insert into t1 values (1)") v = self.cursor.execute("select * from t1") self.assertEqual(v, self.cursor) # # misc # def test_lower_case(self): "Ensure pyodbc.lowercase forces returned column names to lowercase." # Has to be set before creating the cursor, so we must recreate self.cursor. pyodbc.lowercase = True self.cursor = self.cnxn.cursor() self.cursor.execute("create table t1(Abc int, dEf int)") self.cursor.execute("select * from t1") names = [ t[0] for t in self.cursor.description ] names.sort() self.assertEqual(names, [ "abc", "def" ]) # Put it back so other tests don't fail. pyodbc.lowercase = False def test_row_description(self): """ Ensure Cursor.description is accessible as Row.cursor_description. """ self.cursor = self.cnxn.cursor() self.cursor.execute("create table t1(a int, b char(3))") self.cnxn.commit() self.cursor.execute("insert into t1 values(1, 'abc')") row = self.cursor.execute("select * from t1").fetchone() self.assertEqual(self.cursor.description, row.cursor_description) def test_executemany(self): self.cursor.execute("create table t1(a int, b varchar(10))") params = [ (i, str(i)) for i in range(1, 6) ] self.cursor.executemany("insert into t1(a, b) values (?,?)", params) count = self.cursor.execute("select count(*) from t1").fetchone()[0] self.assertEqual(count, len(params)) self.cursor.execute("select a, b from t1 order by a") rows = self.cursor.fetchall() self.assertEqual(count, len(rows)) for param, row in zip(params, rows): self.assertEqual(param[0], row[0]) self.assertEqual(param[1], row[1]) def test_executemany_one(self): "Pass executemany a single sequence" self.cursor.execute("create table t1(a int, b varchar(10))") params = [ (1, "test") ] self.cursor.executemany("insert into t1(a, b) values (?,?)", params) count = self.cursor.execute("select count(*) from 
t1").fetchone()[0] self.assertEqual(count, len(params)) self.cursor.execute("select a, b from t1 order by a") rows = self.cursor.fetchall() self.assertEqual(count, len(rows)) for param, row in zip(params, rows): self.assertEqual(param[0], row[0]) self.assertEqual(param[1], row[1]) def test_executemany_failure(self): """ Ensure that an exception is raised if one query in an executemany fails. """ self.cursor.execute("create table t1(a int, b varchar(10))") params = [ (1, 'good'), ('error', 'not an int'), (3, 'good') ] self.assertRaises(pyodbc.Error, self.cursor.executemany, "insert into t1(a, b) value (?, ?)", params) def test_row_slicing(self): self.cursor.execute("create table t1(a int, b int, c int, d int)"); self.cursor.execute("insert into t1 values(1,2,3,4)") row = self.cursor.execute("select * from t1").fetchone() result = row[:] self.assertTrue(result is row) result = row[:-1] self.assertEqual(result, (1,2,3)) result = row[0:4] self.assertTrue(result is row) def test_row_repr(self): self.cursor.execute("create table t1(a int, b int, c int, d int)"); self.cursor.execute("insert into t1 values(1,2,3,4)") row = self.cursor.execute("select * from t1").fetchone() result = str(row) self.assertEqual(result, "(1, 2, 3, 4)") result = str(row[:-1]) self.assertEqual(result, "(1, 2, 3)") result = str(row[:1]) self.assertEqual(result, "(1,)") def test_view_select(self): # Reported in forum: Can't select from a view? I think I do this a lot, but another test never hurts. # Create a table (t1) with 3 rows and a view (t2) into it. 
self.cursor.execute("create table t1(c1 int identity(1, 1), c2 varchar(50))") for i in range(3): self.cursor.execute("insert into t1(c2) values (?)", "string%s" % i) self.cursor.execute("create view t2 as select * from t1") # Select from the view self.cursor.execute("select * from t2") rows = self.cursor.fetchall() self.assertTrue(rows is not None) self.assertTrue(len(rows) == 3) def test_autocommit(self): self.assertEqual(self.cnxn.autocommit, False) othercnxn = pyodbc.connect(self.connection_string, autocommit=True) self.assertEqual(othercnxn.autocommit, True) othercnxn.autocommit = False self.assertEqual(othercnxn.autocommit, False) def test_skip(self): # Insert 1, 2, and 3. Fetch 1, skip 2, fetch 3. self.cursor.execute("create table t1(id int)"); for i in range(1, 5): self.cursor.execute("insert into t1 values(?)", i) self.cursor.execute("select id from t1 order by id") self.assertEqual(self.cursor.fetchone()[0], 1) self.cursor.skip(2) self.assertEqual(self.cursor.fetchone()[0], 4) def test_sets_execute(self): # Only lists and tuples are allowed. def f(): self.cursor.execute("create table t1 (word varchar (100))") words = set (['a']) self.cursor.execute("insert into t1 (word) VALUES (?)", [words]) self.assertRaises(pyodbc.ProgrammingError, f) def test_sets_executemany(self): # Only lists and tuples are allowed. 
def f(): self.cursor.execute("create table t1 (word varchar (100))") words = set (['a']) self.cursor.executemany("insert into t1 (word) values (?)", [words]) self.assertRaises(TypeError, f) def test_row_execute(self): "Ensure we can use a Row object as a parameter to execute" self.cursor.execute("create table t1(n int, s varchar(10))") self.cursor.execute("insert into t1 values (1, 'a')") row = self.cursor.execute("select n, s from t1").fetchone() self.assertNotEqual(row, None) self.cursor.execute("create table t2(n int, s varchar(10))") self.cursor.execute("insert into t2 values (?, ?)", row) def test_row_executemany(self): "Ensure we can use a Row object as a parameter to executemany" self.cursor.execute("create table t1(n int, s varchar(10))") for i in range(3): self.cursor.execute("insert into t1 values (?, ?)", i, chr(ord('a')+i)) rows = self.cursor.execute("select n, s from t1").fetchall() self.assertNotEqual(len(rows), 0) self.cursor.execute("create table t2(n int, s varchar(10))") self.cursor.executemany("insert into t2 values (?, ?)", rows) def test_description(self): "Ensure cursor.description is correct" self.cursor.execute("create table t1(n int, s text)") self.cursor.execute("insert into t1 values (1, 'abc')") self.cursor.execute("select * from t1") # (I'm not sure the precision of an int is constant across different versions, bits, so I'm hand checking the # items I do know. 
# int t = self.cursor.description[0] self.assertEqual(t[0], 'n') self.assertEqual(t[1], int) self.assertEqual(t[5], 0) # scale self.assertEqual(t[6], True) # nullable # text t = self.cursor.description[1] self.assertEqual(t[0], 's') self.assertEqual(t[1], str) self.assertEqual(t[5], 0) # scale self.assertEqual(t[6], True) # nullable def test_row_equal(self): self.cursor.execute("create table t1(n int, s varchar(20))") self.cursor.execute("insert into t1 values (1, 'test')") row1 = self.cursor.execute("select n, s from t1").fetchone() row2 = self.cursor.execute("select n, s from t1").fetchone() b = (row1 == row2) self.assertEqual(b, True) def test_row_gtlt(self): self.cursor.execute("create table t1(n int, s varchar(20))") self.cursor.execute("insert into t1 values (1, 'test1')") self.cursor.execute("insert into t1 values (1, 'test2')") rows = self.cursor.execute("select n, s from t1 order by s").fetchall() self.assertTrue(rows[0] < rows[1]) self.assertTrue(rows[0] <= rows[1]) self.assertTrue(rows[1] > rows[0]) self.assertTrue(rows[1] >= rows[0]) self.assertTrue(rows[0] != rows[1]) rows = list(rows) rows.sort() # uses < def _test_context_manager(self): # TODO: This is failing, but it may be due to the design of sqlite. I've disabled it # for now until I can research it some more. # WARNING: This isn't working right now. We've set the driver's autocommit to "off", # but that doesn't automatically start a transaction. I'm not familiar enough with the # internals of the driver to tell what is going on, but it looks like there is support # for the autocommit flag. # # I thought it might be a timing issue, like it not actually starting a txn until you # try to do something, but that doesn't seem to work either. I'll leave this in to # remind us that it isn't working yet but we need to contact the SQLite ODBC driver # author for some guidance. 
with pyodbc.connect(self.connection_string) as cnxn: cursor = cnxn.cursor() cursor.execute("begin") cursor.execute("create table t1(i int)") cursor.execute('rollback') # The connection should be closed now. def test(): cnxn.execute('rollback') self.assertRaises(pyodbc.Error, test) def test_untyped_none(self): # From issue 129 value = self.cursor.execute("select ?", None).fetchone()[0] self.assertEqual(value, None) def test_large_update_nodata(self): self.cursor.execute('create table t1(a blob)') hundredkb = 'x'*100*1024 self.cursor.execute('update t1 set a=? where 1=0', (hundredkb,)) def test_no_fetch(self): # Issue 89 with FreeTDS: Multiple selects (or catalog functions that issue selects) without fetches seem to # confuse the driver. self.cursor.execute('select 1') self.cursor.execute('select 1') self.cursor.execute('select 1') def main(): from optparse import OptionParser parser = OptionParser(usage=usage) parser.add_option("-v", "--verbose", default=0, action="count", help="Increment test verbosity (can be used multiple times)") parser.add_option("-d", "--debug", action="store_true", default=False, help="Print debugging items") parser.add_option("-t", "--test", help="Run only the named test") (options, args) = parser.parse_args() if len(args) > 1: parser.error('Only one argument is allowed. Do you need quotes around the connection string?') if not args: connection_string = load_setup_connection_string('sqlitetests') if not connection_string: parser.print_help() raise SystemExit() else: connection_string = args[0] if options.verbose: cnxn = pyodbc.connect(connection_string) print_library_info(cnxn) cnxn.close() suite = load_tests(SqliteTestCase, options.test, connection_string) testRunner = unittest.TextTestRunner(verbosity=options.verbose) result = testRunner.run(suite) return result if __name__ == '__main__': # Add the build directory to the path so we're testing the latest build, not the installed version. 
add_to_path() import pyodbc sys.exit(0 if main().wasSuccessful() else 1) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1629396312.0 pyodbc-4.0.32/tests3/sqlservertests.py0000775000175000017500000022355700000000000021354 0ustar00mkleehammermkleehammer#!/usr/bin/python # -*- coding: utf-8 -*- x = 1 # Getting an error if starting with usage for some reason. usage = """\ usage: %prog [options] connection_string Unit tests for SQL Server. To use, pass a connection string as the parameter. The tests will create and drop tables t1 and t2 as necessary. These run using the version from the 'build' directory, not the version installed into the Python directories. You must run python setup.py build before running the tests. You can also put the connection string into a tmp/setup.cfg file like so: [sqlservertests] connection-string=DRIVER={SQL Server};SERVER=localhost;UID=uid;PWD=pwd;DATABASE=db The connection string above will use the 2000/2005 driver, even if SQL Server 2008 is installed: 2000: DRIVER={SQL Server} 2005: DRIVER={SQL Server} 2008: DRIVER={SQL Server Native Client 10.0} If using FreeTDS ODBC, be sure to use version 1.1.23 or newer. """ import sys, os, re, uuid import unittest from decimal import Decimal from datetime import datetime, date, time from os.path import join, getsize, dirname, abspath from warnings import warn from testutils import * # Some tests have fallback code for known driver issues. # Change this value to False to bypass the fallback code, e.g., to see # if a newer version of the driver has fixed the underlying issue. # handle_known_issues = True _TESTSTR = '0123456789-abcdefghijklmnopqrstuvwxyz-' def _generate_test_string(length): """ Returns a string of `length` characters, constructed by repeating _TESTSTR as necessary. To enhance performance, there are 3 ways data is read, based on the length of the value, so most data types are tested with 3 lengths. This function helps us generate the test data. 
We use a recognizable data set instead of a single character to make it less likely that "overlap" errors will be hidden and to help us manually identify where a break occurs. """ if length <= len(_TESTSTR): return _TESTSTR[:length] c = int((length + len(_TESTSTR)-1) / len(_TESTSTR)) v = _TESTSTR * c return v[:length] class SqlServerTestCase(unittest.TestCase): SMALL_FENCEPOST_SIZES = [ 0, 1, 255, 256, 510, 511, 512, 1023, 1024, 2047, 2048, 4000 ] LARGE_FENCEPOST_SIZES = [ 4095, 4096, 4097, 10 * 1024, 20 * 1024 ] STR_FENCEPOSTS = [_generate_test_string(size) for size in SMALL_FENCEPOST_SIZES] LARGE_STR_FENCEPOSTS = STR_FENCEPOSTS + [_generate_test_string(size) for size in LARGE_FENCEPOST_SIZES] BYTE_FENCEPOSTS = [ bytes(s, 'ascii') for s in STR_FENCEPOSTS ] IMAGE_FENCEPOSTS = BYTE_FENCEPOSTS + [ bytes(_generate_test_string(size), 'ascii') for size in LARGE_FENCEPOST_SIZES ] def __init__(self, method_name, connection_string): unittest.TestCase.__init__(self, method_name) self.connection_string = connection_string def driver_type_is(self, type_name): recognized_types = { 'msodbcsql': '(Microsoft) ODBC Driver xx for SQL Server', 'freetds': 'FreeTDS ODBC', } if not type_name in recognized_types.keys(): raise KeyError('"{0}" is not a recognized driver type: {1}'.format(type_name, list(recognized_types.keys()))) driver_name = self.cnxn.getinfo(pyodbc.SQL_DRIVER_NAME).lower() if type_name == 'msodbcsql': return ('msodbcsql' in driver_name) or ('sqlncli' in driver_name) or ('sqlsrv32.dll' == driver_name) elif type_name == 'freetds': return ('tdsodbc' in driver_name) def handle_known_issues_for(self, type_name, print_reminder=False, failure_crashes_python=False): """ Checks driver `type_name` and "killswitch" variable `handle_known_issues` to see if known issue handling should be bypassed. Optionally prints a reminder message to help identify tests that previously had issues but may have been fixed by a newer version of the driver. Usage examples: # 1. 
print reminder at beginning of test (before any errors can occur) # def test_some_feature(self): self.handle_known_issues_for('freetds', print_reminder=True) # (continue with test code) # 2. conditional execution of fallback code # try: # (some test code) except pyodbc.DataError: if self.handle_known_issues_for('freetds'): # FREETDS_KNOWN_ISSUE # # (fallback code to work around exception) else: raise """ if self.driver_type_is(type_name): if handle_known_issues or failure_crashes_python: return True else: if print_reminder: print("Known issue handling is disabled. Does this test still fail?") return False def get_sqlserver_version(self): """ Returns the major version: 8-->2000, 9-->2005, 10-->2008 """ self.cursor.execute("exec master..xp_msver 'ProductVersion'") row = self.cursor.fetchone() return int(row.Character_Value.split('.', 1)[0]) def setUp(self): self.cnxn = pyodbc.connect(self.connection_string) self.cursor = self.cnxn.cursor() # I (Kleehammer) have been using a latin1 collation. If you have a # different collation, you'll need to update this. If someone knows of # a good way for this to be dynamic, please update. (I suppose we # could maintain a map from collation to encoding?) self.cnxn.setdecoding(pyodbc.SQL_CHAR, 'latin1') for i in range(3): try: self.cursor.execute("drop table t%d" % i) self.cnxn.commit() except: pass for i in range(3): try: self.cursor.execute("drop procedure proc%d" % i) self.cnxn.commit() except: pass try: self.cursor.execute('drop function func1') self.cnxn.commit() except: pass self.cnxn.rollback() def tearDown(self): try: self.cursor.close() self.cnxn.close() except: # If we've already closed the cursor or connection, exceptions are thrown. pass def _simpletest(datatype, value): # A simple test that can be used for any data type where the Python # type we write is also what we expect to receive. 
def _t(self): self.cursor.execute('create table t1(value %s)' % datatype) self.cursor.execute('insert into t1 values (?)', value) result = self.cursor.execute("select value from t1").fetchone()[0] self.assertEqual(result, value) return _t def test_multiple_bindings(self): "More than one bind and select on a cursor" self.cursor.execute("create table t1(n int)") self.cursor.execute("insert into t1 values (?)", 1) self.cursor.execute("insert into t1 values (?)", 2) self.cursor.execute("insert into t1 values (?)", 3) for i in range(3): self.cursor.execute("select n from t1 where n < ?", 10) self.cursor.execute("select n from t1 where n < 3") def test_different_bindings(self): self.cursor.execute("create table t1(n int)") self.cursor.execute("create table t2(d datetime)") self.cursor.execute("insert into t1 values (?)", 1) self.cursor.execute("insert into t2 values (?)", datetime.now()) def test_drivers(self): p = pyodbc.drivers() self.assertTrue(isinstance(p, list)) def test_datasources(self): p = pyodbc.dataSources() self.assertTrue(isinstance(p, dict)) def test_getinfo_string(self): value = self.cnxn.getinfo(pyodbc.SQL_CATALOG_NAME_SEPARATOR) self.assertTrue(isinstance(value, str)) def test_getinfo_bool(self): value = self.cnxn.getinfo(pyodbc.SQL_ACCESSIBLE_TABLES) self.assertTrue(isinstance(value, bool)) def test_getinfo_int(self): value = self.cnxn.getinfo(pyodbc.SQL_DEFAULT_TXN_ISOLATION) self.assertTrue(isinstance(value, (int, int))) def test_getinfo_smallint(self): value = self.cnxn.getinfo(pyodbc.SQL_CONCAT_NULL_BEHAVIOR) self.assertTrue(isinstance(value, int)) def test_noscan(self): self.assertEqual(self.cursor.noscan, False) self.cursor.noscan = True self.assertEqual(self.cursor.noscan, True) def test_nonnative_uuid(self): # The default is False meaning we should return a string. Note that # SQL Server seems to always return uppercase. 
value = uuid.uuid4() self.cursor.execute("create table t1(n uniqueidentifier)") self.cursor.execute("insert into t1 values (?)", value) pyodbc.native_uuid = False result = self.cursor.execute("select n from t1").fetchval() self.assertEqual(type(result), str) self.assertEqual(result, str(value).upper()) def test_native_uuid(self): # When true, we should return a uuid.UUID object. value = uuid.uuid4() self.cursor.execute("create table t1(n uniqueidentifier)") self.cursor.execute("insert into t1 values (?)", value) pyodbc.native_uuid = True result = self.cursor.execute("select n from t1").fetchval() self.assertIsInstance(result, uuid.UUID) self.assertEqual(value, result) def test_nextset(self): self.cursor.execute("create table t1(i int)") for i in range(4): self.cursor.execute("insert into t1(i) values(?)", i) self.cursor.execute("select i from t1 where i < 2 order by i; select i from t1 where i >= 2 order by i") for i, row in enumerate(self.cursor): self.assertEqual(i, row.i) self.assertEqual(self.cursor.nextset(), True) for i, row in enumerate(self.cursor): self.assertEqual(i + 2, row.i) def test_nextset_with_raiserror(self): self.handle_known_issues_for('freetds', print_reminder=True) self.cursor.execute("select i = 1; RAISERROR('c', 16, 1);") row = next(self.cursor) self.assertEqual(1, row.i) if self.handle_known_issues_for('freetds'): warn('FREETDS_KNOWN_ISSUE - test_nextset_with_raiserror: test cancelled.') # AssertionError: ProgrammingError not raised by nextset # https://github.com/FreeTDS/freetds/issues/230 return # for now self.assertRaises(pyodbc.ProgrammingError, self.cursor.nextset) def test_fixed_unicode(self): value = "t\xebsting" self.cursor.execute("create table t1(s nchar(7))") self.cursor.execute("insert into t1 values(?)", "t\xebsting") v = self.cursor.execute("select * from t1").fetchone()[0] self.assertEqual(type(v), str) self.assertEqual(len(v), len(value)) # If we alloc'd wrong, the test below might work because of an embedded NULL 
self.assertEqual(v, value) def _test_strtype(self, sqltype, value, resulttype=None, colsize=None): """ The implementation for string, Unicode, and binary tests. """ assert ( value is None or colsize == -1 or colsize is None or colsize >= len(value) ), colsize if colsize == -1: sql = "create table t1(s %s(max))" % sqltype elif colsize: sql = "create table t1(s %s(%s))" % (sqltype, colsize) else: sql = "create table t1(s %s)" % sqltype self.cursor.execute(sql) if resulttype is None: resulttype = type(value) sql = "insert into t1 values(?)" try: self.cursor.execute(sql, value) except pyodbc.DataError: if self.handle_known_issues_for('freetds'): # FREETDS_KNOWN_ISSUE # # cnxn.getinfo(pyodbc.SQL_DESCRIBE_PARAMETER) returns False for FreeTDS, so # pyodbc can't call SQLDescribeParam to get the correct parameter type. # This can lead to errors being returned from SQL Server when sp_prepexec is called, # e.g., "Implicit conversion from data type varchar to varbinary is not allowed." # for test_binary_null # # So at least verify that the user can manually specify the parameter type if sqltype == 'varbinary': sql_param_type = pyodbc.SQL_VARBINARY # (add elif blocks for other cases as required) self.cursor.setinputsizes([(sql_param_type, colsize, 0)]) self.cursor.execute(sql, value) else: raise v = self.cursor.execute("select * from t1").fetchone()[0] self.assertEqual(type(v), resulttype) if value is not None: self.assertEqual(len(v), len(value)) # To allow buffer --> db --> bytearray tests, always convert the input to the expected result type before # comparing. if type(value) is not resulttype: value = resulttype(value) self.assertEqual(v, value) def _test_strliketype(self, sqltype, value, resulttype=None, colsize=None): """ The implementation for text, image, ntext, and binary. These types do not support comparison operators. 
""" assert colsize is None or isinstance(colsize, int), colsize assert colsize is None or (value is None or colsize >= len(value)) if colsize: sql = "create table t1(s %s(%s))" % (sqltype, colsize) else: sql = "create table t1(s %s)" % sqltype if resulttype is None: resulttype = type(value) self.cursor.execute(sql) self.cursor.execute("insert into t1 values(?)", value) result = self.cursor.execute("select * from t1").fetchone()[0] self.assertEqual(type(result), resulttype) # To allow buffer --> db --> bytearray tests, always convert the input to the expected result type before # comparing. if type(value) is not resulttype: value = resulttype(value) self.assertEqual(result, value) # # varchar # def test_varchar_null(self): self._test_strtype('varchar', None, colsize=100) # Generate a test for each fencepost size: test_varchar_0, etc. def _maketest(value): def t(self): self._test_strtype('varchar', value, colsize=len(value)) return t for value in STR_FENCEPOSTS: locals()['test_varchar_%s' % len(value)] = _maketest(value) # Generate a test for each fencepost size: test_varchar_0, etc. def _maketest(value): def t(self): self._test_strtype('varchar', value, colsize=-1) return t for value in LARGE_STR_FENCEPOSTS: locals()['test_varchar_max_%s' % len(value)] = _maketest(value) def test_varchar_many(self): self.cursor.execute("create table t1(c1 varchar(300), c2 varchar(300), c3 varchar(300))") v1 = 'ABCDEFGHIJ' * 30 v2 = '0123456789' * 30 v3 = '9876543210' * 30 self.cursor.execute("insert into t1(c1, c2, c3) values (?,?,?)", v1, v2, v3); row = self.cursor.execute("select c1, c2, c3, len(c1) as l1, len(c2) as l2, len(c3) as l3 from t1").fetchone() self.assertEqual(v1, row.c1) self.assertEqual(v2, row.c2) self.assertEqual(v3, row.c3) # # nvarchar # def test_unicode_null(self): self._test_strtype('nvarchar', None, colsize=100) # Generate a test for each fencepost size: test_unicode_0, etc. 
def _maketest(value): def t(self): self._test_strtype('nvarchar', value, colsize=len(value)) return t for value in STR_FENCEPOSTS: locals()['test_unicode_%s' % len(value)] = _maketest(value) def _maketest(value): def t(self): self._test_strtype('nvarchar', value, colsize=-1) return t for value in LARGE_STR_FENCEPOSTS: locals()['test_unicode_max_%s' % len(value)] = _maketest(value) def test_unicode_longmax(self): # Issue 188: Segfault when fetching NVARCHAR(MAX) data over 511 bytes ver = self.get_sqlserver_version() if ver < 9: # 2005+ return # so pass / ignore self.cursor.execute("select cast(replicate(N'x', 512) as nvarchar(max))") # From issue #206 def _maketest(value): def t(self): self._test_strtype('nvarchar', value, colsize=len(value)) return t locals()['test_chinese_param'] = _maketest('我的') def test_chinese(self): v = '我的' self.cursor.execute(u"SELECT N'我的' AS [Name]") row = self.cursor.fetchone() self.assertEqual(row[0], v) self.cursor.execute(u"SELECT N'我的' AS [Name]") rows = self.cursor.fetchall() self.assertEqual(rows[0][0], v) def test_fast_executemany_to_local_temp_table(self): if self.handle_known_issues_for('freetds', print_reminder=True, failure_crashes_python=True): warn('FREETDS_KNOWN_ISSUE - test_fast_executemany_to_local_temp_table: test cancelled.') return v = 'Ώπα' self.cursor.execute("CREATE TABLE #issue295 (id INT IDENTITY PRIMARY KEY, txt NVARCHAR(50))") sql = "INSERT INTO #issue295 (txt) VALUES (?)" params = [(v,)] self.cursor.setinputsizes([(pyodbc.SQL_WVARCHAR, 50, 0)]) self.cursor.fast_executemany = True self.cursor.executemany(sql, params) self.assertEqual(self.cursor.execute("SELECT txt FROM #issue295").fetchval(), v) def test_fast_executemany_to_datetime2(self): if self.handle_known_issues_for('freetds', print_reminder=True, failure_crashes_python=True): warn('FREETDS_KNOWN_ISSUE - test_fast_executemany_to_datetime2: test cancelled.') return v = datetime(2019, 3, 12, 10, 0, 0, 123456) self.cursor.execute("CREATE TABLE ##issue540 
(dt2 DATETIME2(2))") sql = "INSERT INTO ##issue540 (dt2) VALUES (?)" params = [(v,)] self.cursor.fast_executemany = True self.cursor.executemany(sql, params) self.assertEqual(self.cursor.execute("SELECT CAST(dt2 AS VARCHAR) FROM ##issue540").fetchval(), '2019-03-12 10:00:00.12') def test_fast_executemany_high_unicode(self): if self.handle_known_issues_for('freetds', print_reminder=True, failure_crashes_python=True): warn('FREETDS_KNOWN_ISSUE - test_fast_executemany_high_unicode: test cancelled.') return v = "🎥" self.cursor.fast_executemany = True self.cursor.execute("CREATE TABLE t1 (col1 nvarchar(max) null)") self.cursor.executemany("INSERT INTO t1 (col1) VALUES (?)", [[v,]]) self.assertEqual(self.cursor.execute("SELECT * FROM t1").fetchone()[0], v) # # binary # def test_binary_null(self): self.handle_known_issues_for('freetds', print_reminder=True) self._test_strtype('varbinary', None, colsize=100) # bytearray def _maketest(value): def t(self): self._test_strtype('varbinary', bytearray(value), colsize=len(value), resulttype=bytes) return t for value in BYTE_FENCEPOSTS: locals()['test_binary_bytearray_%s' % len(value)] = _maketest(value) # bytes def _maketest(value): def t(self): self._test_strtype('varbinary', bytes(value), colsize=len(value)) return t for value in BYTE_FENCEPOSTS: locals()['test_binary_bytes_%s' % len(value)] = _maketest(value) # # image # def test_image_null(self): self._test_strliketype('image', None) # bytearray def _maketest(value): def t(self): self._test_strliketype('image', bytearray(value), resulttype=bytes) return t for value in IMAGE_FENCEPOSTS: locals()['test_image_bytearray_%s' % len(value)] = _maketest(value) # bytes def _maketest(value): def t(self): self._test_strliketype('image', bytes(value)) return t for value in IMAGE_FENCEPOSTS: locals()['test_image_bytes_%s' % len(value)] = _maketest(value) # # text # def test_null_text(self): self._test_strliketype('text', None) def _maketest(value): def t(self): 
self._test_strliketype('text', value) return t for value in STR_FENCEPOSTS: locals()['test_text_%s' % len(value)] = _maketest(value) # # bit # def test_bit(self): value = True self.cursor.execute("create table t1(b bit)") self.cursor.execute("insert into t1 values (?)", value) v = self.cursor.execute("select b from t1").fetchone()[0] self.assertEqual(type(v), bool) self.assertEqual(v, value) # # decimal # def _decimal(self, precision, scale, negative): # From test provided by planders (thanks!) in Issue 91 self.cursor.execute("create table t1(d decimal(%s, %s))" % (precision, scale)) # Construct a decimal that uses the maximum precision and scale. decStr = '9' * (precision - scale) if scale: decStr = decStr + "." + '9' * scale if negative: decStr = "-" + decStr value = Decimal(decStr) self.cursor.execute("insert into t1 values(?)", value) v = self.cursor.execute("select d from t1").fetchone()[0] self.assertEqual(v, value) def _maketest(p, s, n): def t(self): self._decimal(p, s, n) return t for (p, s, n) in [ (1, 0, False), (1, 0, True), (6, 0, False), (6, 2, False), (6, 4, True), (6, 6, True), (38, 0, False), (38, 10, False), (38, 38, False), (38, 0, True), (38, 10, True), (38, 38, True) ]: locals()['test_decimal_%s_%s_%s' % (p, s, n and 'n' or 'p')] = _maketest(p, s, n) def test_decimal_e(self): """Ensure exponential notation decimals are properly handled""" value = Decimal((0, (1, 2, 3), 5)) # prints as 1.23E+7 self.cursor.execute("create table t1(d decimal(10, 2))") self.cursor.execute("insert into t1 values (?)", value) result = self.cursor.execute("select * from t1").fetchone()[0] self.assertEqual(result, value) def test_subquery_params(self): """Ensure parameter markers work in a subquery""" self.cursor.execute("create table t1(id integer, s varchar(20))") self.cursor.execute("insert into t1 values (?,?)", 1, 'test') row = self.cursor.execute(""" select x.id from ( select id from t1 where s = ? and id between ? and ? 
) x """, 'test', 1, 10).fetchone() self.assertNotEqual(row, None) self.assertEqual(row[0], 1) def _exec(self): self.cursor.execute(self.sql) def test_close_cnxn(self): """Make sure using a Cursor after closing its connection doesn't crash.""" self.cursor.execute("create table t1(id integer, s varchar(20))") self.cursor.execute("insert into t1 values (?,?)", 1, 'test') self.cursor.execute("select * from t1") self.cnxn.close() # Now that the connection is closed, we expect an exception. (If the code attempts to use # the HSTMT, we'll get an access violation instead.) self.sql = "select * from t1" self.assertRaises(pyodbc.ProgrammingError, self._exec) def test_empty_string(self): self.cursor.execute("create table t1(s varchar(20))") self.cursor.execute("insert into t1 values(?)", "") def test_empty_string_encoding(self): self.cnxn.setdecoding(pyodbc.SQL_CHAR, encoding='shift_jis') value = "" self.cursor.execute("create table t1(s varchar(20))") self.cursor.execute("insert into t1 values(?)", value) v = self.cursor.execute("select * from t1").fetchone()[0] self.assertEqual(v, value) def test_fixed_str(self): value = "testing" self.cursor.execute("create table t1(s char(7))") self.cursor.execute("insert into t1 values(?)", value) v = self.cursor.execute("select * from t1").fetchone()[0] self.assertEqual(type(v), str) self.assertEqual(len(v), len(value)) # If we alloc'd wrong, the test below might work because of an embedded NULL self.assertEqual(v, value) def test_empty_unicode(self): self.cursor.execute("create table t1(s nvarchar(20))") self.cursor.execute("insert into t1 values(?)", "") def test_empty_unicode_encoding(self): self.cnxn.setdecoding(pyodbc.SQL_CHAR, encoding='shift_jis') value = "" self.cursor.execute("create table t1(s nvarchar(20))") self.cursor.execute("insert into t1 values(?)", value) v = self.cursor.execute("select * from t1").fetchone()[0] self.assertEqual(v, value) def test_negative_row_index(self): self.cursor.execute("create table t1(s 
varchar(20))") self.cursor.execute("insert into t1 values(?)", "1") row = self.cursor.execute("select * from t1").fetchone() self.assertEqual(row[0], "1") self.assertEqual(row[-1], "1") def test_version(self): self.assertEqual(3, len(pyodbc.version.split('.'))) # 1.3.1 etc. # # date, time, datetime # def test_datetime(self): value = datetime(2007, 1, 15, 3, 4, 5) self.cursor.execute("create table t1(dt datetime)") self.cursor.execute("insert into t1 values (?)", value) result = self.cursor.execute("select dt from t1").fetchone()[0] self.assertEqual(type(result), datetime) self.assertEqual(value, result) def test_datetime_fraction(self): # SQL Server supports milliseconds, but Python's datetime supports nanoseconds, so the most granular datetime # supported is xxx000. value = datetime(2007, 1, 15, 3, 4, 5, 123000) self.cursor.execute("create table t1(dt datetime)") self.cursor.execute("insert into t1 values (?)", value) result = self.cursor.execute("select dt from t1").fetchone()[0] self.assertEqual(type(result), datetime) self.assertEqual(value, result) def test_datetime_fraction_rounded(self): # SQL Server supports milliseconds, but Python's datetime supports nanoseconds. pyodbc rounds down to what the # database supports. 
full = datetime(2007, 1, 15, 3, 4, 5, 123456) rounded = datetime(2007, 1, 15, 3, 4, 5, 123000) self.cursor.execute("create table t1(dt datetime)") self.cursor.execute("insert into t1 values (?)", full) result = self.cursor.execute("select dt from t1").fetchone()[0] self.assertEqual(type(result), datetime) self.assertEqual(rounded, result) def test_date(self): ver = self.get_sqlserver_version() if ver < 10: # 2008 only return # so pass / ignore value = date.today() self.cursor.execute("create table t1(d date)") self.cursor.execute("insert into t1 values (?)", value) result = self.cursor.execute("select d from t1").fetchone()[0] self.assertEqual(type(result), date) self.assertEqual(value, result) def test_time(self): ver = self.get_sqlserver_version() if ver < 10: # 2008 only return # so pass / ignore value = datetime.now().time() # We aren't yet writing values using the new extended time type so the value written to the database is only # down to the second. value = value.replace(microsecond=0) self.cursor.execute("create table t1(t time)") self.cursor.execute("insert into t1 values (?)", value) result = self.cursor.execute("select t from t1").fetchone()[0] self.assertEqual(type(result), time) self.assertEqual(value, result) def test_datetime2(self): value = datetime(2007, 1, 15, 3, 4, 5) self.cursor.execute("create table t1(dt datetime2)") self.cursor.execute("insert into t1 values (?)", value) result = self.cursor.execute("select dt from t1").fetchone()[0] self.assertEqual(type(result), datetime) self.assertEqual(value, result) # # ints and floats # def test_int(self): value = 1234 self.cursor.execute("create table t1(n int)") self.cursor.execute("insert into t1 values (?)", value) result = self.cursor.execute("select n from t1").fetchone()[0] self.assertEqual(result, value) def test_negative_int(self): value = -1 self.cursor.execute("create table t1(n int)") self.cursor.execute("insert into t1 values (?)", value) result = self.cursor.execute("select n from 
t1").fetchone()[0] self.assertEqual(result, value) def test_bigint(self): input = 3000000000 self.cursor.execute("create table t1(d bigint)") self.cursor.execute("insert into t1 values (?)", input) result = self.cursor.execute("select d from t1").fetchone()[0] self.assertEqual(result, input) def test_overflow_int(self): # python allows integers of any size, bigger than an 8 byte int can contain input = 9999999999999999999999999999999999999 self.cursor.execute("create table t1(d bigint)") self.cnxn.commit() self.assertRaises(OverflowError, self.cursor.execute, "insert into t1 values (?)", input) result = self.cursor.execute("select * from t1").fetchall() self.assertEqual(result, []) def test_float(self): value = 1234.567 self.cursor.execute("create table t1(n float)") self.cursor.execute("insert into t1 values (?)", value) result = self.cursor.execute("select n from t1").fetchone()[0] self.assertEqual(result, value) def test_denorm_float(self): value = 0.00012345 self.cursor.execute("create table t1(n float)") self.cursor.execute("insert into t1 values (?)", value) result = self.cursor.execute("select n from t1").fetchone()[0] self.assertEqual(result, value) def test_negative_float(self): value = -200 self.cursor.execute("create table t1(n float)") self.cursor.execute("insert into t1 values (?)", value) result = self.cursor.execute("select n from t1").fetchone()[0] self.assertEqual(value, result) def test_non_numeric_float(self): self.cursor.execute("create table t1(d float)") self.cnxn.commit() for input in (float('+Infinity'), float('-Infinity'), float('NaN')): self.assertRaises(pyodbc.ProgrammingError, self.cursor.execute, "insert into t1 values (?)", input) result = self.cursor.execute("select * from t1").fetchall() self.assertEqual(result, []) # # stored procedures # # def test_callproc(self): # "callproc with a simple input-only stored procedure" # pass def test_sp_results(self): self.cursor.execute( """ Create procedure proc1 AS select top 10 name, id, xtype, 
refdate from sysobjects """) rows = self.cursor.execute("exec proc1").fetchall() self.assertEqual(type(rows), list) self.assertEqual(len(rows), 10) # there has to be at least 10 items in sysobjects self.assertEqual(type(rows[0].refdate), datetime) def test_sp_results_from_temp(self): # Note: I've used "set nocount on" so that we don't get the number of rows deleted from #tmptable. # If you don't do this, you'd need to call nextset() once to skip it. self.cursor.execute( """ Create procedure proc1 AS set nocount on select top 10 name, id, xtype, refdate into #tmptable from sysobjects select * from #tmptable """) self.cursor.execute("exec proc1") self.assertTrue(self.cursor.description is not None) self.assertTrue(len(self.cursor.description) == 4) rows = self.cursor.fetchall() self.assertEqual(type(rows), list) self.assertEqual(len(rows), 10) # there has to be at least 10 items in sysobjects self.assertEqual(type(rows[0].refdate), datetime) def test_sp_results_from_vartbl(self): self.cursor.execute( """ Create procedure proc1 AS set nocount on declare @tmptbl table(name varchar(100), id int, xtype varchar(4), refdate datetime) insert into @tmptbl select top 10 name, id, xtype, refdate from sysobjects select * from @tmptbl """) self.cursor.execute("exec proc1") rows = self.cursor.fetchall() self.assertEqual(type(rows), list) self.assertEqual(len(rows), 10) # there has to be at least 10 items in sysobjects self.assertEqual(type(rows[0].refdate), datetime) def test_sp_with_dates(self): # Reported in the forums that passing two datetimes to a stored procedure doesn't work. 
self.cursor.execute( """ if exists (select * from dbo.sysobjects where id = object_id(N'[test_sp]') and OBJECTPROPERTY(id, N'IsProcedure') = 1) drop procedure [dbo].[test_sp] """) self.cursor.execute( """ create procedure test_sp(@d1 datetime, @d2 datetime) AS declare @d as int set @d = datediff(year, @d1, @d2) select @d """) self.cursor.execute("exec test_sp ?, ?", datetime.now(), datetime.now()) rows = self.cursor.fetchall() self.assertTrue(rows is not None) self.assertTrue(rows[0][0] == 0) # 0 years apart def test_sp_with_none(self): # Reported in the forums that passing None caused an error. self.cursor.execute( """ if exists (select * from dbo.sysobjects where id = object_id(N'[test_sp]') and OBJECTPROPERTY(id, N'IsProcedure') = 1) drop procedure [dbo].[test_sp] """) self.cursor.execute( """ create procedure test_sp(@x varchar(20)) AS declare @y varchar(20) set @y = @x select @y """) self.cursor.execute("exec test_sp ?", None) rows = self.cursor.fetchall() self.assertTrue(rows is not None) self.assertTrue(rows[0][0] == None) # 0 years apart # # rowcount # def test_rowcount_delete(self): self.assertEqual(self.cursor.rowcount, -1) self.cursor.execute("create table t1(i int)") count = 4 for i in range(count): self.cursor.execute("insert into t1 values (?)", i) self.cursor.execute("delete from t1") self.assertEqual(self.cursor.rowcount, count) def test_rowcount_nodata(self): """ This represents a different code path than a delete that deleted something. The return value is SQL_NO_DATA and code after it was causing an error. We could use SQL_NO_DATA to step over the code that errors out and drop down to the same SQLRowCount code. On the other hand, we could hardcode a zero return value. """ self.cursor.execute("create table t1(i int)") # This is a different code path internally. self.cursor.execute("delete from t1") self.assertEqual(self.cursor.rowcount, 0) def test_rowcount_select(self): """ Ensure Cursor.rowcount is set properly after a select statement. 
pyodbc calls SQLRowCount after each execute and sets Cursor.rowcount, but SQL Server 2005 returns -1 after a select statement, so we'll test for that behavior. This is valid behavior according to the DB API specification, but people don't seem to like it. """ self.cursor.execute("create table t1(i int)") count = 4 for i in range(count): self.cursor.execute("insert into t1 values (?)", i) self.cursor.execute("select * from t1") self.assertEqual(self.cursor.rowcount, -1) rows = self.cursor.fetchall() self.assertEqual(len(rows), count) self.assertEqual(self.cursor.rowcount, -1) def test_rowcount_reset(self): "Ensure rowcount is reset after DDL" ddl_rowcount = 0 if self.driver_type_is('freetds') else -1 self.cursor.execute("create table t1(i int)") count = 4 for i in range(count): self.cursor.execute("insert into t1 values (?)", i) self.assertEqual(self.cursor.rowcount, 1) self.cursor.execute("create table t2(i int)") self.assertEqual(self.cursor.rowcount, ddl_rowcount) # # always return Cursor # # In the 2.0.x branch, Cursor.execute sometimes returned the cursor and sometimes the rowcount. This proved very # confusing when things went wrong and added very little value even when things went right since users could always # use: cursor.execute("...").rowcount def test_retcursor_delete(self): self.cursor.execute("create table t1(i int)") self.cursor.execute("insert into t1 values (1)") v = self.cursor.execute("delete from t1") self.assertEqual(v, self.cursor) def test_retcursor_nodata(self): """ This represents a different code path than a delete that deleted something. The return value is SQL_NO_DATA and code after it was causing an error. We could use SQL_NO_DATA to step over the code that errors out and drop down to the same SQLRowCount code. """ self.cursor.execute("create table t1(i int)") # This is a different code path internally. 
v = self.cursor.execute("delete from t1") self.assertEqual(v, self.cursor) def test_retcursor_select(self): self.cursor.execute("create table t1(i int)") self.cursor.execute("insert into t1 values (1)") v = self.cursor.execute("select * from t1") self.assertEqual(v, self.cursor) # # misc # def table_with_spaces(self): "Ensure we can select using [x z] syntax" try: self.cursor.execute("create table [test one](int n)") self.cursor.execute("insert into [test one] values(1)") self.cursor.execute("select * from [test one]") v = self.cursor.fetchone()[0] self.assertEqual(v, 1) finally: self.cnxn.rollback() def test_lower_case(self): "Ensure pyodbc.lowercase forces returned column names to lowercase." # Has to be set before creating the cursor, so we must recreate self.cursor. pyodbc.lowercase = True self.cursor = self.cnxn.cursor() self.cursor.execute("create table t1(Abc int, dEf int)") self.cursor.execute("select * from t1") names = [ t[0] for t in self.cursor.description ] names.sort() self.assertEqual(names, [ "abc", "def" ]) # Put it back so other tests don't fail. pyodbc.lowercase = False def test_row_description(self): """ Ensure Cursor.description is accessible as Row.cursor_description. """ self.cursor = self.cnxn.cursor() self.cursor.execute("create table t1(a int, b char(3))") self.cnxn.commit() self.cursor.execute("insert into t1 values(1, 'abc')") row = self.cursor.execute("select * from t1").fetchone() self.assertEqual(self.cursor.description, row.cursor_description) def test_temp_select(self): # A project was failing to create temporary tables via select into. 
self.cursor.execute("create table t1(s char(7))") self.cursor.execute("insert into t1 values(?)", "testing") v = self.cursor.execute("select * from t1").fetchone()[0] self.assertEqual(type(v), str) self.assertEqual(v, "testing") self.cursor.execute("select s into t2 from t1") v = self.cursor.execute("select * from t1").fetchone()[0] self.assertEqual(type(v), str) self.assertEqual(v, "testing") # Money # # The inputs are strings so we don't have to deal with floating point rounding. for value in "-1234.56 -1 0 1 1234.56 123456789.21".split(): name = str(value).replace('.', '_').replace('-', 'neg_') locals()['test_money_%s' % name] = _simpletest('money', Decimal(str(value))) def test_executemany(self): self.cursor.execute("create table t1(a int, b varchar(10))") params = [ (i, str(i)) for i in range(1, 6) ] self.cursor.executemany("insert into t1(a, b) values (?,?)", params) count = self.cursor.execute("select count(*) from t1").fetchone()[0] self.assertEqual(count, len(params)) self.cursor.execute("select a, b from t1 order by a") rows = self.cursor.fetchall() self.assertEqual(count, len(rows)) for param, row in zip(params, rows): self.assertEqual(param[0], row[0]) self.assertEqual(param[1], row[1]) def test_executemany_one(self): "Pass executemany a single sequence" self.cursor.execute("create table t1(a int, b varchar(10))") params = [ (1, "test") ] self.cursor.executemany("insert into t1(a, b) values (?,?)", params) count = self.cursor.execute("select count(*) from t1").fetchone()[0] self.assertEqual(count, len(params)) self.cursor.execute("select a, b from t1 order by a") rows = self.cursor.fetchall() self.assertEqual(count, len(rows)) for param, row in zip(params, rows): self.assertEqual(param[0], row[0]) self.assertEqual(param[1], row[1]) def test_executemany_dae_0(self): """ DAE for 0-length value """ self.cursor.execute("create table t1(a nvarchar(max))") self.cursor.fast_executemany = True self.cursor.executemany("insert into t1(a) values(?)", [['']]) 
self.assertEqual(self.cursor.execute("select a from t1").fetchone()[0], '') self.cursor.fast_executemany = False def test_executemany_failure(self): """ Ensure that an exception is raised if one query in an executemany fails. """ self.cursor.execute("create table t1(a int, b varchar(10))") params = [ (1, 'good'), ('error', 'not an int'), (3, 'good') ] self.assertRaises(pyodbc.Error, self.cursor.executemany, "insert into t1(a, b) value (?, ?)", params) def test_row_slicing(self): self.cursor.execute("create table t1(a int, b int, c int, d int)"); self.cursor.execute("insert into t1 values(1,2,3,4)") row = self.cursor.execute("select * from t1").fetchone() result = row[:] self.assertTrue(result is row) result = row[:-1] self.assertEqual(result, (1,2,3)) result = row[0:4] self.assertTrue(result is row) def test_row_repr(self): self.cursor.execute("create table t1(a int, b int, c int, d int)"); self.cursor.execute("insert into t1 values(1,2,3,4)") row = self.cursor.execute("select * from t1").fetchone() result = str(row) self.assertEqual(result, "(1, 2, 3, 4)") result = str(row[:-1]) self.assertEqual(result, "(1, 2, 3)") result = str(row[:1]) self.assertEqual(result, "(1,)") def test_concatenation(self): v2 = '0123456789' * 30 v3 = '9876543210' * 30 self.cursor.execute("create table t1(c1 int identity(1, 1), c2 varchar(300), c3 varchar(300))") self.cursor.execute("insert into t1(c2, c3) values (?,?)", v2, v3) row = self.cursor.execute("select c2, c3, c2 + c3 as both from t1").fetchone() self.assertEqual(row.both, v2 + v3) def test_view_select(self): # Reported in forum: Can't select from a view? I think I do this a lot, but another test never hurts. # Create a table (t1) with 3 rows and a view (t2) into it. 
self.cursor.execute("create table t1(c1 int identity(1, 1), c2 varchar(50))") for i in range(3): self.cursor.execute("insert into t1(c2) values (?)", "string%s" % i) self.cursor.execute("create view t2 as select * from t1") # Select from the view self.cursor.execute("select * from t2") rows = self.cursor.fetchall() self.assertTrue(rows is not None) self.assertTrue(len(rows) == 3) def test_autocommit(self): self.assertEqual(self.cnxn.autocommit, False) othercnxn = pyodbc.connect(self.connection_string, autocommit=True) self.assertEqual(othercnxn.autocommit, True) othercnxn.autocommit = False self.assertEqual(othercnxn.autocommit, False) def test_sqlserver_callproc(self): try: self.cursor.execute("drop procedure pyodbctest") self.cnxn.commit() except: pass self.cursor.execute("create table t1(s varchar(10))") self.cursor.execute("insert into t1 values(?)", "testing") self.cursor.execute(""" create procedure pyodbctest @var1 varchar(32) as begin select s from t1 return end """) self.cnxn.commit() # for row in self.cursor.procedureColumns('pyodbctest'): # print row.procedure_name, row.column_name, row.column_type, row.type_name self.cursor.execute("exec pyodbctest 'hi'") # print self.cursor.description # for row in self.cursor: # print row.s def test_skip(self): # Insert 1, 2, and 3. Fetch 1, skip 2, fetch 3. self.cursor.execute("create table t1(id int)"); for i in range(1, 5): self.cursor.execute("insert into t1 values(?)", i) self.cursor.execute("select id from t1 order by id") self.assertEqual(self.cursor.fetchone()[0], 1) self.cursor.skip(2) self.assertEqual(self.cursor.fetchone()[0], 4) def test_timeout(self): self.assertEqual(self.cnxn.timeout, 0) # defaults to zero (off) self.cnxn.timeout = 30 self.assertEqual(self.cnxn.timeout, 30) self.cnxn.timeout = 0 self.assertEqual(self.cnxn.timeout, 0) def test_sets_execute(self): # Only lists and tuples are allowed. 
        # (continuation of test_sets_execute) -- passing a set as a parameter
        # must be rejected with ProgrammingError.
        def f():
            self.cursor.execute("create table t1 (word varchar (100))")
            words = set (['a'])
            self.cursor.execute("insert into t1 (word) VALUES (?)", [words])
        self.assertRaises(pyodbc.ProgrammingError, f)

    def test_sets_executemany(self):
        # Only lists and tuples are allowed.
        # NOTE(review): execute() raises ProgrammingError for a set parameter
        # (see test_sets_execute) while executemany() raises TypeError --
        # presumably an intentional asymmetry, but worth confirming.
        def f():
            self.cursor.execute("create table t1 (word varchar (100))")
            words = set (['a'])
            self.cursor.executemany("insert into t1 (word) values (?)", [words])
        self.assertRaises(TypeError, f)

    def test_row_execute(self):
        "Ensure we can use a Row object as a parameter to execute"
        self.cursor.execute("create table t1(n int, s varchar(10))")
        self.cursor.execute("insert into t1 values (1, 'a')")
        # The Row fetched here is passed back in as the parameter sequence below.
        row = self.cursor.execute("select n, s from t1").fetchone()
        self.assertNotEqual(row, None)

        self.cursor.execute("create table t2(n int, s varchar(10))")
        self.cursor.execute("insert into t2 values (?, ?)", row)

    def test_row_executemany(self):
        "Ensure we can use a Row object as a parameter to executemany"
        self.cursor.execute("create table t1(n int, s varchar(10))")
        for i in range(3):
            # rows are (0,'a'), (1,'b'), (2,'c')
            self.cursor.execute("insert into t1 values (?, ?)", i, chr(ord('a')+i))
        rows = self.cursor.execute("select n, s from t1").fetchall()
        self.assertNotEqual(len(rows), 0)

        self.cursor.execute("create table t2(n int, s varchar(10))")
        # Each fetched Row serves as one parameter tuple for executemany.
        self.cursor.executemany("insert into t2 values (?, ?)", rows)

    def test_description(self):
        "Ensure cursor.description is correct"
        self.cursor.execute("create table t1(n int, s varchar(8), d decimal(5,2))")
        self.cursor.execute("insert into t1 values (1, 'abc', '1.23')")
        self.cursor.execute("select * from t1")

        # (I'm not sure the precision of an int is constant across different versions, bits, so I'm hand checking the
        # items I do know.
# int t = self.cursor.description[0] self.assertEqual(t[0], 'n') self.assertEqual(t[1], int) self.assertEqual(t[5], 0) # scale self.assertEqual(t[6], True) # nullable # varchar(8) t = self.cursor.description[1] self.assertEqual(t[0], 's') self.assertEqual(t[1], str) self.assertEqual(t[4], 8) # precision self.assertEqual(t[5], 0) # scale self.assertEqual(t[6], True) # nullable # decimal(5, 2) t = self.cursor.description[2] self.assertEqual(t[0], 'd') self.assertEqual(t[1], Decimal) self.assertEqual(t[4], 5) # precision self.assertEqual(t[5], 2) # scale self.assertEqual(t[6], True) # nullable def test_cursor_messages_with_print(self): """ Ensure the Cursor.messages attribute is handled correctly with a simple PRINT statement. """ # self.cursor is used in setUp, hence is not brand new at this point brand_new_cursor = self.cnxn.cursor() self.assertIsNone(brand_new_cursor.messages) # SQL Server PRINT statements are never more than 8000 characters # https://docs.microsoft.com/en-us/sql/t-sql/language-elements/print-transact-sql#remarks for msg in ('hello world', 'ABCDEFGHIJ' * 800): self.cursor.execute("PRINT '{}'".format(msg)) messages = self.cursor.messages self.assertTrue(type(messages) is list) self.assertEqual(len(messages), 1) self.assertTrue(type(messages[0]) is tuple) self.assertEqual(len(messages[0]), 2) self.assertTrue(type(messages[0][0]) is str) self.assertTrue(type(messages[0][1]) is str) self.assertEqual('[01000] (0)', messages[0][0]) self.assertTrue(messages[0][1].endswith(msg)) def test_cursor_messages_with_stored_proc(self): """ Complex scenario to test the Cursor.messages attribute. 
""" self.cursor.execute(""" CREATE OR ALTER PROCEDURE test_cursor_messages AS BEGIN SET NOCOUNT ON; PRINT 'Message 1a'; PRINT 'Message 1b'; SELECT N'Field 1a' AS F UNION ALL SELECT N'Field 1b'; SELECT N'Field 2a' AS F UNION ALL SELECT N'Field 2b'; PRINT 'Message 2a'; PRINT 'Message 2b'; END """) # result set 1 self.cursor.execute("EXEC test_cursor_messages") rows = [tuple(r) for r in self.cursor.fetchall()] # convert pyodbc.Row objects for ease of use self.assertEqual(len(rows), 2) self.assertSequenceEqual(rows, [('Field 1a', ), ('Field 1b', )]) self.assertEqual(len(self.cursor.messages), 2) self.assertTrue(self.cursor.messages[0][1].endswith('Message 1a')) self.assertTrue(self.cursor.messages[1][1].endswith('Message 1b')) # result set 2 self.assertTrue(self.cursor.nextset()) rows = [tuple(r) for r in self.cursor.fetchall()] # convert pyodbc.Row objects for ease of use self.assertEqual(len(rows), 2) self.assertSequenceEqual(rows, [('Field 2a', ), ('Field 2b', )]) self.assertEqual(self.cursor.messages, []) # result set 3 self.assertTrue(self.cursor.nextset()) with self.assertRaises(pyodbc.ProgrammingError): self.cursor.fetchall() self.assertEqual(len(self.cursor.messages), 2) self.assertTrue(self.cursor.messages[0][1].endswith('Message 2a')) self.assertTrue(self.cursor.messages[1][1].endswith('Message 2b')) # result set 4 (which shouldn't exist) self.assertFalse(self.cursor.nextset()) with self.assertRaises(pyodbc.ProgrammingError): self.cursor.fetchall() self.assertEqual(self.cursor.messages, []) def test_none_param(self): "Ensure None can be used for params other than the first" # Some driver/db versions would fail if NULL was not the first parameter because SQLDescribeParam (only used # with NULL) could not be used after the first call to SQLBindParameter. This means None always worked for the # first column, but did not work for later columns. # # If SQLDescribeParam doesn't work, pyodbc would use VARCHAR which almost always worked. 
However, # binary/varbinary won't allow an implicit conversion. self.handle_known_issues_for('freetds', print_reminder=True) self.cursor.execute("create table t1(n int, blob varbinary(max))") self.cursor.execute("insert into t1 values (1, newid())") row = self.cursor.execute("select * from t1").fetchone() self.assertEqual(row.n, 1) self.assertEqual(type(row.blob), bytes) sql = "update t1 set n=?, blob=?" try: self.cursor.execute(sql, 2, None) except pyodbc.DataError: if self.handle_known_issues_for('freetds'): # FREETDS_KNOWN_ISSUE # # cnxn.getinfo(pyodbc.SQL_DESCRIBE_PARAMETER) returns False for FreeTDS, so # pyodbc can't call SQLDescribeParam to get the correct parameter type. # This can lead to errors being returned from SQL Server when sp_prepexec is called, # e.g., "Implicit conversion from data type varchar to varbinary(max) is not allowed." # # So at least verify that the user can manually specify the parameter type self.cursor.setinputsizes([(), (pyodbc.SQL_VARBINARY, None, None)]) self.cursor.execute(sql, 2, None) else: raise row = self.cursor.execute("select * from t1").fetchone() self.assertEqual(row.n, 2) self.assertEqual(row.blob, None) def test_output_conversion(self): def convert1(value): # The value is the raw bytes (as a bytes object) read from the # database. We'll simply add an X at the beginning at the end. return 'X' + value.decode('latin1') + 'X' def convert2(value): # Same as above, but add a Y at the beginning at the end. return 'Y' + value.decode('latin1') + 'Y' self.cursor.execute("create table t1(n int, v varchar(10))") self.cursor.execute("insert into t1 values (1, '123.45')") self.cnxn.add_output_converter(pyodbc.SQL_VARCHAR, convert1) value = self.cursor.execute("select v from t1").fetchone()[0] self.assertEqual(value, 'X123.45X') # Clear all conversions and try again. There should be no Xs this time. 
self.cnxn.clear_output_converters() value = self.cursor.execute("select v from t1").fetchone()[0] self.assertEqual(value, '123.45') # Same but clear using remove_output_converter. self.cnxn.add_output_converter(pyodbc.SQL_VARCHAR, convert1) value = self.cursor.execute("select v from t1").fetchone()[0] self.assertEqual(value, 'X123.45X') self.cnxn.remove_output_converter(pyodbc.SQL_VARCHAR) value = self.cursor.execute("select v from t1").fetchone()[0] self.assertEqual(value, '123.45') # Clear via add_output_converter, passing None for the converter function. self.cnxn.add_output_converter(pyodbc.SQL_VARCHAR, convert1) value = self.cursor.execute("select v from t1").fetchone()[0] self.assertEqual(value, 'X123.45X') self.cnxn.add_output_converter(pyodbc.SQL_VARCHAR, None) value = self.cursor.execute("select v from t1").fetchone()[0] self.assertEqual(value, '123.45') # retrieve and temporarily replace converter (get_output_converter) # # case_1: converter already registered self.cnxn.add_output_converter(pyodbc.SQL_VARCHAR, convert1) value = self.cursor.execute("select v from t1").fetchone()[0] self.assertEqual(value, 'X123.45X') prev_converter = self.cnxn.get_output_converter(pyodbc.SQL_VARCHAR) self.assertNotEqual(prev_converter, None) self.cnxn.add_output_converter(pyodbc.SQL_VARCHAR, convert2) value = self.cursor.execute("select v from t1").fetchone()[0] self.assertEqual(value, 'Y123.45Y') self.cnxn.add_output_converter(pyodbc.SQL_VARCHAR, prev_converter) value = self.cursor.execute("select v from t1").fetchone()[0] self.assertEqual(value, 'X123.45X') # # case_2: no converter already registered self.cnxn.clear_output_converters() value = self.cursor.execute("select v from t1").fetchone()[0] self.assertEqual(value, '123.45') prev_converter = self.cnxn.get_output_converter(pyodbc.SQL_VARCHAR) self.assertEqual(prev_converter, None) self.cnxn.add_output_converter(pyodbc.SQL_VARCHAR, convert2) value = self.cursor.execute("select v from t1").fetchone()[0] 
self.assertEqual(value, 'Y123.45Y') self.cnxn.add_output_converter(pyodbc.SQL_VARCHAR, prev_converter) value = self.cursor.execute("select v from t1").fetchone()[0] self.assertEqual(value, '123.45') def test_too_large(self): """Ensure error raised if insert fails due to truncation""" value = 'x' * 1000 self.cursor.execute("create table t1(s varchar(800))") def test(): self.cursor.execute("insert into t1 values (?)", value) # different versions of SQL Server generate different errors self.assertRaises((pyodbc.DataError, pyodbc.ProgrammingError), test) def test_geometry_null_insert(self): def convert(value): return value self.cnxn.add_output_converter(-151, convert) # -151 is SQL Server's geometry self.cursor.execute("create table t1(n int, v geometry)") self.cursor.execute("insert into t1 values (?, ?)", 1, None) value = self.cursor.execute("select v from t1").fetchone()[0] self.assertEqual(value, None) self.cnxn.clear_output_converters() def test_login_timeout(self): # This can only test setting since there isn't a way to cause it to block on the server side. 
        # (tail of test_login_timeout) NOTE(review): `cnxns` is never used; the
        # point is only that connect() accepts the `timeout` keyword without
        # raising -- there is no portable way to force a server-side block here.
        cnxns = pyodbc.connect(self.connection_string, timeout=2)

    def test_row_equal(self):
        # Two separate fetches of the same row must compare equal (Row.__eq__).
        self.cursor.execute("create table t1(n int, s varchar(20))")
        self.cursor.execute("insert into t1 values (1, 'test')")
        row1 = self.cursor.execute("select n, s from t1").fetchone()
        row2 = self.cursor.execute("select n, s from t1").fetchone()
        b = (row1 == row2)
        self.assertEqual(b, True)

    def test_row_gtlt(self):
        # Rows share the first column, so ordering is decided by the second
        # column -- exercises all of Row's rich-comparison operators.
        self.cursor.execute("create table t1(n int, s varchar(20))")
        self.cursor.execute("insert into t1 values (1, 'test1')")
        self.cursor.execute("insert into t1 values (1, 'test2')")
        rows = self.cursor.execute("select n, s from t1 order by s").fetchall()
        self.assertTrue(rows[0] < rows[1])
        self.assertTrue(rows[0] <= rows[1])
        self.assertTrue(rows[1] > rows[0])
        self.assertTrue(rows[1] >= rows[0])
        self.assertTrue(rows[0] != rows[1])

        rows = list(rows)
        rows.sort()  # uses <

    def test_context_manager_success(self):
        "Ensure `with` commits if an exception is not raised"
        self.cursor.execute("create table t1(n int)")
        self.cnxn.commit()

        # Leaving the `with` block normally should commit the insert.
        with self.cnxn:
            self.cursor.execute("insert into t1 values (1)")

        rows = self.cursor.execute("select n from t1").fetchall()
        self.assertEqual(len(rows), 1)
        self.assertEqual(rows[0][0], 1)

    def test_context_manager_failure(self):
        "Ensure `with` rolls back if an exception is raised"
        # We'll insert a row and commit it.  Then we'll insert another row followed by an
        # exception.
self.cursor.execute("create table t1(n int)") self.cursor.execute("insert into t1 values (1)") self.cnxn.commit() def _fail(): with self.cnxn: self.cursor.execute("insert into t1 values (2)") self.cursor.execute("delete from bogus") self.assertRaises(pyodbc.Error, _fail) self.cursor.execute("select max(n) from t1") val = self.cursor.fetchval() self.assertEqual(val, 1) def test_untyped_none(self): # From issue 129 value = self.cursor.execute("select ?", None).fetchone()[0] self.assertEqual(value, None) def test_large_update_nodata(self): self.cursor.execute('create table t1(a varbinary(max))') hundredkb = b'x'*100*1024 self.cursor.execute('update t1 set a=? where 1=0', (hundredkb,)) def test_func_param(self): self.cursor.execute(''' create function func1 (@testparam varchar(4)) returns @rettest table (param varchar(4)) as begin insert @rettest select @testparam return end ''') self.cnxn.commit() value = self.cursor.execute("select * from func1(?)", 'test').fetchone()[0] self.assertEqual(value, 'test') def test_no_fetch(self): # Issue 89 with FreeTDS: Multiple selects (or catalog functions that issue selects) without fetches seem to # confuse the driver. self.cursor.execute('select 1') self.cursor.execute('select 1') self.cursor.execute('select 1') def test_drivers(self): drivers = pyodbc.drivers() self.assertEqual(list, type(drivers)) self.assertTrue(len(drivers) > 0) m = re.search('DRIVER={?([^}]+?)}?;', self.connection_string, re.IGNORECASE) current = m.group(1) self.assertTrue(current in drivers) def test_decode_meta(self): """ Ensure column names with non-ASCII characters are converted using the configured encodings. 
""" # This is from GitHub issue #190 self.cursor.execute("create table t1(a int)") self.cursor.execute("insert into t1 values (1)") self.cursor.execute('select a as "Tipología" from t1') self.assertEqual(self.cursor.description[0][0], "Tipología") def test_exc_integrity(self): "Make sure an IntegretyError is raised" # This is really making sure we are properly encoding and comparing the SQLSTATEs. self.cursor.execute("create table t1(s1 varchar(10) primary key)") self.cursor.execute("insert into t1 values ('one')") self.assertRaises(pyodbc.IntegrityError, self.cursor.execute, "insert into t1 values ('one')") def test_columns(self): # When using aiohttp, `await cursor.primaryKeys('t1')` was raising the error # # Error: TypeError: argument 2 must be str, not None # # I'm not sure why, but PyArg_ParseTupleAndKeywords fails if you use "|s" for an # optional string keyword when calling indirectly. self.cursor.execute("create table t1(a int, b varchar(3), xΏz varchar(4))") self.cursor.columns('t1') results = {row.column_name: row for row in self.cursor} row = results['a'] assert row.type_name == 'int', row.type_name row = results['b'] assert row.type_name == 'varchar' assert row.column_size == 3 # Now do the same, but specifically pass in None to one of the keywords. Old versions # were parsing arguments incorrectly and would raise an error. (This crops up when # calling indirectly like columns(*args, **kwargs) which aiodbc does.) 
self.cursor.columns('t1', schema=None, catalog=None) results = {row.column_name: row for row in self.cursor} row = results['a'] assert row.type_name == 'int', row.type_name row = results['b'] assert row.type_name == 'varchar' assert row.column_size == 3 row = results['xΏz'] assert row.type_name == 'varchar' assert row.column_size == 4, row.column_size # for i in range(8, 16): table_name = 'pyodbc_89abcdef'[:i] self.cursor.execute("""\ IF OBJECT_ID (N'{0}', N'U') IS NOT NULL DROP TABLE {0}; CREATE TABLE {0} (id INT PRIMARY KEY); """.format(table_name)) col_count = len([col.column_name for col in self.cursor.columns(table_name)]) # print('table [{}] ({} characters): {} columns{}'.format(table_name, i, col_count, ' <-' if col_count == 0 else '')) self.assertEqual(col_count, 1) self.cursor.execute("DROP TABLE {};".format(table_name)) # def test_cancel(self): # I'm not sure how to reliably cause a hang to cancel, so for now we'll settle with # making sure SQLCancel is called correctly. self.cursor.execute("select 1") self.cursor.cancel() def test_emoticons_as_parameter(self): # https://github.com/mkleehammer/pyodbc/issues/423 # # When sending a varchar parameter, pyodbc is supposed to set ColumnSize to the number # of characters. Ensure it works even with 4-byte characters. 
# # http://www.fileformat.info/info/unicode/char/1f31c/index.htm v = "x \U0001F31C z" self.cursor.execute("create table t1(s nvarchar(100))") self.cursor.execute("insert into t1 values (?)", v) result = self.cursor.execute("select s from t1").fetchone()[0] self.assertEqual(result, v) def test_emoticons_as_literal(self): # similar to `test_emoticons_as_parameter`, above, except for Unicode literal # # http://www.fileformat.info/info/unicode/char/1f31c/index.htm # FreeTDS ODBC issue fixed in version 1.1.23 # https://github.com/FreeTDS/freetds/issues/317 v = "x \U0001F31C z" self.cursor.execute("create table t1(s nvarchar(100))") self.cursor.execute("insert into t1 values (N'%s')" % v) result = self.cursor.execute("select s from t1").fetchone()[0] self.assertEqual(result, v) def _test_tvp(self, diff_schema): # https://github.com/mkleehammer/pyodbc/issues/290 # # pyodbc supports queries with table valued parameters in sql server # if self.handle_known_issues_for('freetds', print_reminder=True): warn('FREETDS_KNOWN_ISSUE - test_tvp: test cancelled.') return procname = 'SelectTVP' typename = 'TestTVP' if diff_schema: schemaname = 'myschema' procname = schemaname + '.' + procname typenameonly = typename typename = schemaname + '.' + typename # (Don't use "if exists" since older SQL Servers don't support it.) 
try: self.cursor.execute("drop procedure " + procname) except: pass try: self.cursor.execute("drop type " + typename) except: pass if diff_schema: try: self.cursor.execute("drop schema " + schemaname) except: pass self.cursor.commit() if diff_schema: self.cursor.execute("CREATE SCHEMA myschema") self.cursor.commit() query = "CREATE TYPE %s AS TABLE("\ "c01 VARCHAR(255),"\ "c02 VARCHAR(MAX),"\ "c03 VARBINARY(255),"\ "c04 VARBINARY(MAX),"\ "c05 BIT,"\ "c06 DATE,"\ "c07 TIME,"\ "c08 DATETIME2(5),"\ "c09 BIGINT,"\ "c10 FLOAT,"\ "c11 NUMERIC(38, 24),"\ "c12 UNIQUEIDENTIFIER)" % typename self.cursor.execute(query) self.cursor.commit() self.cursor.execute("CREATE PROCEDURE %s @TVP %s READONLY AS SELECT * FROM @TVP;" % (procname, typename)) self.cursor.commit() long_string = '' long_bytearray = [] for i in range(255): long_string += chr((i % 95) + 32) long_bytearray.append(i % 255) very_long_string = '' very_long_bytearray = [] for i in range(2000000): very_long_string += chr((i % 95) + 32) very_long_bytearray.append(i % 255) c01 = ['abc', '', long_string] c02 = ['abc', '', very_long_string] c03 = [bytearray([0xD1, 0xCE, 0xFA, 0xCE]), bytearray([0x00, 0x01, 0x02, 0x03, 0x04]), bytearray(long_bytearray)] c04 = [bytearray([0x0F, 0xF1, 0xCE, 0xCA, 0xFE]), bytearray([0x00, 0x01, 0x02, 0x03, 0x04, 0x05]), bytearray(very_long_bytearray)] c05 = [1, 0, 1] c06 = [date(1997, 8, 29), date(1, 1, 1), date(9999, 12, 31)] c07 = [time(9, 13, 39), time(0, 0, 0), time(23, 59, 59)] c08 = [datetime(2018, 11, 13, 13, 33, 26, 298420), datetime(1, 1, 1, 0, 0, 0, 0), datetime(9999, 12, 31, 23, 59, 59, 999990)] c09 = [1234567, -9223372036854775808, 9223372036854775807] c10 = [3.14, -1.79E+308, 1.79E+308] c11 = [Decimal('31234567890123.141243449787580175325274'), Decimal( '0.000000000000000000000001'), Decimal('99999999999999.999999999999999999999999')] c12 = ['4FE34A93-E574-04CC-200A-353F0D1770B1', '33F7504C-2BAC-1B83-01D1-7434A7BA6A17', 'FFFFFFFF-FFFF-FFFF-FFFF-FFFFFFFFFFFF'] param_array = [] for 
i in range (3): param_array.append([c01[i], c02[i], c03[i], c04[i], c05[i], c06[i], c07[i], c08[i], c09[i], c10[i], c11[i], c12[i]]) success = True try: p1 = [param_array] if diff_schema: p1 = [ [ typenameonly, schemaname ] + param_array ] result_array = self.cursor.execute("exec %s ?" % procname, p1).fetchall() except Exception as ex: print("Failed to execute SelectTVP") print("Exception: [" + type(ex).__name__ + "]" , ex.args) success = False else: for r in range(len(result_array)): for c in range(len(result_array[r])): if(result_array[r][c] != param_array[r][c]): print("Mismatch at row " + str(r+1) + ", column " + str(c+1) + "; expected:", param_array[r][c] , " received:", result_array[r][c]) success = False try: p1 = [[]] if diff_schema: p1 = [ [ typenameonly, schemaname ] + [] ] result_array = self.cursor.execute("exec %s ?" % procname, p1).fetchall() self.assertEqual(result_array, []) except Exception as ex: print("Failed to execute SelectTVP") print("Exception: [" + type(ex).__name__ + "]", ex.args) success = False self.assertEqual(success, True) def test_columns(self): self.cursor.execute( """ create table t1(n int, d datetime, c nvarchar(100)) """) self.cursor.columns(table='t1') names = {row.column_name for row in self.cursor.fetchall()} assert names == {'n', 'd', 'c'}, 'names=%r' % names self.cursor.columns(table='t1', column='c') row = self.cursor.fetchone() assert row.column_name == 'c' def test_tvp(self): self._test_tvp(False) def test_tvp_diffschema(self): self._test_tvp(True) def main(): from optparse import OptionParser parser = OptionParser(usage=usage) parser.add_option("-v", "--verbose", action="count", default=0, help="Increment test verbosity (can be used multiple times)") parser.add_option("-d", "--debug", action="store_true", default=False, help="Print debugging items") parser.add_option("-t", "--test", help="Run only the named test") (options, args) = parser.parse_args() if len(args) > 1: parser.error('Only one argument is allowed. 
Do you need quotes around the connection string?') if not args: connection_string = load_setup_connection_string('sqlservertests') if not connection_string: parser.print_help() raise SystemExit() else: connection_string = args[0] if options.verbose: cnxn = pyodbc.connect(connection_string) print_library_info(cnxn) cnxn.close() suite = load_tests(SqlServerTestCase, options.test, connection_string) testRunner = unittest.TextTestRunner(verbosity=options.verbose) result = testRunner.run(suite) return result if __name__ == '__main__': # Add the build directory to the path so we're testing the latest build, not the installed version. add_to_path() import pyodbc sys.exit(0 if main().wasSuccessful() else 1) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1619054768.0 pyodbc-4.0.32/tests3/test.py0000664000175000017500000000051500000000000017202 0ustar00mkleehammermkleehammer from testutils import * add_to_path() import pyodbc cnxn = pyodbc.connect("DRIVER={SQL Server Native Client 10.0};SERVER=localhost;DATABASE=test;Trusted_Connection=yes") print('cnxn:', cnxn) cursor = cnxn.cursor() print('cursor:', cursor) cursor.execute("select 1") row = cursor.fetchone() print('row:', row) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1619054768.0 pyodbc-4.0.32/tests3/testbase.py0000664000175000017500000000146300000000000020040 0ustar00mkleehammermkleehammer import unittest _TESTSTR = '0123456789-abcdefghijklmnopqrstuvwxyz-' def _generate_test_string(length): """ Returns a string of `length` characters, constructed by repeating _TESTSTR as necessary. To enhance performance, there are 3 ways data is read, based on the length of the value, so most data types are tested with 3 lengths. This function helps us generate the test data. We use a recognizable data set instead of a single character to make it less likely that "overlap" errors will be hidden and to help us manually identify where a break occurs. 
""" if length <= len(_TESTSTR): return _TESTSTR[:length] c = (length + len(_TESTSTR)-1) / len(_TESTSTR) v = _TESTSTR * c return v[:length] class TestBase(unittest.TestCase): ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1625348714.0 pyodbc-4.0.32/tests3/testutils.py0000664000175000017500000000767300000000000020277 0ustar00mkleehammermkleehammerimport os, sys, platform from os.path import join, dirname, abspath, basename import unittest from distutils.util import get_platform def add_to_path(): """ Prepends the build directory to the path so that newly built pyodbc libraries are used, allowing it to be tested without installing it. """ # Put the build directory into the Python path so we pick up the version we just built. # # To make this cross platform, we'll search the directories until we find the .pyd file. import imp library_exts = [ t[0] for t in imp.get_suffixes() if t[-1] == imp.C_EXTENSION ] library_names = [ 'pyodbc%s' % ext for ext in library_exts ] # Only go into directories that match our version number. dir_suffix = '%s-%s.%s' % (get_platform(), sys.version_info[0], sys.version_info[1]) build = join(dirname(dirname(abspath(__file__))), 'build') for root, dirs, files in os.walk(build): for d in dirs[:]: if not d.endswith(dir_suffix): dirs.remove(d) for name in library_names: if name in files: sys.path.insert(0, root) print('Library:', join(root, name)) return print('Did not find the pyodbc library in the build directory. 
Will use an installed version.') def print_library_info(cnxn): import pyodbc print('python: %s' % sys.version) print('pyodbc: %s %s' % (pyodbc.version, os.path.abspath(pyodbc.__file__))) print('odbc: %s' % cnxn.getinfo(pyodbc.SQL_ODBC_VER)) print('driver: %s %s' % (cnxn.getinfo(pyodbc.SQL_DRIVER_NAME), cnxn.getinfo(pyodbc.SQL_DRIVER_VER))) print(' supports ODBC version %s' % cnxn.getinfo(pyodbc.SQL_DRIVER_ODBC_VER)) print('os: %s' % platform.system()) print('unicode: Py_Unicode=%s SQLWCHAR=%s' % (pyodbc.UNICODE_SIZE, pyodbc.SQLWCHAR_SIZE)) cursor = cnxn.cursor() for typename in ['VARCHAR', 'WVARCHAR', 'BINARY']: t = getattr(pyodbc, 'SQL_' + typename) cursor.getTypeInfo(t) row = cursor.fetchone() print('Max %s = %s' % (typename, row and row[2] or '(not supported)')) if platform.system() == 'Windows': print(' %s' % ' '.join([s for s in platform.win32_ver() if s])) def load_tests(testclass, name, *args): """ Returns a TestSuite for tests in `testclass`. name Optional test name if you only want to run 1 test. If not provided all tests in `testclass` will be loaded. args Arguments for the test class constructor. These will be passed after the test method name. """ if name: if not name.startswith('test_'): name = 'test_%s' % name names = [ name ] else: names = [ method for method in dir(testclass) if method.startswith('test_') ] return unittest.TestSuite([ testclass(name, *args) for name in names ]) def load_setup_connection_string(section): """ Attempts to read the default connection string from the setup.cfg file. If the file does not exist or if it exists but does not contain the connection string, None is returned. If the file exists but cannot be parsed, an exception is raised. 
""" from os.path import exists, join, dirname, splitext, basename from configparser import SafeConfigParser FILENAME = 'setup.cfg' KEY = 'connection-string' path = dirname(abspath(__file__)) while True: fqn = join(path, 'tmp', FILENAME) if exists(fqn): break parent = dirname(path) print('{} --> {}'.format(path, parent)) if parent == path: return None path = parent try: p = SafeConfigParser() p.read(fqn) except: raise SystemExit('Unable to parse %s: %s' % (path, sys.exc_info()[1])) if p.has_option(section, KEY): return p.get(section, KEY) ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1629404586.3039672 pyodbc-4.0.32/utils/0000775000175000017500000000000000000000000015563 5ustar00mkleehammermkleehammer././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1619054768.0 pyodbc-4.0.32/utils/build-releases.cmd0000664000175000017500000000023500000000000021150 0ustar00mkleehammermkleehammerrem Run this from the project root. del dist\* for /D %%d in (venv*) do cmd /c "%%d\Scripts\activate.bat & setup clean -a bdist_wheel & deactivate" ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1619054768.0 pyodbc-4.0.32/utils/build-releases.sh0000775000175000017500000000053300000000000021023 0ustar00mkleehammermkleehammer#!/bin/bash # Run this from the root. # Delete old builds to make it easy to upload "dist/*" with twine. rm dist/* # Make the source build. python setup.py sdist # Make a wheel build for each virtual environment we find. for d in venv*/; do source ${d}bin/activate python setup.py clean -a bdist_wheel deactivate done ls -l dist