diff --git a/.flake8 b/.flake8 index 6368ef0f..d468630b 100644 --- a/.flake8 +++ b/.flake8 @@ -4,4 +4,6 @@ ignore = E221, # multi spaces before op - I line up assignments often E401, - # multiple imports on one line \ No newline at end of file + # multiple imports on one line + E722, + # ignore bare except in tests diff --git a/.github/workflows/artifacts_build.yml b/.github/workflows/artifacts_build.yml index 0b610ca1..e7e9f00f 100644 --- a/.github/workflows/artifacts_build.yml +++ b/.github/workflows/artifacts_build.yml @@ -32,6 +32,8 @@ jobs: strategy: matrix: # https://docs.github.com/en/actions/using-jobs/choosing-the-runner-for-a-job + # ubuntu-20.04 supports more versions of Python than ubuntu-22.04 + # https://raw.githubusercontent.com/actions/python-versions/main/versions-manifest.json os: [windows-2019, macos-11, ubuntu-20.04] steps: @@ -45,14 +47,15 @@ jobs: platforms: all - name: Build wheels - uses: pypa/cibuildwheel@v2.11.2 + uses: pypa/cibuildwheel@v2.13.1 # https://cibuildwheel.readthedocs.io/en/stable/options/#options-summary env: # Windows - both 64-bit and 32-bit builds CIBW_ARCHS_WINDOWS: "AMD64 x86" - # macOS - both Intel and ARM builds; no bundled libraries - CIBW_ARCHS_MACOS: "x86_64 arm64" + # macOS - just Intel build (ARM doesn't work); no bundled libraries + # https://cibuildwheel.readthedocs.io/en/stable/faq/#how-to-cross-compile + CIBW_ARCHS_MACOS: x86_64 # prevent the addition of unixODBC dylibs to the wheel by simply not calling the repair CIBW_REPAIR_WHEEL_COMMAND_MACOS: "" diff --git a/.github/workflows/ubuntu_build.yml b/.github/workflows/ubuntu_build.yml index 251f7bbc..37825289 100644 --- a/.github/workflows/ubuntu_build.yml +++ b/.github/workflows/ubuntu_build.yml @@ -5,26 +5,14 @@ on: [push, pull_request] jobs: run_tests: name: Run tests on Python ${{ matrix.python-version }} + # ubuntu-20.04 supports more versions of Python than ubuntu-22.04 + # 
https://raw.githubusercontent.com/actions/python-versions/main/versions-manifest.json runs-on: ubuntu-20.04 strategy: fail-fast: false matrix: - include: - - python-version: "2.7" - tests-dir: tests2 - - python-version: "3.6" - tests-dir: tests3 - - python-version: "3.7" - tests-dir: tests3 - - python-version: "3.8" - tests-dir: tests3 - - python-version: "3.9" - tests-dir: tests3 - - python-version: "3.10" - tests-dir: tests3 - - python-version: "3.11" - tests-dir: tests3 + python-version: ["3.7", "3.8", "3.9", "3.10", "3.11"] services: @@ -161,15 +149,16 @@ jobs: - uses: actions/checkout@v3 - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v3 + uses: actions/setup-python@v4 with: python-version: ${{ matrix.python-version }} - - name: Install Python dependencies + - name: Install Python dev dependencies + # pyodbc doesn't have any Python dependencies, but we do need pytest for testing. run: | cd "$GITHUB_WORKSPACE" python -m pip install --upgrade pip - if [ -f requirements.txt ]; then pip install -r requirements.txt; fi + if [ -f requirements-dev.txt ]; then pip install -r requirements-dev.txt; fi - name: Build and install pyodbc run: | @@ -185,22 +174,30 @@ jobs: echo "*** pyodbc drivers" python -c "import pyodbc; print('\n'.join(sorted(pyodbc.drivers())))" - - name: Run SQL Server 2017 tests + - name: Run PostgreSQL tests + env: + PYODBC_POSTGRESQL: "DRIVER={PostgreSQL Unicode};SERVER=localhost;PORT=5432;UID=postgres_user;PWD=postgres_pwd;DATABASE=test" run: | cd "$GITHUB_WORKSPACE" - python "./${{ matrix.tests-dir }}/sqlservertests.py" "DRIVER={ODBC Driver 17 for SQL Server};SERVER=localhost,1401;UID=sa;PWD=StrongPassword2017;DATABASE=test" + pytest "./tests/postgresql_test.py" - - name: Run SQL Server 2019 tests + - name: Run MySQL tests + env: + PYODBC_MYSQL: "DRIVER={MySQL ODBC 8.0 ANSI Driver};SERVER=localhost;UID=root;PWD=root;DATABASE=test;CHARSET=utf8mb4" run: | cd "$GITHUB_WORKSPACE" - python "./${{ matrix.tests-dir 
}}/sqlservertests.py" "DRIVER={ODBC Driver 18 for SQL Server};SERVER=localhost,1402;UID=sa;PWD=StrongPassword2019;DATABASE=test;Encrypt=Optional" + pytest "./tests/mysql_test.py" - - name: Run PostgreSQL tests + - name: Run SQL Server 2017 tests + env: + PYODBC_SQLSERVER: "DRIVER={ODBC Driver 17 for SQL Server};SERVER=localhost,1401;UID=sa;PWD=StrongPassword2017;DATABASE=test" run: | cd "$GITHUB_WORKSPACE" - python "./${{ matrix.tests-dir }}/pgtests.py" "DRIVER={PostgreSQL Unicode};SERVER=localhost;PORT=5432;UID=postgres_user;PWD=postgres_pwd;DATABASE=test" + python "./tests/sqlserver_test.py" - - name: Run MySQL tests + - name: Run SQL Server 2019 tests + env: + PYODBC_SQLSERVER: "DRIVER={ODBC Driver 18 for SQL Server};SERVER=localhost,1402;UID=sa;PWD=StrongPassword2019;DATABASE=test;Encrypt=Optional" run: | cd "$GITHUB_WORKSPACE" - python "./${{ matrix.tests-dir }}/mysqltests.py" "DRIVER={MySQL ODBC 8.0 ANSI Driver};SERVER=localhost;UID=root;PWD=root;DATABASE=test;CHARSET=utf8mb4" + python "./tests/sqlserver_test.py" diff --git a/HACKING.md b/HACKING.md new file mode 100644 index 00000000..cfed9e0a --- /dev/null +++ b/HACKING.md @@ -0,0 +1,28 @@ + + +# Development Testing + +We use tox for complete testing, but when you are in the middle of development you need fast +turn around. In this mode you need to be able to build and run tests using pytest manually. +To do this, build from the root of the directory using `--inplace` which will build the library +into the root. Run pytest from the same root directory and the new pyodbc library you built +will be in the path for your test: + + python setup.py build_ext --inplace + pytest test/test_postgresql.py -vxk test_text + +If a segmentation fault occurs while running tests, pytest will have eaten the output. 
Add +-s to the command line: + + python setup.py build_ext --inplace -D PYODBC_TRACE + pytest tests/postgresql_test.py -vxk test_text -vs + + +# Notes + +## uint16_t + +You'll notice we use uint16_t instead of SQLWCHAR. The unixODBC headers would define SQLWCHAR +as wchar_t even when wchar_t was defined by the C library as uint32_t. The data in the buffer +was still 16 bit however. + diff --git a/README.md b/README.md index 53540f37..8079eb2d 100644 --- a/README.md +++ b/README.md @@ -1,12 +1,10 @@ # pyodbc -[![AppVeyor](https://ci.appveyor.com/api/projects/status/github/mkleehammer/pyodbc?branch=master&svg=true&passingText=Windows%20build&failingText=Windows%20build)](https://ci.appveyor.com/project/mkleehammer/pyodbc) -[![Github Actions - Ubuntu Build](https://github.com/mkleehammer/pyodbc/actions/workflows/ubuntu_build.yml/badge.svg?branch=master)](https://github.com/mkleehammer/pyodbc/actions/workflows/ubuntu_build.yml) +[![Ubuntu build](https://github.com/mkleehammer/pyodbc/actions/workflows/ubuntu_build.yml/badge.svg)](https://github.com/mkleehammer/pyodbc/actions/workflows/ubuntu_build.yml) +[![PyPI](https://img.shields.io/pypi/v/pyodbc?color=brightgreen)](https://pypi.org/project/pyodbc/) pyodbc is an open source Python module that makes accessing ODBC databases simple. It -implements the [DB API 2.0](https://www.python.org/dev/peps/pep-0249) specification but is -packed with even more Pythonic convenience. +implements the [DB API 2.0](https://www.python.org/dev/peps/pep-0249) specification but is packed with even more Pythonic convenience. The easiest way to install pyodbc is to use pip: @@ -31,6 +29,3 @@ compiler. See the [docs](https://github.com/mkleehammer/pyodbc/wiki/Install) fo [Documentation](https://github.com/mkleehammer/pyodbc/wiki) [Release Notes](https://github.com/mkleehammer/pyodbc/releases) - -IMPORTANT: Python 2.7 support is being ended. The pyodbc 4.x versions will be the last to -support Python 2.7. 
The pyodbc 5.x versions will support only Python 3.7 and above. diff --git a/appveyor.yml b/appveyor.yml index d290b397..83a69f95 100644 --- a/appveyor.yml +++ b/appveyor.yml @@ -53,23 +53,6 @@ environment: # all the Python versions to be tested, both 32-bit and 64-bit # ref: https://www.appveyor.com/docs/windows-images-software/#python - # Python 2.7 must be built with Visual Studio 9.0, which is available only - # on AppVeyor Windows images Visual Studio 2013 and Visual Studio 2015 - - - APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2015 - PYTHON_HOME: "C:\\Python27" - - - APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2015 - PYTHON_HOME: "C:\\Python27-x64" - - # Python 3.5+ need at least the Visual Studio 2015 image - - - APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2019 - PYTHON_HOME: "C:\\Python36" - - - APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2019 - PYTHON_HOME: "C:\\Python36-x64" - - APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2019 PYTHON_HOME: "C:\\Python37" diff --git a/appveyor/after_test.cmd b/appveyor/after_test.cmd deleted file mode 100644 index 69bd132e..00000000 --- a/appveyor/after_test.cmd +++ /dev/null @@ -1,13 +0,0 @@ -IF "%APVYR_GENERATE_WHEELS%" == "true" ( - ECHO *** pip install the "wheel" module - "%PYTHON_HOME%\python" -m pip install wheel --quiet --no-warn-script-location - ECHO. - ECHO *** Generate the wheel file - %WITH_COMPILER% "%PYTHON_HOME%\python" setup.py bdist_wheel - ECHO. - ECHO *** \dist directory listing: - DIR /B dist -) ELSE ( - ECHO *** Skipping generation of the wheel file - ECHO. 
-) diff --git a/appveyor/compile.cmd b/appveyor/compile.cmd deleted file mode 100644 index d88ded94..00000000 --- a/appveyor/compile.cmd +++ /dev/null @@ -1,80 +0,0 @@ -:: To build extensions for 64 bit Python 2, we need to configure environment -:: variables to use the MSVC 2008 C++ compilers from GRMSDKX_EN_DVD.iso of: -:: MS Windows SDK for Windows 7 and .NET Framework 3.5 (SDK v7.0) -:: -:: To build extensions for 64 bit Python 3, we need to configure environment -:: variables to use the MSVC 2010 C++ compilers from GRMSDKX_EN_DVD.iso of: -:: MS Windows SDK for Windows 7 and .NET Framework 4 (SDK v7.1) -:: -:: 32 bit builds, and 64-bit builds for 3.5 and beyond, do not require specific -:: environment configurations. -:: -:: Note: this script needs to be run with the /E:ON and /V:ON flags for the -:: cmd interpreter, at least for (SDK v7.0) -:: -:: More details at: -:: https://github.com/cython/cython/wiki/64BitCythonExtensionsOnWindows -:: http://stackoverflow.com/a/13751649/163740 -:: -:: Author: Olivier Grisel -:: License: CC0 1.0 Universal: http://creativecommons.org/publicdomain/zero/1.0/ -:: -:: The repeated CALL commands at the end of this file look redundant, but -:: if you move them outside the IF clauses, they do not run properly in -:: the SET_SDK_64==Y case, I don't know why. 
-@ECHO OFF - -SET COMMAND_TO_RUN=%* -SET WIN_SDK_ROOT=C:\Program Files\Microsoft SDKs\Windows -SET WIN_WDK=C:\Program Files (x86)\Windows Kits\10\Include\wdf - -:: Extract the major and minor versions of the current Python interpreter, and bitness -FOR /F "tokens=* USEBACKQ" %%F IN (`%PYTHON_HOME%\python -c "import sys; sys.stdout.write(str(sys.version_info.major))"`) DO ( -SET PYTHON_MAJOR_VERSION=%%F -) -FOR /F "tokens=* USEBACKQ" %%F IN (`%PYTHON_HOME%\python -c "import sys; sys.stdout.write(str(sys.version_info.minor))"`) DO ( -SET PYTHON_MINOR_VERSION=%%F -) -FOR /F "tokens=* USEBACKQ" %%F IN (`%PYTHON_HOME%\python -c "import sys; sys.stdout.write('64' if sys.maxsize > 2**32 else '32')"`) DO ( -SET PYTHON_ARCH=%%F -) -ECHO Inferred Python version (major, minor, arch): %PYTHON_MAJOR_VERSION% %PYTHON_MINOR_VERSION% %PYTHON_ARCH% - -:: Based on the Python version, determine what SDK version to use, and whether -:: to set the SDK for 64-bit. -IF %PYTHON_MAJOR_VERSION% EQU 2 ( - SET WINDOWS_SDK_VERSION="v7.0" - SET SET_SDK_64=Y -) ELSE ( - IF %PYTHON_MAJOR_VERSION% EQU 3 ( - SET WINDOWS_SDK_VERSION="v7.1" - SET SET_SDK_64=N - IF EXIST "%WIN_WDK%" ( - :: See: https://connect.microsoft.com/VisualStudio/feedback/details/1610302/ - REN "%WIN_WDK%" 0wdf - ) - ) ELSE ( - ECHO Unsupported Python version: "%PYTHON_MAJOR_VERSION%" - EXIT 1 - ) -) - -IF %PYTHON_ARCH% EQU 64 ( - IF %SET_SDK_64% == Y ( - ECHO Configuring Windows SDK %WINDOWS_SDK_VERSION% for Python %PYTHON_MAJOR_VERSION% on a 64 bit architecture - SET DISTUTILS_USE_SDK=1 - SET MSSdk=1 - "%WIN_SDK_ROOT%\%WINDOWS_SDK_VERSION%\Setup\WindowsSdkVer.exe" -q -version:%WINDOWS_SDK_VERSION% - "%WIN_SDK_ROOT%\%WINDOWS_SDK_VERSION%\Bin\SetEnv.cmd" /x64 /release - ECHO Executing: %COMMAND_TO_RUN% - CALL %COMMAND_TO_RUN% || EXIT 1 - ) ELSE ( - ECHO Using default MSVC build environment for 64 bit architecture - ECHO Executing: %COMMAND_TO_RUN% - CALL %COMMAND_TO_RUN% || EXIT 1 - ) -) ELSE ( - ECHO Using default MSVC build 
environment for 32 bit architecture - ECHO Executing: %COMMAND_TO_RUN% - CALL %COMMAND_TO_RUN% || EXIT 1 -) diff --git a/appveyor/test_connect.py b/appveyor/test_connect.py deleted file mode 100644 index 1acf7ebb..00000000 --- a/appveyor/test_connect.py +++ /dev/null @@ -1,4 +0,0 @@ -import sys -import pyodbc -c = pyodbc.connect(sys.argv[1]) -c.close() diff --git a/appveyor/test_script.cmd b/appveyor/test_script.cmd index 3216e413..36ac52f0 100644 --- a/appveyor/test_script.cmd +++ b/appveyor/test_script.cmd @@ -15,18 +15,9 @@ IF NOT "%APVYR_RUN_TESTS%" == "true" ( ) -REM Extract the major version of the current Python interpreter, and bitness -FOR /F "tokens=* USEBACKQ" %%F IN (`%PYTHON_HOME%\python -c "import sys; sys.stdout.write(str(sys.version_info.major))"`) DO ( -SET PYTHON_MAJOR_VERSION=%%F -) FOR /F "tokens=* USEBACKQ" %%F IN (`%PYTHON_HOME%\python -c "import sys; sys.stdout.write('64' if sys.maxsize > 2**32 else '32')"`) DO ( SET PYTHON_ARCH=%%F ) -IF %PYTHON_MAJOR_VERSION% EQU 2 ( - SET TESTS_DIR=tests2 -) ELSE ( - SET TESTS_DIR=tests3 -) :mssql @@ -68,7 +59,7 @@ SET PYTHON_ARGS="%CONN_STR:"=\"%" IF "%APVYR_VERBOSE%" == "true" ( SET PYTHON_ARGS=%PYTHON_ARGS% --verbose ) -"%PYTHON_HOME%\python" "%TESTS_DIR%\sqlservertests.py" %PYTHON_ARGS% +"%PYTHON_HOME%\python" "tests\sqlserver_test.py" %PYTHON_ARGS% IF ERRORLEVEL 1 SET OVERALL_RESULT=1 :mssql2 @@ -88,7 +79,7 @@ SET PYTHON_ARGS="%CONN_STR:"=\"%" IF "%APVYR_VERBOSE%" == "true" ( SET PYTHON_ARGS=%PYTHON_ARGS% --verbose ) -"%PYTHON_HOME%\python" "%TESTS_DIR%\sqlservertests.py" %PYTHON_ARGS% +"%PYTHON_HOME%\python" "tests\sqlserver_test.py" %PYTHON_ARGS% IF ERRORLEVEL 1 SET OVERALL_RESULT=1 :mssql3 @@ -107,7 +98,7 @@ SET PYTHON_ARGS="%CONN_STR:"=\"%" IF "%APVYR_VERBOSE%" == "true" ( SET PYTHON_ARGS=%PYTHON_ARGS% --verbose ) -"%PYTHON_HOME%\python" "%TESTS_DIR%\sqlservertests.py" %PYTHON_ARGS% +"%PYTHON_HOME%\python" "tests\sqlserver_test.py" %PYTHON_ARGS% IF ERRORLEVEL 1 SET OVERALL_RESULT=1 :mssql4 @@ 
-126,7 +117,7 @@ SET PYTHON_ARGS="%CONN_STR:"=\"%" IF "%APVYR_VERBOSE%" == "true" ( SET PYTHON_ARGS=%PYTHON_ARGS% --verbose ) -"%PYTHON_HOME%\python" "%TESTS_DIR%\sqlservertests.py" %PYTHON_ARGS% +"%PYTHON_HOME%\python" "tests\sqlserver_test.py" %PYTHON_ARGS% IF ERRORLEVEL 1 SET OVERALL_RESULT=1 :mssql5 @@ -145,7 +136,7 @@ SET PYTHON_ARGS="%CONN_STR:"=\"%" IF "%APVYR_VERBOSE%" == "true" ( SET PYTHON_ARGS=%PYTHON_ARGS% --verbose ) -"%PYTHON_HOME%\python" "%TESTS_DIR%\sqlservertests.py" %PYTHON_ARGS% +"%PYTHON_HOME%\python" "tests\sqlserver_test.py" %PYTHON_ARGS% IF ERRORLEVEL 1 SET OVERALL_RESULT=1 :mssql6 @@ -164,7 +155,7 @@ SET PYTHON_ARGS="%CONN_STR:"=\"%" IF "%APVYR_VERBOSE%" == "true" ( SET PYTHON_ARGS=%PYTHON_ARGS% --verbose ) -"%PYTHON_HOME%\python" "%TESTS_DIR%\sqlservertests.py" %PYTHON_ARGS% +"%PYTHON_HOME%\python" "tests\sqlserver_test.py" %PYTHON_ARGS% IF ERRORLEVEL 1 SET OVERALL_RESULT=1 diff --git a/requirements-dev.txt b/requirements-dev.txt new file mode 100644 index 00000000..fccb9966 --- /dev/null +++ b/requirements-dev.txt @@ -0,0 +1,12 @@ +# https://pip.pypa.io/en/stable/reference/requirements-file-format/ +# https://peps.python.org/pep-0508/ +# https://peps.python.org/pep-0440/ + +setuptools ~= 67.7 +pytest ~= 7.3 + +# I'm going to try leaving the versions off since we're supporting drastically different Python +# versions. I want the most up to date I can get in each, at least until one of them makes a +# backwards incompatible change. +flake8 +pylint diff --git a/requirements-test.txt b/requirements-test.txt deleted file mode 100644 index e079f8a6..00000000 --- a/requirements-test.txt +++ /dev/null @@ -1 +0,0 @@ -pytest diff --git a/setup.py b/setup.py index 3f150612..ef938a8a 100755 --- a/setup.py +++ b/setup.py @@ -1,157 +1,77 @@ #!/usr/bin/env python -import sys, os, re, shlex -from os.path import exists, abspath, dirname, join, isdir, relpath, expanduser - -try: - # Allow use of setuptools so eggs can be built. 
- from setuptools import setup, Command -except ImportError: - from distutils.core import setup, Command - -from distutils.extension import Extension -from distutils.errors import * - -if sys.hexversion >= 0x03000000: - from configparser import ConfigParser -else: - from ConfigParser import ConfigParser - -OFFICIAL_BUILD = 9999 - -# This version identifier should refer to the NEXT release, not the -# current one. After each release, the version should be incremented. -VERSION = '4.0.39' - - -def _print(s): - # Python 2/3 compatibility - sys.stdout.write(s + '\n') - - -class VersionCommand(Command): - - description = "prints the pyodbc version, determined from git" - - user_options = [] - - def initialize_options(self): - self.verbose = 0 - - def finalize_options(self): - pass - - def run(self): - version_str, _version = get_version() - sys.stdout.write(version_str + '\n') - +VERSION = '5.0.0' -class TagsCommand(Command): - - description = 'runs etags' - - user_options = [] +import sys, os, re, shlex, subprocess +from os.path import exists, abspath, dirname, join, isdir, relpath, expanduser +from inspect import cleandoc - def initialize_options(self): - pass +from setuptools import setup, Command +from setuptools.extension import Extension +from setuptools.errors import * - def finalize_options(self): - pass +from configparser import ConfigParser - def run(self): - # Windows versions of etag do not seem to expand wildcards (which Unix shells normally do for Unix utilities), - # so find all of the files ourselves. 
- files = [ join('src', f) for f in os.listdir('src') if f.endswith(('.h', '.cpp')) ] - cmd = 'etags %s' % ' '.join(files) - return os.system(cmd) +def _run(cmd): + return subprocess.run(cmd, check=False, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, + encoding='utf_8', shell=True).stdout def main(): + settings = get_compiler_settings() - version_str, version = get_version() - - with open(join(dirname(abspath(__file__)), 'README.md')) as f: - long_description = f.read() - - settings = get_compiler_settings(version_str) - - files = [ relpath(join('src', f)) for f in os.listdir('src') if f.endswith('.cpp') ] + files = [ relpath(join('src', f)) for f in os.listdir('src') + if f.endswith('.cpp') and (f != 'npcontainer.cpp' or numpy) ] if exists('MANIFEST'): os.remove('MANIFEST') - kwargs = { - 'name': "pyodbc", - 'version': version_str, - 'description': "DB API Module for ODBC", - - 'long_description': long_description, - 'long_description_content_type': 'text/markdown', - - 'maintainer': "Michael Kleehammer", - 'maintainer_email': "michael@kleehammer.com", - - 'ext_modules': [Extension('pyodbc', sorted(files), **settings)], - - 'packages': [''], - 'package_dir': {'': 'src'}, - 'package_data': {'': ['pyodbc.pyi']}, # places pyodbc.pyi alongside pyodbc.{platform}.{pyd|so} in site-packages - - 'license': 'MIT', - - 'python_requires': '>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*', - - 'classifiers': ['Development Status :: 5 - Production/Stable', - 'Intended Audience :: Developers', - 'Intended Audience :: System Administrators', - 'License :: OSI Approved :: MIT License', - 'Operating System :: Microsoft :: Windows', - 'Operating System :: POSIX', - 'Programming Language :: Python', - 'Programming Language :: Python :: 2', - 'Programming Language :: Python :: 2.7', - 'Programming Language :: Python :: 3', - 'Programming Language :: Python :: 3.6', - 'Programming Language :: Python :: 3.7', - 'Programming Language :: Python :: 3.8', - 'Programming 
Language :: Python :: 3.9', - 'Programming Language :: Python :: 3.10', - 'Programming Language :: Python :: 3.11', - 'Topic :: Database', - ], - - 'url': 'https://github.com/mkleehammer/pyodbc', - 'cmdclass': { 'version' : VersionCommand, - 'tags' : TagsCommand } - } - - if sys.hexversion >= 0x02060000: - kwargs['options'] = { + setup( + name="pyodbc", + version=VERSION, + description="DB API Module for ODBC", + long_description=cleandoc(""" + pyodbc is an open source Python module that makes accessing ODBC databases simple. + It implements the [DB API 2.0](https://www.python.org/dev/peps/pep-0249) + specification but is packed with even more Pythonic convenience."""), + maintainer= "Michael Kleehammer", + maintainer_email="michael@kleehammer.com", + url='https://github.com/mkleehammer/pyodbc', + ext_modules=[Extension('pyodbc', sorted(files), **settings)], + data_files=[ + ('', ['src/pyodbc.pyi']) # places pyodbc.pyi alongside pyodbc.py in site-packages + ], + license='MIT', + python_requires='>=3.7', + classifiers=['Development Status :: 5 - Production/Stable', + 'Intended Audience :: Developers', + 'Intended Audience :: System Administrators', + 'License :: OSI Approved :: MIT License', + 'Operating System :: Microsoft :: Windows', + 'Operating System :: POSIX', + 'Programming Language :: Python', + 'Programming Language :: Python :: 3', + 'Topic :: Database', + ], + options={ 'bdist_wininst': {'user_access_control' : 'auto'} - } - - setup(**kwargs) + } + ) -def get_compiler_settings(version_str): +def get_compiler_settings(): settings = { 'extra_compile_args' : [], 'extra_link_args': [], 'libraries': [], 'include_dirs': [], - 'define_macros' : [ ('PYODBC_VERSION', version_str) ] + 'define_macros' : [ ('PYODBC_VERSION', VERSION) ] } - - # This isn't the best or right way to do this, but I don't see how someone is supposed to sanely subclass the build - # command. 
- for option in ['assert', 'trace', 'leak-check']: - try: - sys.argv.remove('--%s' % option) - settings['define_macros'].append(('PYODBC_%s' % option.replace('-', '_').upper(), 1)) - except ValueError: - pass + if numpy: + settings['include_dirs'].append(numpy.get_include()) + settings['define_macros'].append(('WITH_NUMPY', '1')) if os.name == 'nt': settings['extra_compile_args'].extend([ @@ -238,12 +158,12 @@ def get_compiler_settings(version_str): if ldflags: settings['extra_link_args'].extend(ldflags.split()) - from array import array - UNICODE_WIDTH = array('u').itemsize -# if UNICODE_WIDTH == 4: -# # This makes UnixODBC use UCS-4 instead of UCS-2, which works better with sizeof(wchar_t)==4. -# # Thanks to Marc-Antoine Parent -# settings['define_macros'].append(('SQL_WCHART_CONVERT', '1')) + # from array import array + # UNICODE_WIDTH = array('u').itemsize + # if UNICODE_WIDTH == 4: + # # This makes UnixODBC use UCS-4 instead of UCS-2, which works better with sizeof(wchar_t)==4. + # # Thanks to Marc-Antoine Parent + # settings['define_macros'].append(('SQL_WCHART_CONVERT', '1')) # What is the proper way to detect iODBC, MyODBC, unixODBC, etc.? settings['libraries'].append('odbc') @@ -251,121 +171,5 @@ def get_compiler_settings(version_str): return settings -def get_version(): - """ - Returns the version of the product as (description, [major,minor,micro,beta]). - - If the release is official, `beta` will be 9999 (OFFICIAL_BUILD). - - 1. If in a git repository, use the latest tag (git describe). - 2. If in an unzipped source directory (from setup.py sdist), - read the version from the PKG-INFO file. - 3. Use 4.0.0.dev0 and complain a lot. - """ - # My goal is to (1) provide accurate tags for official releases but (2) not have to manage tags for every test - # release. - # - # Official versions are tagged using 3 numbers: major, minor, micro. A build of a tagged version should produce - # the version using just these pieces, such as 2.1.4. 
- # - # Unofficial versions are "working towards" the next version. So the next unofficial build after 2.1.4 would be a - # beta for 2.1.5. Using 'git describe' we can find out how many changes have been made after 2.1.4 and we'll use - # this count as the beta id (beta1, beta2, etc.) - # - # Since the 4 numbers are put into the Windows DLL, we want to make sure the beta versions sort *before* the - # official, so we set the official build number to 9999, but we don't show it. - - name = None # branch/feature name. Should be None for official builds. - numbers = None # The 4 integers that make up the version. - - # If we are in the CICD pipeline, use the VERSION. There is no tagging information available - # because Github Actions fetches the repo with the options --no-tags and --depth=1. - - # CI providers (Github Actions / Travis / CircleCI / AppVeyor / etc.) typically set CI to "true", but - # in cibuildwheel linux containers, the usual CI env vars are not available, only CIBUILDWHEEL. - if os.getenv('CI', 'false').lower() == 'true' or 'CIBUILDWHEEL' in os.environ: - name = VERSION - numbers = [int(p) for p in VERSION.split('.')] - return name, numbers - - # If this is a source release the version will have already been assigned and be in the PKG-INFO file. - - name, numbers = _get_version_pkginfo() - - # If not a source release, we should be in a git repository. Look for the latest tag. - - if not numbers: - name, numbers = _get_version_git() - - if not numbers: - _print('WARNING: Unable to determine version. 
Using 4.0.0.0') - name, numbers = '4.0.dev0', [4,0,0,0] - - return name, numbers - - -def _get_version_pkginfo(): - filename = join(dirname(abspath(__file__)), 'PKG-INFO') - if exists(filename): - re_ver = re.compile(r'^Version: \s+ (\d+)\.(\d+)\.(\d+) (?: b(\d+))?', re.VERBOSE) - for line in open(filename): - match = re_ver.search(line) - if match: - name = line.split(':', 1)[1].strip() - numbers = [int(n or 0) for n in match.groups()[:3]] - numbers.append(int(match.group(4) or OFFICIAL_BUILD)) # don't use 0 as a default for build - return name, numbers - - return None, None - - -def _get_version_git(): - """ - If this is a git repo, returns the version as text and the version as a list of 4 subparts: - ("4.0.33", [4, 0, 33, 9999]). - - If this is not a git repo, (None, None) is returned. - """ - n, result = getoutput("git describe --tags --match [0-9]*") - if n: - _print('WARNING: git describe failed with: %s %s' % (n, result)) - return None, None - match = re.match(r'(\d+).(\d+).(\d+) (?: -(\d+)-g[0-9a-z]+)?', result, re.VERBOSE) - if not match: - return None, None - - numbers = [int(n or OFFICIAL_BUILD) for n in match.groups()] - if numbers[-1] == OFFICIAL_BUILD: - name = '%s.%s.%s' % tuple(numbers[:3]) - if numbers[-1] != OFFICIAL_BUILD: - # This is a beta of the next micro release, so increment the micro number to reflect this. - numbers[-2] += 1 - name = '%s.%s.%sb%d' % tuple(numbers) - - n, result = getoutput('git rev-parse --abbrev-ref HEAD') - - if result == 'HEAD': - # We are not on a branch. In the past we would add "+commitHHHH" to it, but this - # interferes with the CI system which checks out by tag name. The goal of the version - # numbers is to be reproducible, so we may want to put this back if we detect the - # current commit is not on the master branch. 
- - # n, result = getoutput('git rev-parse --short HEAD') - # name = name + '+commit' + result - - pass - else: - if result != 'master' and not re.match(r'^v\d+$', result): - name = name + '+' + result.replace('-', '') - - return name, numbers - - -def getoutput(cmd): - pipe = os.popen(cmd, 'r') - text = pipe.read().rstrip('\n') - status = pipe.close() or 0 - return status, text - if __name__ == '__main__': main() diff --git a/src/buffer.cpp b/src/buffer.cpp deleted file mode 100644 index ddd1f154..00000000 --- a/src/buffer.cpp +++ /dev/null @@ -1,63 +0,0 @@ - -// Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated -// documentation files (the "Software"), to deal in the Software without restriction, including without limitation the -// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to -// permit persons to whom the Software is furnished to do so. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE -// WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS -// OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR -// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -#include "pyodbc.h" - -#if PY_MAJOR_VERSION < 3 - - -#include "buffer.h" -#include "pyodbcmodule.h" - -Py_ssize_t -PyBuffer_GetMemory(PyObject* buffer, const char** pp) -{ - PyBufferProcs* procs = Py_TYPE(buffer)->tp_as_buffer; - - if (!procs || !PyType_HasFeature(Py_TYPE(buffer), Py_TPFLAGS_HAVE_GETCHARBUFFER)) - { - // Can't access the memory directly because the buffer object doesn't support it. 
- return -1; - } - - if (procs->bf_getsegcount(buffer, 0) != 1) - { - // Can't access the memory directly because there is more than one segment. - return -1; - } - -#if PY_VERSION_HEX >= 0x02050000 - char* pT = 0; -#else - const char* pT = 0; -#endif - Py_ssize_t cb = procs->bf_getcharbuffer(buffer, 0, &pT); - - if (pp) - *pp = pT; - - return cb; -} - -Py_ssize_t -PyBuffer_Size(PyObject* self) -{ - if (!PyBuffer_Check(self)) - { - PyErr_SetString(PyExc_TypeError, "Not a buffer!"); - return 0; - } - - Py_ssize_t total_len = 0; - Py_TYPE(self)->tp_as_buffer->bf_getsegcount(self, &total_len); - return total_len; -} -#endif diff --git a/src/buffer.h b/src/buffer.h deleted file mode 100644 index f8d26dfc..00000000 --- a/src/buffer.h +++ /dev/null @@ -1,60 +0,0 @@ - -// Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated -// documentation files (the "Software"), to deal in the Software without restriction, including without limitation the -// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to -// permit persons to whom the Software is furnished to do so. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE -// WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS -// OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR -// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -#ifndef _BUFFER_H -#define _BUFFER_H - -#if PY_MAJOR_VERSION < 3 - -// If the buffer object has a single, accessible segment, returns the length of the buffer. If 'pp' is not NULL, the -// address of the segment is also returned. 
If there is more than one segment or if it cannot be accessed, -1 is -// returned and 'pp' is not modified. -Py_ssize_t -PyBuffer_GetMemory(PyObject* buffer, const char** pp); - -// Returns the size of a Python buffer. -// -// If an error occurs, zero is returned, but zero is a valid buffer size (I guess), so use PyErr_Occurred to determine -// if it represents a failure. -Py_ssize_t -PyBuffer_Size(PyObject* self); - - -class BufferSegmentIterator -{ - PyObject* pBuffer; - Py_ssize_t iSegment; - Py_ssize_t cSegments; - -public: - BufferSegmentIterator(PyObject* _pBuffer) - { - pBuffer = _pBuffer; - PyBufferProcs* procs = Py_TYPE(pBuffer)->tp_as_buffer; - iSegment = 0; - cSegments = procs->bf_getsegcount(pBuffer, 0); - } - - bool Next(byte*& pb, SQLLEN &cb) - { - if (iSegment >= cSegments) - return false; - - PyBufferProcs* procs = Py_TYPE(pBuffer)->tp_as_buffer; - cb = procs->bf_getreadbuffer(pBuffer, iSegment++, (void**)&pb); - return true; - } -}; - -#endif // PY_MAJOR_VERSION - - -#endif diff --git a/src/cnxninfo.cpp b/src/cnxninfo.cpp index 99ce33bd..6a417062 100644 --- a/src/cnxninfo.cpp +++ b/src/cnxninfo.cpp @@ -22,17 +22,13 @@ bool CnxnInfo_init() // Called during startup to give us a chance to import the hash code. If we can't find it, we'll print a warning // to the console and not cache anything. - // First try hashlib which was added in 2.5. 2.6 complains using warnings which we don't want affecting the - // caller. - map_hash_to_info = PyDict_New(); - update = PyString_FromString("update"); + update = PyUnicode_FromString("update"); if (!map_hash_to_info || !update) return false; hashlib = PyImport_ImportModule("hashlib"); - if (!hashlib) return false; @@ -41,20 +37,10 @@ bool CnxnInfo_init() static PyObject* GetHash(PyObject* p) { -#if PY_MAJOR_VERSION >= 3 Object bytes(PyUnicode_AsUTF8String(p)); if (!bytes) return 0; p = bytes.Get(); -#else - Object bytes(PyUnicode_Check(p) ? 
PyUnicode_EncodeUTF8(PyUnicode_AS_UNICODE(p), PyUnicode_GET_SIZE(p), 0) : 0); - if (PyUnicode_Check(p)) - { - if (!bytes) - return 0; - p = bytes.Get(); - } -#endif Object hash(PyObject_CallMethod(hashlib, "new", "s", "sha1")); if (!hash.IsValid())
@@ -115,7 +101,7 @@ static PyObject* CnxnInfo_New(Connection* cnxn) // WARNING: The GIL lock is released for the *entire* function here. Do not // touch any objects, call Python APIs, etc. We are simply making ODBC // calls and setting atomic values (ints & chars). Also, make sure the lock - // gets released -- do not add an early exit. + // gets reacquired -- do not add an early exit. SQLRETURN ret; Py_BEGIN_ALLOW_THREADS diff --git a/src/connection.cpp b/src/connection.cpp index 6f561d19..a86af32c 100644 --- a/src/connection.cpp +++ b/src/connection.cpp @@ -9,7 +9,6 @@ // OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #include "pyodbc.h" -#include "buffer.h" #include "wrapper.h" #include "textenc.h" #include "connection.h" @@ -18,10 +17,6 @@ #include "errors.h" #include "cnxninfo.h" -#if PY_MAJOR_VERSION < 3 -static bool IsStringType(PyObject* t) { return (void*)t == (void*)&PyString_Type; } -static bool IsUnicodeType(PyObject* t) { return (void*)t == (void*)&PyUnicode_Type; } -#endif static char connection_doc[] = "Connection objects manage connections to the database.\n" @@ -49,28 +44,31 @@ static Connection* Connection_Validate(PyObject* self) return cnxn; } -static bool Connect(PyObject* pConnectString, HDBC hdbc, bool fAnsi, long timeout, - Object& encoding) -{ - // This should have been checked by the global connect function. - I(PyString_Check(pConnectString) || PyUnicode_Check(pConnectString)); - // The driver manager determines if the app is a Unicode app based on whether we call SQLDriverConnectA or - // SQLDriverConnectW. Some drivers, notably Microsoft Access/Jet, change their behavior based on this, so we try - // the Unicode version first. 
(The Access driver only supports Unicode text, but SQLDescribeCol returns SQL_CHAR - instead of SQL_WCHAR if we connect with the ANSI version. Obviously this causes lots of errors since we believe - what it tells us (SQL_CHAR).) +static char* StrDup(const char* text) { + // Like strdup() but uses PyMem_Malloc for the memory. This is only used for internal + // encodings which are known to be ASCII. + ssize_t cb = strlen(text) + 1; + char* pb = (char*)PyMem_Malloc(cb); + if (!pb) { + PyErr_NoMemory(); + return 0; + } + memcpy(pb, text, cb); + return pb; +} + - // Python supports only UCS-2 and UCS-4, so we shouldn't need to worry about receiving surrogate pairs. However, - // Windows does use UCS-16, so it is possible something would be misinterpreted as one. We may need to examine - // this more. +static bool Connect(PyObject* pConnectString, HDBC hdbc, long timeout, Object& encoding) +{ + assert(PyUnicode_Check(pConnectString)); SQLRETURN ret; if (timeout > 0) { Py_BEGIN_ALLOW_THREADS - ret = SQLSetConnectAttr(hdbc, SQL_ATTR_LOGIN_TIMEOUT, (SQLPOINTER)(uintptr_t)timeout, SQL_IS_UINTEGER); + ret = SQLSetConnectAttrW(hdbc, SQL_ATTR_LOGIN_TIMEOUT, (SQLPOINTER)(uintptr_t)timeout, SQL_IS_UINTEGER); Py_END_ALLOW_THREADS if (!SQL_SUCCEEDED(ret)) RaiseErrorFromHandle(0, "SQLSetConnectAttr(SQL_ATTR_LOGIN_TIMEOUT)", hdbc, SQL_NULL_HANDLE); @@ -80,49 +78,18 @@ static bool Connect(PyObject* pConnectString, HDBC hdbc, bool fAnsi, long timeou Object encBytes; if (encoding) { - #if PY_MAJOR_VERSION < 3 - if (PyString_Check(encoding)) - { - szEncoding = PyString_AsString(encoding); - if (!szEncoding) - return false; - } - #endif if (PyUnicode_Check(encoding)) { - #if PY_MAJOR_VERSION < 3 - encBytes = PyUnicode_AsUTF8String(encoding); - if (!encBytes) - return false; - szEncoding = PyString_AS_STRING(encBytes.Get()); - #else - szEncoding = PyUnicode_AsUTF8(encoding); - #endif + szEncoding = PyUnicode_AsUTF8(encoding); } } - if (!fAnsi) - { - // I want to call the W version 
when possible since the driver can use it as an - // indication that we can handle Unicode. - - SQLWChar wchar(pConnectString, szEncoding ? szEncoding : ENCSTR_UTF16NE); - if (!wchar.isValid()) - return false; - - Py_BEGIN_ALLOW_THREADS - ret = SQLDriverConnectW(hdbc, 0, wchar.psz, SQL_NTS, 0, 0, 0, SQL_DRIVER_NOPROMPT); - Py_END_ALLOW_THREADS - if (SQL_SUCCEEDED(ret)) - return true; - } - - SQLWChar wchar(pConnectString, szEncoding ? szEncoding : "utf-8"); - if (!wchar.isValid()) + SQLWChar cstring(pConnectString, szEncoding ? szEncoding : ENCSTR_UTF16NE); + if (!cstring.isValid()) return false; Py_BEGIN_ALLOW_THREADS - ret = SQLDriverConnect(hdbc, 0, (SQLCHAR*)wchar.psz, SQL_NTS, 0, 0, 0, SQL_DRIVER_NOPROMPT); + ret = SQLDriverConnectW(hdbc, 0, cstring, SQL_NTS, 0, 0, 0, SQL_DRIVER_NOPROMPT); Py_END_ALLOW_THREADS if (SQL_SUCCEEDED(ret)) return true; @@ -138,6 +105,8 @@ static bool ApplyPreconnAttrs(HDBC hdbc, SQLINTEGER ikey, PyObject *value, char SQLPOINTER ivalue = 0; SQLINTEGER vallen = 0; + SQLWChar sqlchar; + if (PyLong_Check(value)) { if (_PyLong_Sign(value) >= 0) @@ -150,55 +119,16 @@ static bool ApplyPreconnAttrs(HDBC hdbc, SQLINTEGER ikey, PyObject *value, char vallen = SQL_IS_INTEGER; } } -#if PY_MAJOR_VERSION < 3 - else if (PyInt_Check(value)) - { - ivalue = (SQLPOINTER)PyInt_AsLong(value); - vallen = SQL_IS_INTEGER; - } - else if (PyBuffer_Check(value)) - { - // We can only assume and take the first segment. - PyBuffer_GetMemory(value, (const char**)&ivalue); - vallen = SQL_IS_POINTER; - } -#endif -#if PY_VERSION_HEX >= 0x02060000 else if (PyByteArray_Check(value)) { ivalue = (SQLPOINTER)PyByteArray_AsString(value); vallen = SQL_IS_POINTER; } -#endif - else if (PyBytes_Check(value)) - { - ivalue = PyBytes_AS_STRING(value); -#if PY_MAJOR_VERSION < 3 - vallen = SQL_NTS; -#else - vallen = SQL_IS_POINTER; -#endif - } else if (PyUnicode_Check(value)) { - Object stringholder; -if (sizeof(Py_UNICODE) == 2 // This part should be compile-time. 
- && (!strencoding || !strcmp(strencoding, "utf-16le"))) -{ - // default or utf-16le is set, pass through directly - ivalue = PyUnicode_AS_UNICODE(value); -} -else -{ - // use strencoding to convert, default to utf-16le if not set. - stringholder = PyCodec_Encode(value, strencoding ? strencoding : "utf-16le", "strict"); - ivalue = PyBytes_AS_STRING(stringholder.Get()); -} + sqlchar.set(value, strencoding ? strencoding : "utf-16le"); + ivalue = sqlchar.get(); vallen = SQL_NTS; - Py_BEGIN_ALLOW_THREADS - ret = SQLSetConnectAttrW(hdbc, ikey, ivalue, vallen); - Py_END_ALLOW_THREADS - goto checkSuccess; } else if (PySequence_Check(value)) { @@ -214,10 +144,9 @@ else } Py_BEGIN_ALLOW_THREADS - ret = SQLSetConnectAttr(hdbc, ikey, ivalue, vallen); + ret = SQLSetConnectAttrW(hdbc, ikey, ivalue, vallen); Py_END_ALLOW_THREADS -checkSuccess: if (!SQL_SUCCEEDED(ret)) { RaiseErrorFromHandle(0, "SQLSetConnectAttr", hdbc, SQL_NULL_HANDLE); @@ -229,15 +158,9 @@ else return true; } -PyObject* Connection_New(PyObject* pConnectString, bool fAutoCommit, bool fAnsi, long timeout, bool fReadOnly, +PyObject* Connection_New(PyObject* pConnectString, bool fAutoCommit, long timeout, bool fReadOnly, PyObject* attrs_before, Object& encoding) { - // pConnectString - // A string or unicode object. (This must be checked by the caller.) - // - // fAnsi - // If true, do not attempt a Unicode connection. 
- // // Allocate HDBC and connect // @@ -272,10 +195,6 @@ PyObject* Connection_New(PyObject* pConnectString, bool fAutoCommit, bool fAnsi, if (PyLong_Check(key)) ikey = (int)PyLong_AsLong(key); -#if PY_MAJOR_VERSION < 3 - else if (PyInt_Check(key)) - ikey = (int)PyInt_AsLong(key); -#endif if (!ApplyPreconnAttrs(hdbc, ikey, value, strencoding)) { return 0; @@ -283,7 +202,7 @@ PyObject* Connection_New(PyObject* pConnectString, bool fAutoCommit, bool fAnsi, } } - if (!Connect(pConnectString, hdbc, fAnsi, timeout, encoding)) + if (!Connect(pConnectString, hdbc, timeout, encoding)) { // Connect has already set an exception. Py_BEGIN_ALLOW_THREADS @@ -330,15 +249,15 @@ PyObject* Connection_New(PyObject* pConnectString, bool fAutoCommit, bool fAnsi, // Server the encoding is based on the database's collation. We ask the driver / DB to // convert to SQL_C_WCHAR and use the ODBC default of UTF-16LE. cnxn->sqlchar_enc.optenc = OPTENC_UTF16NE; - cnxn->sqlchar_enc.name = _strdup(ENCSTR_UTF16NE); + cnxn->sqlchar_enc.name = StrDup(ENCSTR_UTF16NE); cnxn->sqlchar_enc.ctype = SQL_C_WCHAR; cnxn->sqlwchar_enc.optenc = OPTENC_UTF16NE; - cnxn->sqlwchar_enc.name = _strdup(ENCSTR_UTF16NE); + cnxn->sqlwchar_enc.name = StrDup(ENCSTR_UTF16NE); cnxn->sqlwchar_enc.ctype = SQL_C_WCHAR; cnxn->metadata_enc.optenc = OPTENC_UTF16NE; - cnxn->metadata_enc.name = _strdup(ENCSTR_UTF16NE); + cnxn->metadata_enc.name = StrDup(ENCSTR_UTF16NE); cnxn->metadata_enc.ctype = SQL_C_WCHAR; // Note: I attempted to use UTF-8 here too since it can hold any type, but SQL Server fails @@ -346,24 +265,10 @@ PyObject* Connection_New(PyObject* pConnectString, bool fAutoCommit, bool fAnsi, // character. I don't know if this is a bug in SQL Server's driver or if I'm missing // something, so we'll stay with the default ODBC conversions. 
cnxn->unicode_enc.optenc = OPTENC_UTF16NE; - cnxn->unicode_enc.name = _strdup(ENCSTR_UTF16NE); + cnxn->unicode_enc.name = StrDup(ENCSTR_UTF16NE); cnxn->unicode_enc.ctype = SQL_C_WCHAR; -#if PY_MAJOR_VERSION < 3 - cnxn->str_enc.optenc = OPTENC_UTF8; - cnxn->str_enc.name = _strdup("utf-8"); - cnxn->str_enc.ctype = SQL_C_CHAR; - - cnxn->sqlchar_enc.to = TO_UNICODE; - cnxn->sqlwchar_enc.to = TO_UNICODE; - cnxn->metadata_enc.to = TO_UNICODE; -#endif - - if (!cnxn->sqlchar_enc.name || !cnxn->sqlwchar_enc.name || !cnxn->metadata_enc.name || !cnxn->unicode_enc.name -#if PY_MAJOR_VERSION < 3 - || !cnxn->str_enc.name -#endif - ) + if (!cnxn->sqlchar_enc.name || !cnxn->sqlwchar_enc.name || !cnxn->metadata_enc.name || !cnxn->unicode_enc.name) { PyErr_NoMemory(); Py_DECREF(cnxn); @@ -406,8 +311,6 @@ PyObject* Connection_New(PyObject* pConnectString, bool fAutoCommit, bool fAnsi, } } - TRACE("cnxn.new cnxn=%p hdbc=%d\n", cnxn, cnxn->hdbc); - // // Gather connection-level information we'll need later. 
// @@ -437,12 +340,12 @@ static void _clear_conv(Connection* cnxn) { if (cnxn->conv_count != 0) { - pyodbc_free(cnxn->conv_types); + PyMem_Free(cnxn->conv_types); cnxn->conv_types = 0; for (int i = 0; i < cnxn->conv_count; i++) Py_XDECREF(cnxn->conv_funcs[i]); - pyodbc_free(cnxn->conv_funcs); + PyMem_Free(cnxn->conv_funcs); cnxn->conv_funcs = 0; cnxn->conv_count = 0; @@ -515,18 +418,14 @@ static int Connection_clear(PyObject* self) Py_XDECREF(cnxn->searchescape); cnxn->searchescape = 0; - free((void*)cnxn->sqlchar_enc.name); + PyMem_Free((void*)cnxn->sqlchar_enc.name); cnxn->sqlchar_enc.name = 0; - free((void*)cnxn->sqlwchar_enc.name); + PyMem_Free((void*)cnxn->sqlwchar_enc.name); cnxn->sqlwchar_enc.name = 0; - free((void*)cnxn->metadata_enc.name); + PyMem_Free((void*)cnxn->metadata_enc.name); cnxn->metadata_enc.name = 0; - free((void*)cnxn->unicode_enc.name); + PyMem_Free((void*)cnxn->unicode_enc.name); cnxn->unicode_enc.name = 0; -#if PY_MAJOR_VERSION < 3 - free((void*)cnxn->str_enc.name); - cnxn->str_enc.name = 0; -#endif Py_XDECREF(cnxn->attrs_before); cnxn->attrs_before = 0; @@ -827,25 +726,18 @@ static PyObject* Connection_getinfo(PyObject* self, PyObject* args) break; case GI_STRING: - result = PyString_FromStringAndSize(szBuffer, (Py_ssize_t)cch); + result = PyUnicode_FromStringAndSize(szBuffer, (Py_ssize_t)cch); break; case GI_UINTEGER: { SQLUINTEGER n = *(SQLUINTEGER*)szBuffer; // Does this work on PPC or do we need a union? 
-#if PY_MAJOR_VERSION >= 3 result = PyLong_FromLong((long)n); -#else - if (n <= (SQLUINTEGER)PyInt_GetMax()) - result = PyInt_FromLong((long)n); - else - result = PyLong_FromUnsignedLong(n); -#endif break; } case GI_USMALLINT: - result = PyInt_FromLong(*(SQLUSMALLINT*)szBuffer); + result = PyLong_FromLong(*(SQLUSMALLINT*)szBuffer); break; } @@ -1008,7 +900,7 @@ static PyObject* Connection_getsearchescape(PyObject* self, void* closure) if (!SQL_SUCCEEDED(ret)) return RaiseErrorFromHandle(cnxn, "SQLGetInfo", cnxn->hdbc, SQL_NULL_HANDLE); - cnxn->searchescape = PyString_FromStringAndSize(sz, (Py_ssize_t)cch); + cnxn->searchescape = PyUnicode_FromStringAndSize(sz, (Py_ssize_t)cch); } Py_INCREF(cnxn->searchescape); @@ -1065,7 +957,7 @@ static PyObject* Connection_gettimeout(PyObject* self, void* closure) if (!cnxn) return 0; - return PyInt_FromLong(cnxn->timeout); + return PyLong_FromLong(cnxn->timeout); } static int Connection_settimeout(PyObject* self, PyObject* value, void* closure) @@ -1081,7 +973,7 @@ static int Connection_settimeout(PyObject* self, PyObject* value, void* closure) PyErr_SetString(PyExc_TypeError, "Cannot delete the timeout attribute."); return -1; } - long timeout = PyInt_AsLong(value); + long timeout = PyLong_AsLong(value); if (timeout == -1 && PyErr_Occurred()) return -1; if (timeout < 0) @@ -1142,8 +1034,8 @@ static bool _remove_converter(PyObject* self, SQLSMALLINT sqltype) // Note: If the realloc fails, the old array is still around and is 1 element too long but // everything will still work, so we ignore. 
- pyodbc_realloc((BYTE**)&types, count * sizeof(SQLSMALLINT)); - pyodbc_realloc((BYTE**)&funcs, count * sizeof(PyObject*)); + PyMem_Realloc((BYTE**)&types, count * sizeof(SQLSMALLINT)); + PyMem_Realloc((BYTE**)&funcs, count * sizeof(PyObject*)); cnxn->conv_count = count; cnxn->conv_types = types; @@ -1177,15 +1069,15 @@ static bool _add_converter(PyObject* self, SQLSMALLINT sqltype, PyObject* func) PyObject** oldfuncs = cnxn->conv_funcs; int newcount = oldcount + 1; - SQLSMALLINT* newtypes = (SQLSMALLINT*)pyodbc_malloc(sizeof(SQLSMALLINT) * newcount); - PyObject** newfuncs = (PyObject**)pyodbc_malloc(sizeof(PyObject*) * newcount); + SQLSMALLINT* newtypes = (SQLSMALLINT*)PyMem_Malloc(sizeof(SQLSMALLINT) * newcount); + PyObject** newfuncs = (PyObject**)PyMem_Malloc(sizeof(PyObject*) * newcount); if (newtypes == 0 || newfuncs == 0) { if (newtypes) - pyodbc_free(newtypes); + PyMem_Free(newtypes); if (newfuncs) - pyodbc_free(newfuncs); + PyMem_Free(newfuncs); PyErr_NoMemory(); return false; } @@ -1204,8 +1096,8 @@ static bool _add_converter(PyObject* self, SQLSMALLINT sqltype, PyObject* func) memcpy(&newtypes[1], oldtypes, sizeof(SQLSMALLINT) * oldcount); memcpy(&newfuncs[1], oldfuncs, sizeof(PyObject*) * oldcount); - pyodbc_free(oldtypes); - pyodbc_free(oldfuncs); + PyMem_Free(oldtypes); + PyMem_Free(oldfuncs); } return true; @@ -1226,11 +1118,7 @@ static char conv_add_doc[] = " The converter function which will be called with a single parameter, the\n" " value, and should return the converted value. If the value is NULL, the\n" " parameter will be None. Otherwise it will be a " -#if PY_MAJOR_VERSION >= 3 "bytes object.\n" -#else - "str object with the raw bytes.\n" -#endif "\n" "If func is None, any existing converter is removed." 
; @@ -1357,7 +1245,7 @@ static void NormalizeCodecName(const char* src, char* dest, size_t cbDest) *pch = '\0'; } -static bool SetTextEncCommon(TextEnc& enc, const char* encoding, int ctype, bool allow_raw) +static bool SetTextEncCommon(TextEnc& enc, const char* encoding, int ctype) { // Code common to setencoding and setdecoding. @@ -1373,29 +1261,11 @@ static bool SetTextEncCommon(TextEnc& enc, const char* encoding, int ctype, bool char lower[30]; NormalizeCodecName(encoding, lower, sizeof(lower)); -#if PY_MAJOR_VERSION < 3 - if (strcmp(lower, "|raw|") == 0) - { - if (!allow_raw) - { - // Give a better error message for 'raw' than "not a registered codec". It is never - // registered. - PyErr_Format(PyExc_ValueError, "Raw codec is only allowed for str / SQL_CHAR"); - return false; - } - } - else if (!PyCodec_KnownEncoding(encoding)) - { - PyErr_Format(PyExc_ValueError, "not a registered codec: '%s'", encoding); - return false; - } -#else if (!PyCodec_KnownEncoding(encoding)) { PyErr_Format(PyExc_ValueError, "not a registered codec: '%s'", encoding); return false; } -#endif if (ctype != 0 && ctype != SQL_WCHAR && ctype != SQL_CHAR) { @@ -1403,14 +1273,14 @@ static bool SetTextEncCommon(TextEnc& enc, const char* encoding, int ctype, bool return false; } - char* cpy = _strdup(encoding); + char* cpy = StrDup(encoding); if (!cpy) { PyErr_NoMemory(); return false; } - free((void*)enc.name); + PyMem_Free((void*)enc.name); enc.name = cpy; if (strstr("|utf-8|utf8|", lower)) @@ -1453,13 +1323,6 @@ static bool SetTextEncCommon(TextEnc& enc, const char* encoding, int ctype, bool enc.optenc = OPTENC_LATIN1; enc.ctype = (SQLSMALLINT)(ctype ? 
ctype : SQL_C_CHAR); } -#if PY_MAJOR_VERSION < 3 - else if (strstr("|raw|", lower)) - { - enc.optenc = OPTENC_RAW; - enc.ctype = SQL_C_CHAR; - } -#endif else { enc.optenc = OPTENC_NONE; @@ -1473,7 +1336,6 @@ static PyObject* Connection_setencoding(PyObject* self, PyObject* args, PyObject { Connection* cnxn = (Connection*)self; -#if PY_MAJOR_VERSION >= 3 // In Python 3 we only support encodings for Unicode text. char* encoding = 0; int ctype = 0; @@ -1481,35 +1343,15 @@ static PyObject* Connection_setencoding(PyObject* self, PyObject* args, PyObject if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|si", kwlist, &encoding, &ctype)) return 0; TextEnc& enc = cnxn->unicode_enc; - bool allow_raw = false; -#else - // In Python 2, we support encodings for Unicode and strings. - PyObject* from_type; - char* encoding = 0; - int ctype = 0; - static char *kwlist[] = { "fromtype", "encoding", "ctype", 0 }; - if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O|si", kwlist, &from_type, &encoding, &ctype)) - return 0; - - if (!IsUnicodeType(from_type) && ! IsStringType(from_type)) - return PyErr_Format(PyExc_TypeError, "fromtype must be str or unicode"); - TextEnc& enc = IsStringType(from_type) ? 
cnxn->str_enc : cnxn->unicode_enc; - bool allow_raw = IsStringType(from_type); -#endif - - if (!SetTextEncCommon(enc, encoding, ctype, allow_raw)) + if (!SetTextEncCommon(enc, encoding, ctype)) return 0; Py_RETURN_NONE; } static char setdecoding_doc[] = -#if PY_MAJOR_VERSION >= 3 "setdecoding(sqltype, encoding=None, ctype=None) --> None\n" -#else - "setdecoding(sqltype, encoding=None, ctype=None, to=None) --> None\n" -#endif "\n" "Configures how text of type `ctype` (SQL_CHAR or SQL_WCHAR) is decoded\n" "when read from the database.\n" @@ -1518,9 +1360,6 @@ static char setdecoding_doc[] = "pyodbc uses this lookup the decoding information set by this function.\n" "sqltype: pyodbc.SQL_CHAR or pyodbc.SQL_WCHAR\n\n" "encoding: A registered Python encoding such as \"utf-8\".\n\n" -#if PY_MAJOR_VERSION < 3 - "to: the desired Python object type - str or unicode" -#endif "ctype: The C data type should be requested. Set this to SQL_CHAR for\n" " single-byte encodings like UTF-8 and to SQL_WCHAR for two-byte encodings\n" " like UTF-16."; @@ -1533,31 +1372,10 @@ static PyObject* Connection_setdecoding(PyObject* self, PyObject* args, PyObject int sqltype; char* encoding = 0; int ctype = 0; - bool allow_raw = false; -#if PY_MAJOR_VERSION >= 3 static char *kwlist[] = {"sqltype", "encoding", "ctype", NULL}; if (!PyArg_ParseTupleAndKeywords(args, kwargs, "i|si", kwlist, &sqltype, &encoding, &ctype)) return 0; -#else - int to = 0; - PyObject* toObj = 0; - static char *kwlist[] = {"sqltype", "encoding", "ctype", "to", NULL}; - if (!PyArg_ParseTupleAndKeywords(args, kwargs, "i|siO", kwlist, &sqltype, &encoding, &ctype, &toObj)) - return 0; - - if (toObj) - { - if (IsUnicodeType(toObj)) - to = TO_UNICODE; - else if (IsStringType(toObj)) - to = TO_STR; - else - return PyErr_Format(PyExc_ValueError, "`to` can only be unicode or str"); - } - - allow_raw = (sqltype == SQL_CHAR && to != TO_UNICODE); -#endif if (sqltype != SQL_WCHAR && sqltype != SQL_CHAR && sqltype != SQL_WMETADATA) return 
PyErr_Format(PyExc_ValueError, "Invalid sqltype %d. Must be SQL_CHAR or SQL_WCHAR or SQL_WMETADATA", sqltype); @@ -1565,16 +1383,9 @@ static PyObject* Connection_setdecoding(PyObject* self, PyObject* args, PyObject TextEnc& enc = (sqltype == SQL_CHAR) ? cnxn->sqlchar_enc : ((sqltype == SQL_WMETADATA) ? cnxn->metadata_enc : cnxn->sqlwchar_enc); - if (!SetTextEncCommon(enc, encoding, ctype, allow_raw)) + if (!SetTextEncCommon(enc, encoding, ctype)) return 0; -#if PY_MAJOR_VERSION < 3 - if (!to && enc.optenc == OPTENC_RAW) - enc.to = TO_STR; - else - enc.to = to ? to : TO_UNICODE; -#endif - Py_RETURN_NONE; } @@ -1594,7 +1405,7 @@ static PyObject* Connection_exit(PyObject* self, PyObject* args) Connection* cnxn = (Connection*)self; // If an error has occurred, `args` will be a tuple of 3 values. Otherwise it will be a tuple of 3 `None`s. - I(PyTuple_Check(args)); + assert(PyTuple_Check(args)); if (cnxn->nAutoCommit == SQL_AUTOCOMMIT_OFF) { diff --git a/src/connection.h b/src/connection.h index 6d9e3fce..361c84e2 100644 --- a/src/connection.h +++ b/src/connection.h @@ -52,9 +52,6 @@ struct Connection TextEnc sqlchar_enc; // encoding used when reading SQL_CHAR data TextEnc sqlwchar_enc; // encoding used when reading SQL_WCHAR data TextEnc unicode_enc; // encoding used when writing unicode strings -#if PY_MAJOR_VERSION < 3 - TextEnc str_enc; // encoding used when writing non-unicode strings -#endif TextEnc metadata_enc; // Used when reading column names for Cursor.description. I originally thought I could use @@ -78,7 +75,7 @@ struct Connection SQLLEN GetMaxLength(SQLSMALLINT ctype) const { - I(ctype == SQL_C_BINARY || ctype == SQL_C_WCHAR || ctype == SQL_C_CHAR); + assert(ctype == SQL_C_BINARY || ctype == SQL_C_WCHAR || ctype == SQL_C_CHAR); if (maxwrite != 0) return maxwrite; if (ctype == SQL_C_BINARY) @@ -109,7 +106,7 @@ struct Connection * Used by the module's connect function to create new connection objects. 
If unable to connect to the database, an * exception is set and zero is returned. */ -PyObject* Connection_New(PyObject* pConnectString, bool fAutoCommit, bool fAnsi, long timeout, bool fReadOnly, +PyObject* Connection_New(PyObject* pConnectString, bool fAutoCommit, long timeout, bool fReadOnly, PyObject* attrs_before, Object& encoding); /* diff --git a/src/cursor.cpp b/src/cursor.cpp index 2b9ba96c..ef346f99 100644 --- a/src/cursor.cpp +++ b/src/cursor.cpp @@ -19,20 +19,15 @@ #include "pyodbcmodule.h" #include "connection.h" #include "row.h" -#include "buffer.h" #include "params.h" #include "errors.h" #include "getdata.h" #include "dbspecific.h" #include -enum -{ - CURSOR_REQUIRE_CNXN = 0x00000001, - CURSOR_REQUIRE_OPEN = 0x00000003, // includes _CNXN - CURSOR_REQUIRE_RESULTS = 0x00000007, // includes _OPEN - CURSOR_RAISE_ERROR = 0x00000010, -}; +#ifdef WITH_NUMPY +#include "npcontainer.h" +#endif inline bool StatementIsValid(Cursor* cursor) { @@ -46,7 +41,7 @@ inline bool Cursor_Check(PyObject* o) return o != 0 && Py_TYPE(o) == &CursorType; } -static Cursor* Cursor_Validate(PyObject* obj, DWORD flags) +Cursor* Cursor_Validate(PyObject* obj, DWORD flags) { // Validates that a PyObject is a Cursor (like Cursor_Check) and optionally some other requirements controlled by // `flags`. If valid and all requirements (from the flags) are met, the cursor is returned, cast to Cursor*. @@ -132,15 +127,15 @@ static bool create_name_map(Cursor* cur, SQLSMALLINT field_count, bool lower) bool success = false; PyObject *desc = 0, *colmap = 0, *colinfo = 0, *type = 0, *index = 0, *nullable_obj=0; SQLSMALLINT nameLen = 300; - ODBCCHAR *szName = NULL; + uint16_t *szName = NULL; SQLRETURN ret; - I(cur->hstmt != SQL_NULL_HANDLE && cur->colinfos != 0); + assert(cur->hstmt != SQL_NULL_HANDLE && cur->colinfos != 0); // These are the values we expect after free_results. If this function fails, we do not modify any members, so // they should be set to something Cursor_close can deal with. 
- I(cur->description == Py_None); - I(cur->map_name_to_index == 0); + assert(cur->description == Py_None); + assert(cur->map_name_to_index == 0); if (cur->cnxn->hdbc == SQL_NULL_HANDLE) { @@ -150,7 +145,7 @@ static bool create_name_map(Cursor* cur, SQLSMALLINT field_count, bool lower) desc = PyTuple_New((Py_ssize_t)field_count); colmap = PyDict_New(); - szName = (ODBCCHAR*) pyodbc_malloc((nameLen + 1) * sizeof(ODBCCHAR)); + szName = (uint16_t*) PyMem_Malloc((nameLen + 1) * sizeof(uint16_t)); if (!desc || !colmap || !szName) goto done; @@ -183,7 +178,7 @@ static bool create_name_map(Cursor* cur, SQLSMALLINT field_count, bool lower) // If needed, allocate a bigger column name message buffer and retry. if (cchName > nameLen - 1) { nameLen = cchName + 1; - if (!pyodbc_realloc((BYTE**) &szName, (nameLen + 1) * sizeof(ODBCCHAR))) { + if (!PyMem_Realloc((BYTE**) &szName, (nameLen + 1) * sizeof(uint16_t))) { PyErr_NoMemory(); goto done; } @@ -276,7 +271,7 @@ static bool create_name_map(Cursor* cur, SQLSMALLINT field_count, bool lower) nullable_obj = 0; - index = PyInt_FromLong(i); + index = PyLong_FromLong(i); if (!index) goto done; @@ -302,7 +297,7 @@ static bool create_name_map(Cursor* cur, SQLSMALLINT field_count, bool lower) Py_XDECREF(colmap); Py_XDECREF(index); Py_XDECREF(colinfo); - pyodbc_free(szName); + PyMem_Free(szName); return success; } @@ -328,8 +323,8 @@ static bool free_results(Cursor* self, int flags) // If we ran out of memory, it is possible that we have a cursor but colinfos is zero. However, we should be // deleting this object, so the cursor will be freed when the HSTMT is destroyed. 
*/ - I((flags & STATEMENT_MASK) != 0); - I((flags & PREPARED_MASK) != 0); + assert((flags & STATEMENT_MASK) != 0); + assert((flags & PREPARED_MASK) != 0); if ((flags & PREPARED_MASK) == FREE_PREPARED) { @@ -339,7 +334,7 @@ static bool free_results(Cursor* self, int flags) if (self->colinfos) { - pyodbc_free(self->colinfos); + PyMem_Free(self->colinfos); self->colinfos = 0; } @@ -555,9 +550,9 @@ static bool PrepareResults(Cursor* cur, int cCols) // Allocates the ColumnInfo structures describing the returned data. int i; - I(cur->colinfos == 0); + assert(cur->colinfos == 0); - cur->colinfos = (ColumnInfo*)pyodbc_malloc(sizeof(ColumnInfo) * cCols); + cur->colinfos = (ColumnInfo*)PyMem_Malloc(sizeof(ColumnInfo) * cCols); if (cur->colinfos == 0) { PyErr_NoMemory(); @@ -568,7 +563,7 @@ static bool PrepareResults(Cursor* cur, int cCols) { if (!InitColumnInfo(cur, (SQLUSMALLINT)(i + 1), &cur->colinfos[i])) { - pyodbc_free(cur->colinfos); + PyMem_Free(cur->colinfos); cur->colinfos = 0; return false; } @@ -585,10 +580,10 @@ static int GetDiagRecs(Cursor* cur) PyObject* msg_list; // the "messages" as a Python list of diagnostic records SQLSMALLINT iRecNumber = 1; // the index of the diagnostic records (1-based) - ODBCCHAR cSQLState[6]; // five-character SQLSTATE code (plus terminating NULL) + uint16_t cSQLState[6]; // five-character SQLSTATE code (plus terminating NULL) SQLINTEGER iNativeError; SQLSMALLINT iMessageLen = 1023; - ODBCCHAR *cMessageText = (ODBCCHAR*) pyodbc_malloc((iMessageLen + 1) * sizeof(ODBCCHAR)); + uint16_t *cMessageText = (uint16_t*) PyMem_Malloc((iMessageLen + 1) * sizeof(uint16_t)); SQLSMALLINT iTextLength; SQLRETURN ret; @@ -622,8 +617,8 @@ static int GetDiagRecs(Cursor* cur) // If needed, allocate a bigger error message buffer and retry. 
if (iTextLength > iMessageLen - 1) { iMessageLen = iTextLength + 1; - if (!pyodbc_realloc((BYTE**) &cMessageText, (iMessageLen + 1) * sizeof(ODBCCHAR))) { - pyodbc_free(cMessageText); + if (!PyMem_Realloc((BYTE**) &cMessageText, (iMessageLen + 1) * sizeof(uint16_t))) { + PyMem_Free(cMessageText); PyErr_NoMemory(); return 0; } @@ -644,13 +639,13 @@ static int GetDiagRecs(Cursor* cur) // Default to UTF-16, which may not work if the driver/manager is using some other encoding const char *unicode_enc = cur->cnxn ? cur->cnxn->metadata_enc.name : ENCSTR_UTF16NE; PyObject* msg_value = PyUnicode_Decode( - (char*)cMessageText, iTextLength * sizeof(ODBCCHAR), unicode_enc, "strict" + (char*)cMessageText, iTextLength * sizeof(uint16_t), unicode_enc, "strict" ); if (!msg_value) { // If the char cannot be decoded, return something rather than nothing. Py_XDECREF(msg_value); - msg_value = PyBytes_FromStringAndSize((char*)cMessageText, iTextLength * sizeof(ODBCCHAR)); + msg_value = PyBytes_FromStringAndSize((char*)cMessageText, iTextLength * sizeof(uint16_t)); } PyObject* msg_tuple = PyTuple_New(2); // the message as a Python tuple of class and value @@ -672,7 +667,7 @@ static int GetDiagRecs(Cursor* cur) iRecNumber++; } - pyodbc_free(cMessageText); + PyMem_Free(cMessageText); Py_XDECREF(cur->messages); cur->messages = msg_list; // cur->messages now owns the msg_list reference @@ -740,17 +735,7 @@ static PyObject* execute(Cursor* cur, PyObject* pSql, PyObject* params, bool ski szLastFunction = "SQLExecDirect"; const TextEnc* penc = 0; - -#if PY_MAJOR_VERSION < 3 - if (PyString_Check(pSql)) - { - penc = &cur->cnxn->str_enc; - } - else -#endif - { - penc = &cur->cnxn->unicode_enc; - } + penc = &cur->cnxn->unicode_enc; Object query(penc->Encode(pSql)); if (!query) @@ -759,7 +744,7 @@ static PyObject* execute(Cursor* cur, PyObject* pSql, PyObject* params, bool ski bool isWide = (penc->ctype == SQL_C_WCHAR); const char* pch = PyBytes_AS_STRING(query.Get()); - SQLINTEGER cch = 
(SQLINTEGER)(PyBytes_GET_SIZE(query.Get()) / (isWide ? sizeof(ODBCCHAR) : 1)); + SQLINTEGER cch = (SQLINTEGER)(PyBytes_GET_SIZE(query.Get()) / (isWide ? sizeof(uint16_t) : 1)); Py_BEGIN_ALLOW_THREADS if (isWide) @@ -815,22 +800,17 @@ static PyObject* execute(Cursor* cur, PyObject* pSql, PyObject* params, bool ski if (ret == SQL_NEED_DATA) { szLastFunction = "SQLPutData"; - if (pInfo->pObject && (PyBytes_Check(pInfo->pObject) - #if PY_VERSION_HEX >= 0x02060000 - || PyByteArray_Check(pInfo->pObject) - #endif + if (pInfo->pObject && (PyBytes_Check(pInfo->pObject) || PyByteArray_Check(pInfo->pObject) )) { char *(*pGetPtr)(PyObject*); Py_ssize_t (*pGetLen)(PyObject*); - #if PY_VERSION_HEX >= 0x02060000 if (PyByteArray_Check(pInfo->pObject)) { pGetPtr = PyByteArray_AsString; pGetLen = PyByteArray_Size; } else - #endif { pGetPtr = PyBytes_AsString; pGetLen = PyBytes_Size; @@ -853,25 +833,6 @@ static PyObject* execute(Cursor* cur, PyObject* pSql, PyObject* params, bool ski } while (offset < cb); } -#if PY_MAJOR_VERSION < 3 - else if (pInfo->pObject && PyBuffer_Check(pInfo->pObject)) - { - // Buffers can have multiple segments, so we might need multiple writes. Looping through buffers isn't - // difficult, but we've wrapped it up in an iterator object to keep this loop simple. 
- - BufferSegmentIterator it(pInfo->pObject); - byte* pb; - SQLLEN cb; - while (it.Next(pb, cb)) - { - Py_BEGIN_ALLOW_THREADS - ret = SQLPutData(cur->hstmt, pb, cb); - Py_END_ALLOW_THREADS - if (!SQL_SUCCEEDED(ret)) - return RaiseErrorFromHandle(cur->cnxn, "SQLPutData", cur->cnxn->hdbc, cur->hstmt); - } - } -#endif else if (pInfo->ParameterType == SQL_SS_TABLE) { // TVP @@ -904,7 +865,7 @@ static PyObject* execute(Cursor* cur, PyObject* pSql, PyObject* params, bool ski } if (prevParam->allocated) - pyodbc_free(prevParam->ParameterValuePtr); + PyMem_Free(prevParam->ParameterValuePtr); Py_XDECREF(prevParam->pObject); newParam.BufferLength = newParam.StrLen_or_Ind; newParam.StrLen_or_Ind = SQL_DATA_AT_EXEC; @@ -1041,7 +1002,7 @@ PyObject* Cursor_execute(PyObject* self, PyObject* args) PyObject* pSql = PyTuple_GET_ITEM(args, 0); - if (!PyString_Check(pSql) && !PyUnicode_Check(pSql)) + if (!PyUnicode_Check(pSql) && !PyUnicode_Check(pSql)) { PyErr_SetString(PyExc_TypeError, "The first argument to execute must be a string or unicode query."); return 0; @@ -1083,7 +1044,7 @@ static PyObject* Cursor_executemany(PyObject* self, PyObject* args) if (!PyArg_ParseTuple(args, "OO", &pSql, ¶m_seq)) return 0; - if (!PyString_Check(pSql) && !PyUnicode_Check(pSql)) + if (!PyUnicode_Check(pSql) && !PyUnicode_Check(pSql)) { PyErr_SetString(PyExc_TypeError, "The first argument to execute must be a string or unicode query."); return 0; @@ -1098,13 +1059,13 @@ static PyObject* Cursor_executemany(PyObject* self, PyObject* args) PyErr_SetString(ProgrammingError, "The second parameter to executemany must not be empty."); return 0; } - if (cursor->fastexecmany) - { - free_results(cursor, FREE_STATEMENT | KEEP_PREPARED); - if (!ExecuteMulti(cursor, pSql, param_seq)) - return 0; - } - else + // if (cursor->fastexecmany) + // { + // free_results(cursor, FREE_STATEMENT | KEEP_PREPARED); + // if (!ExecuteMulti(cursor, pSql, param_seq)) + // return 0; + // } + // else { for (Py_ssize_t i = 0; i < 
c; i++) { @@ -1224,7 +1185,7 @@ static PyObject* Cursor_fetch(Cursor* cur) field_count = PyTuple_GET_SIZE(cur->description); - apValues = (PyObject**)pyodbc_malloc(sizeof(PyObject*) * field_count); + apValues = (PyObject**)PyMem_Malloc(sizeof(PyObject*) * field_count); if (apValues == 0) return PyErr_NoMemory(); @@ -1384,7 +1345,6 @@ static PyObject* Cursor_fetchmany(PyObject* self, PyObject* args) return result; } - static char tables_doc[] = "C.tables(table=None, catalog=None, schema=None, tableType=None) --> self\n" "\n" @@ -1501,10 +1461,10 @@ static PyObject* Cursor_columns(PyObject* self, PyObject* args, PyObject* kwargs Py_BEGIN_ALLOW_THREADS ret = SQLColumnsW(cur->hstmt, - catalog.psz, SQL_NTS, - schema.psz, SQL_NTS, - table.psz, SQL_NTS, - column.psz, SQL_NTS); + catalog, SQL_NTS, + schema, SQL_NTS, + table, SQL_NTS, + column, SQL_NTS); Py_END_ALLOW_THREADS if (!SQL_SUCCEEDED(ret)) @@ -2403,7 +2363,7 @@ static PyObject* Cursor_exit(PyObject* self, PyObject* args) return 0; // If an error has occurred, `args` will be a tuple of 3 values. Otherwise it will be a tuple of 3 `None`s. 
- I(PyTuple_Check(args)); + assert(PyTuple_Check(args)); if (cursor->cnxn->nAutoCommit == SQL_AUTOCOMMIT_OFF && PyTuple_GetItem(args, 0) == Py_None) { @@ -2431,6 +2391,9 @@ static PyMethodDef Cursor_methods[] = { "fetchone", (PyCFunction)Cursor_fetchone, METH_NOARGS, fetchone_doc }, { "fetchall", (PyCFunction)Cursor_fetchall, METH_NOARGS, fetchall_doc }, { "fetchmany", (PyCFunction)Cursor_fetchmany, METH_VARARGS, fetchmany_doc }, +#ifdef WITH_NUMPY + { "fetchdictarray", (PyCFunction)Cursor_fetchdictarray, METH_VARARGS|METH_KEYWORDS, fetchdictarray_doc }, +#endif { "nextset", (PyCFunction)Cursor_nextset, METH_NOARGS, nextset_doc }, { "tables", (PyCFunction)Cursor_tables, METH_VARARGS|METH_KEYWORDS, tables_doc }, { "columns", (PyCFunction)Cursor_columns, METH_VARARGS|METH_KEYWORDS, columns_doc }, @@ -2485,11 +2448,7 @@ PyTypeObject CursorType = 0, // tp_getattro 0, // tp_setattro 0, // tp_as_buffer -#if defined(Py_TPFLAGS_HAVE_ITER) - Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_ITER, -#else Py_TPFLAGS_DEFAULT, -#endif cursor_doc, // tp_doc 0, // tp_traverse 0, // tp_clear diff --git a/src/cursor.h b/src/cursor.h index b9fa43c2..40d77df2 100644 --- a/src/cursor.h +++ b/src/cursor.h @@ -13,6 +13,15 @@ #ifndef CURSOR_H #define CURSOR_H +enum +{ + CURSOR_REQUIRE_CNXN = 0x00000001, + CURSOR_REQUIRE_OPEN = 0x00000003, // includes _CNXN + CURSOR_REQUIRE_RESULTS = 0x00000007, // includes _OPEN + CURSOR_RAISE_ERROR = 0x00000010, +}; + + struct Connection; struct ColumnInfo @@ -160,5 +169,6 @@ void Cursor_init(); Cursor* Cursor_New(Connection* cnxn); PyObject* Cursor_execute(PyObject* self, PyObject* args); +Cursor* Cursor_Validate(PyObject* obj, DWORD flags); #endif diff --git a/src/errors.cpp b/src/errors.cpp index 5fc23419..6cc230b8 100644 --- a/src/errors.cpp +++ b/src/errors.cpp @@ -68,11 +68,11 @@ PyObject* RaiseErrorV(const char* sqlstate, PyObject* exc_class, const char* for exc_class = ExceptionFromSqlState(sqlstate); // Note: Don't use any native strprintf routines. 
With Py_ssize_t, we need "%zd", but VC .NET doesn't support it. - // PyString_FromFormatV already takes this into account. + // PyUnicode_FromFormatV already takes this into account. va_list marker; va_start(marker, format); - PyObject* pMsg = PyString_FromFormatV(format, marker); + PyObject* pMsg = PyUnicode_FromFormatV(format, marker); va_end(marker); if (!pMsg) { @@ -88,7 +88,7 @@ PyObject* RaiseErrorV(const char* sqlstate, PyObject* exc_class, const char* for pAttrs = Py_BuildValue("(Os)", pMsg, sqlstate); if (pAttrs) { - pError = PyEval_CallObject(exc_class, pAttrs); + pError = PyObject_CallObject(exc_class, pAttrs); if (pError) RaiseErrorFromException(pError); } @@ -101,38 +101,24 @@ PyObject* RaiseErrorV(const char* sqlstate, PyObject* exc_class, const char* for } -#if PY_MAJOR_VERSION < 3 -#define PyString_CompareWithASCIIString(lhs, rhs) _strcmpi(PyString_AS_STRING(lhs), rhs) -#else -#define PyString_CompareWithASCIIString PyUnicode_CompareWithASCIIString -#endif - - bool HasSqlState(PyObject* ex, const char* szSqlState) { - // Returns true if `ex` is an exception and has the given SQLSTATE. It is safe to pass 0 for ex. + // Returns true if `ex` is an exception and has the given SQLSTATE. It is safe to pass 0 for + // `ex`. 
- bool has = false; + if (!ex) + return false; - if (ex) - { - PyObject* args = PyObject_GetAttrString(ex, "args"); - if (args != 0) - { - PyObject* s = PySequence_GetItem(args, 1); - if (s != 0 && PyString_Check(s)) - { - // const char* sz = PyString_AsString(s); - // if (sz && _strcmpi(sz, szSqlState) == 0) - // has = true; - has = (PyString_CompareWithASCIIString(s, szSqlState) == 0); - } - Py_XDECREF(s); - Py_DECREF(args); - } - } + Object args(PyObject_GetAttrString(ex, "args")); + if (!args) + return false; + + Object sqlstate(PySequence_GetItem(args, 1)); + if (!sqlstate || !PyBytes_Check(sqlstate)) + return false; - return has; + const char* sz = PyBytes_AsString(sqlstate); + return (sz && _strcmpi(sz, szSqlState) == 0); } @@ -159,7 +145,7 @@ static PyObject* GetError(const char* sqlstate, PyObject* exc_class, PyObject* p PyTuple_SetItem(pAttrs, 1, pMsg); // pAttrs now owns the pMsg reference; steals a reference, does not increment - pSqlState = PyString_FromString(sqlstate); + pSqlState = PyUnicode_FromString(sqlstate); if (!pSqlState) { Py_DECREF(pAttrs); @@ -168,7 +154,7 @@ static PyObject* GetError(const char* sqlstate, PyObject* exc_class, PyObject* p PyTuple_SetItem(pAttrs, 0, pSqlState); // pAttrs now owns the pSqlState reference - pError = PyEval_CallObject(exc_class, pAttrs); // pError will incref pAttrs + pError = PyObject_CallObject(exc_class, pAttrs); // pError will incref pAttrs Py_XDECREF(pAttrs); @@ -216,9 +202,9 @@ PyObject* GetErrorFromHandle(Connection *conn, const char* szFunction, HDBC hdbc SQLINTEGER nNativeError; SQLSMALLINT cchMsg; - ODBCCHAR sqlstateT[6]; + uint16_t sqlstateT[6]; SQLSMALLINT msgLen = 1023; - ODBCCHAR *szMsg = (ODBCCHAR*) pyodbc_malloc((msgLen + 1) * sizeof(ODBCCHAR)); + uint16_t *szMsg = (uint16_t*) PyMem_Malloc((msgLen + 1) * sizeof(uint16_t)); if (!szMsg) { PyErr_NoMemory(); @@ -265,9 +251,9 @@ PyObject* GetErrorFromHandle(Connection *conn, const char* szFunction, HDBC hdbc // If needed, allocate a bigger error 
message buffer and retry.
         if (cchMsg > msgLen - 1) {
             msgLen = cchMsg + 1;
-            if (!pyodbc_realloc((BYTE**) &szMsg, (msgLen + 1) * sizeof(ODBCCHAR))) {
+            if (uint16_t* szMsgNew = (uint16_t*)PyMem_Realloc(szMsg, (msgLen + 1) * sizeof(uint16_t))) { szMsg = szMsgNew; } else {
                 PyErr_NoMemory();
-                pyodbc_free(szMsg);
+                PyMem_Free(szMsg);  // realloc failure leaves the old buffer valid; release it here
                 return 0;
             }
             Py_BEGIN_ALLOW_THREADS
@@ -283,7 +269,7 @@
     // For now, default to UTF-16 if this is not in the context of a connection.
     // Note that this will not work if the DM is using a different wide encoding (e.g. UTF-32).
     const char *unicode_enc = conn ? conn->metadata_enc.name : ENCSTR_UTF16NE;
-    Object msgStr(PyUnicode_Decode((char*)szMsg, cchMsg * sizeof(ODBCCHAR), unicode_enc, "strict"));
+    Object msgStr(PyUnicode_Decode((char*)szMsg, cchMsg * sizeof(uint16_t), unicode_enc, "strict"));
 
     if (cchMsg != 0 && msgStr.Get())
     {
@@ -295,7 +281,7 @@
             msg = PyUnicode_FromFormat("[%s] %V (%ld) (%s)", sqlstate, msgStr.Get(), "(null)", (long)nNativeError, szFunction);
             if (!msg) {
                 PyErr_NoMemory();
-                pyodbc_free(szMsg);
+                PyMem_Free(szMsg);
                 return 0;
             }
         }
@@ -323,14 +309,14 @@
     }
 
     // Raw message buffer not needed anymore
-    pyodbc_free(szMsg);
+    PyMem_Free(szMsg);
 
-    if (!msg || PyUnicode_GetSize(msg.Get()) == 0)
+    if (!msg || PyUnicode_GET_LENGTH(msg.Get()) == 0)
     {
         // This only happens using unixODBC.  (Haven't tried iODBC yet.)  Either the driver or the driver manager is
         // buggy and has signaled a fault without recording error information.
sqlstate[0] = '\0'; - msg = PyString_FromString(DEFAULT_ERROR); + msg = PyUnicode_FromString(DEFAULT_ERROR); if (!msg) { PyErr_NoMemory(); diff --git a/src/errors.h b/src/errors.h index f35f7d33..f2f47a34 100644 --- a/src/errors.h +++ b/src/errors.h @@ -58,16 +58,11 @@ bool HasSqlState(HSTMT hstmt, const char* szSqlState); inline PyObject* RaiseErrorFromException(PyObject* pError) { // PyExceptionInstance_Class doesn't exist in 2.4 -#if PY_MAJOR_VERSION >= 3 PyErr_SetObject((PyObject*)Py_TYPE(pError), pError); -#else - PyObject* cls = (PyObject*)((PyInstance_Check(pError) ? (PyObject*)((PyInstanceObject*)pError)->in_class : (PyObject*)(Py_TYPE(pError)))); - PyErr_SetObject(cls, pError); -#endif return 0; } -inline void CopySqlState(const ODBCCHAR* src, char* dest) +inline void CopySqlState(const uint16_t* src, char* dest) { // Copies a SQLSTATE read as SQLWCHAR into a character buffer. We know that SQLSTATEs are // composed of ASCII characters and we need one standard to compare when choosing @@ -76,14 +71,14 @@ inline void CopySqlState(const ODBCCHAR* src, char* dest) // Strangely, even when the error messages are UTF-8, PostgreSQL and MySQL encode the // sqlstate as UTF-16LE. We'll simply copy all non-zero bytes, with some checks for // running off the end of the buffers which will work for ASCII, UTF8, and UTF16 LE & BE. - // It would work for UTF32 if I increase the size of the ODBCCHAR buffer to handle it. + // It would work for UTF32 if I increase the size of the uint16_t buffer to handle it. // // (In the worst case, if a driver does something totally weird, we'll have an incomplete // SQLSTATE.) 
// const char* pchSrc = (const char*)src; - const char* pchSrcMax = pchSrc + sizeof(ODBCCHAR) * 5; + const char* pchSrcMax = pchSrc + sizeof(uint16_t) * 5; char* pchDest = dest; // Where we are copying into dest char* pchDestMax = dest + 5; // We know a SQLSTATE is 5 characters long diff --git a/src/getdata.cpp b/src/getdata.cpp index cd664a83..7f801f2f 100644 --- a/src/getdata.cpp +++ b/src/getdata.cpp @@ -19,7 +19,7 @@ // * pinfo->column_size, from SQLDescribeCol, does not include a NULL terminator. For example, column_size for a // char(10) column would be 10. (Also, when dealing with SQLWCHAR, it is the number of *characters*, not bytes.) // -// * When passing a length to PyString_FromStringAndSize and similar Unicode functions, do not add the NULL +// * When passing a length to PyUnicode_FromStringAndSize and similar Unicode functions, do not add the NULL // terminator -- it will be added automatically. See objects/stringobject.c // // * SQLGetData does not return the NULL terminator in the length indicator. (Therefore, you can pass this value @@ -66,7 +66,6 @@ inline bool IsWideType(SQLSMALLINT sqltype) return false; } -// TODO: Won't pyodbc_free crash if we didn't use pyodbc_realloc. static bool ReadVarColumn(Cursor* cur, Py_ssize_t iCol, SQLSMALLINT ctype, bool& isNull, byte*& pbResult, Py_ssize_t& cbResult) { @@ -78,7 +77,7 @@ static bool ReadVarColumn(Cursor* cur, Py_ssize_t iCol, SQLSMALLINT ctype, bool& // // If a non-null and non-empty value was read, pbResult will be set to a buffer containing // the data and cbResult will be set to the byte length. This length does *not* include a - // null terminator. In this case the data *must* be freed using pyodbc_free. + // null terminator. In this case the data *must* be freed using PyMem_Free. // // If a null value was read, isNull is set to true and pbResult and cbResult will be set to // 0. 
@@ -90,13 +89,13 @@ static bool ReadVarColumn(Cursor* cur, Py_ssize_t iCol, SQLSMALLINT ctype, bool& pbResult = 0; cbResult = 0; - const Py_ssize_t cbElement = (Py_ssize_t)(IsWideType(ctype) ? sizeof(ODBCCHAR) : 1); + const Py_ssize_t cbElement = (Py_ssize_t)(IsWideType(ctype) ? sizeof(uint16_t) : 1); const Py_ssize_t cbNullTerminator = IsBinaryType(ctype) ? 0 : cbElement; // TODO: Make the initial allocation size configurable? Py_ssize_t cbAllocated = 4096; Py_ssize_t cbUsed = 0; - byte* pb = (byte*)malloc((size_t)cbAllocated); + byte* pb = (byte*)PyMem_Malloc((size_t)cbAllocated); if (!pb) { PyErr_NoMemory(); @@ -180,6 +179,8 @@ static bool ReadVarColumn(Cursor* cur, Py_ssize_t iCol, SQLSMALLINT ctype, bool& cbUsed += cbRead; + TRACE("Memory Need: cbRemaining=%ld cbRead=%ld\n", (long)cbRemaining, (long)cbRead); + if (cbRemaining > 0) { // This is a tiny bit complicated by the fact that the data is null terminated, @@ -214,7 +215,7 @@ static bool ReadVarColumn(Cursor* cur, Py_ssize_t iCol, SQLSMALLINT ctype, bool& } else { - pyodbc_free(pb); + PyMem_Free(pb); } return true; @@ -226,10 +227,10 @@ static byte* ReallocOrFreeBuffer(byte* pb, Py_ssize_t cbNeed) // is freed, a memory exception is set, and 0 is returned. Otherwise the new pointer is // returned. 
- byte* pbNew = (byte*)realloc(pb, (size_t)cbNeed); + byte* pbNew = (byte*)PyMem_Realloc(pb, (size_t)cbNeed); if (pbNew == 0) { - pyodbc_free(pb); + PyMem_Free(pb); PyErr_NoMemory(); return 0; } @@ -261,13 +262,13 @@ static PyObject* GetText(Cursor* cur, Py_ssize_t iCol) if (isNull) { - I(pbData == 0 && cbData == 0); + assert(pbData == 0 && cbData == 0); Py_RETURN_NONE; } PyObject* result = TextBufferToObject(enc, pbData, cbData); - pyodbc_free(pbData); + PyMem_Free(pbData); return result; } @@ -286,17 +287,13 @@ static PyObject* GetBinary(Cursor* cur, Py_ssize_t iCol) if (isNull) { - I(pbData == 0 && cbData == 0); + assert(pbData == 0 && cbData == 0); Py_RETURN_NONE; } PyObject* obj; -#if PY_MAJOR_VERSION >= 3 obj = PyBytes_FromStringAndSize((char*)pbData, cbData); -#else - obj = PyByteArray_FromStringAndSize((char*)pbData, cbData); -#endif - pyodbc_free(pbData); + PyMem_Free(pbData); return obj; } @@ -314,12 +311,12 @@ static PyObject* GetDataUser(Cursor* cur, Py_ssize_t iCol, int conv) if (isNull) { - I(pbData == 0 && cbData == 0); + assert(pbData == 0 && cbData == 0); Py_RETURN_NONE; } PyObject* value = PyBytes_FromStringAndSize((char*)pbData, cbData); - pyodbc_free(pbData); + PyMem_Free(pbData); if (!value) return 0; @@ -332,39 +329,26 @@ static PyObject* GetDataUser(Cursor* cur, Py_ssize_t iCol, int conv) } -#if PY_VERSION_HEX < 0x02060000 -static PyObject* GetDataBuffer(Cursor* cur, Py_ssize_t iCol) -{ - PyObject* str = GetDataString(cur, iCol); - - if (str == Py_None) - return str; - - PyObject* buffer = 0; - - if (str) - { - buffer = PyBuffer_FromObject(str, 0, PyString_GET_SIZE(str)); - Py_DECREF(str); // If no buffer, release it. If buffer, the buffer owns it. 
- } - - return buffer; -} -#endif - static PyObject* GetDataDecimal(Cursor* cur, Py_ssize_t iCol) { - // The SQL_NUMERIC_STRUCT support is hopeless (SQL Server ignores scale on input parameters and output columns, - // Oracle does something else weird, and many drivers don't support it at all), so we'll rely on the Decimal's - // string parsing. Unfortunately, the Decimal author does not pay attention to the locale, so we have to modify - // the string ourselves. + // The SQL_NUMERIC_STRUCT support is hopeless (SQL Server ignores scale on input parameters + // and output columns, Oracle does something else weird, and many drivers don't support it + // at all), so we'll rely on the Decimal's string parsing. Unfortunately, the Decimal + // author does not pay attention to the locale, so we have to modify the string ourselves. + // + // Oracle inserts group separators (commas in US, periods in some countries), so leave room + // for that too. // - // Oracle inserts group separators (commas in US, periods in some countries), so leave room for that too. + // Some databases support a 'money' type which also inserts currency symbols. Since we + // don't want to keep track of all these, we'll ignore all characters we don't recognize. + // We will look for digits, negative sign (which I hope is universal), and a decimal point + // ('.' or ',' usually). We'll do everything as Unicode in case currencies, etc. are too + // far out. // - // Some databases support a 'money' type which also inserts currency symbols. Since we don't want to keep track of - // all these, we'll ignore all characters we don't recognize. We will look for digits, negative sign (which I hope - // is universal), and a decimal point ('.' or ',' usually). We'll do everything as Unicode in case currencies, - // etc. are too far out. + // This seems very inefficient. We know the characters we are interested in are ASCII + // since they are -, ., and 0-9. 
There /could/ be a Unicode currency symbol, but I'm going + // to ignore that for right now. Therefore if we ask for the data in SQLCHAR, it should be + // ASCII even if the encoding is UTF-8. const TextEnc& enc = cur->cnxn->sqlwchar_enc; // I'm going to request the data as Unicode in case there is a weird currency symbol. If @@ -378,13 +362,13 @@ static PyObject* GetDataDecimal(Cursor* cur, Py_ssize_t iCol) if (isNull) { - I(pbData == 0 && cbData == 0); + assert(pbData == 0 && cbData == 0); Py_RETURN_NONE; } Object result(DecimalFromText(enc, pbData, cbData)); - pyodbc_free(pbData); + PyMem_Free(pbData); return result.Detach(); } @@ -432,9 +416,9 @@ static PyObject* GetDataLong(Cursor* cur, Py_ssize_t iCol) Py_RETURN_NONE; if (pinfo->is_unsigned) - return PyInt_FromLong(*(SQLINTEGER*)&value); + return PyLong_FromLong(*(SQLINTEGER*)&value); - return PyInt_FromLong(value); + return PyLong_FromLong(value); } @@ -520,11 +504,7 @@ static PyObject* GetUUID(Cursor* cur, Py_ssize_t iCol) if (cbFetched == SQL_NULL_DATA) Py_RETURN_NONE; -#if PY_MAJOR_VERSION >= 3 const char* szFmt = "(yyy#)"; -#else - const char* szFmt = "(sss#)"; -#endif Object args(Py_BuildValue(szFmt, NULL, NULL, &guid, (int)sizeof(guid))); if (!args) return 0; @@ -626,7 +606,7 @@ PyObject* PythonTypeFromSqlType(Cursor* cur, SQLSMALLINT type) int conv_index = GetUserConvIndex(cur, type); if (conv_index != -1) - return (PyObject*)&PyString_Type; + return (PyObject*)&PyUnicode_Type; PyObject* pytype = 0; bool incref = true; @@ -636,14 +616,7 @@ PyObject* PythonTypeFromSqlType(Cursor* cur, SQLSMALLINT type) case SQL_CHAR: case SQL_VARCHAR: case SQL_LONGVARCHAR: -#if PY_MAJOR_VERSION < 3 - if (cur->cnxn->str_enc.ctype == SQL_C_CHAR) - pytype = (PyObject*)&PyString_Type; - else - pytype = (PyObject*)&PyUnicode_Type; -#else pytype = (PyObject*)&PyUnicode_Type; -#endif break; case SQL_GUID: @@ -654,14 +627,7 @@ PyObject* PythonTypeFromSqlType(Cursor* cur, SQLSMALLINT type) } else { -#if PY_MAJOR_VERSION < 3 - 
if (cur->cnxn->str_enc.ctype == SQL_C_CHAR) - pytype = (PyObject*)&PyString_Type; - else - pytype = (PyObject*)&PyUnicode_Type; -#else - pytype = (PyObject*)&PyUnicode_Type; -#endif + pytype = (PyObject*)&PyUnicode_Type; } break; @@ -688,7 +654,7 @@ PyObject* PythonTypeFromSqlType(Cursor* cur, SQLSMALLINT type) case SQL_SMALLINT: case SQL_INTEGER: case SQL_TINYINT: - pytype = (PyObject*)&PyInt_Type; + pytype = (PyObject*)&PyLong_Type; break; case SQL_TYPE_DATE: @@ -716,11 +682,7 @@ PyObject* PythonTypeFromSqlType(Cursor* cur, SQLSMALLINT type) case SQL_VARBINARY: case SQL_LONGVARBINARY: default: -#if PY_VERSION_HEX >= 0x02060000 pytype = (PyObject*)&PyByteArray_Type; -#else - pytype = (PyObject*)&PyBuffer_Type; -#endif break; } diff --git a/src/npcontainer.cpp b/src/npcontainer.cpp new file mode 100644 index 00000000..a4e70f3c --- /dev/null +++ b/src/npcontainer.cpp @@ -0,0 +1,1684 @@ +// +// Extensions for putting the data results of queries in NumPy containers. +// Authors: Francesc Alted (original author) +// Oscar Villellas +// Copyright: Continuum Analytics 2012-2014 +// + +#define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION + +#include +#include + +#include "pyodbc.h" +#include "wrapper.h" +#include "textenc.h" +#include "cursor.h" +#include "pyodbcmodule.h" +#include "connection.h" +#include "errors.h" +#include "dbspecific.h" + +#include "numpy/ndarrayobject.h" +#include "numpy/npy_math.h" + +// exported variables ---------------------------------------------------------- + +/* controls the maximum text field width, a negative value indicates that the + text size limit will be dynamic based on the sql type, e.g. varchar (4000) */ +Py_ssize_t iopro_text_limit = -1; + +// ----------------------------------------------------------------------------- + +bool pyodbc_tracing_enabled = false; + +void pyodbc_trace_func(const char* file, int line, const char* fmt, ...) 
+{ + va_list args; + va_start(args, fmt); + + if (0 != file) { + const char* rel_file = strstr(file, "dbadapter"); + printf("%s:%d\n", rel_file, line); + } + vprintf(fmt, args); +} + +#define TRACE_NOLOC(...) \ + if (pyodbc_tracing_enabled) \ + pyodbc_trace_func(NULL, 0, __VA_ARGS__) + +#define GUARDED_ALLOC(...) malloc(__VA_ARGS__) +#define GUARDED_DEALLOC(...) free(__VA_ARGS__) + +#define CHECK_ALLOC_GUARDS(...) {} + + +namespace { + inline size_t + limit_text_size(size_t sz) + { + if (iopro_text_limit < 0) + return sz; + + size_t sz_limit = static_cast(iopro_text_limit); + return sz < sz_limit? sz : sz_limit; + } + + class PyNoGIL + /* a RAII class for Python GIL */ + { + public: + PyNoGIL() + { + Py_UNBLOCK_THREADS + } + ~PyNoGIL() + { + Py_BLOCK_THREADS + } + + private: + PyThreadState *_save; + }; + +} + +// The number of rows to be fetched in case the driver cannot specify it +static size_t DEFAULT_ROWS_TO_BE_FETCHED = 10000; +static size_t DEFAULT_ROWS_TO_BE_ALLOCATED = DEFAULT_ROWS_TO_BE_FETCHED; +// API version 7 is the first one that we can use DATE/TIME +// in a pretty bug-free way. This is set to true in +// the module init function if running on Numpy >= API version 7. 
+static bool CAN_USE_DATETIME = false; + + +const char * +sql_type_to_str(SQLSMALLINT type) +{ +#define TYPENAME(x,y) case x: return y; + switch (type) + { + TYPENAME(SQL_CHAR, "char"); + TYPENAME(SQL_VARCHAR, "varchar"); + TYPENAME(SQL_LONGVARCHAR, "longvarchar"); + TYPENAME(SQL_WCHAR, "wchar"); + TYPENAME(SQL_WVARCHAR, "wvarchar"); + TYPENAME(SQL_WLONGVARCHAR, "wlongvarchar"); + + TYPENAME(SQL_DECIMAL, "decimal"); + TYPENAME(SQL_NUMERIC, "numeric"); + TYPENAME(SQL_SMALLINT, "smallint"); + TYPENAME(SQL_INTEGER, "integer"); + TYPENAME(SQL_REAL, "real"); + TYPENAME(SQL_FLOAT, "float"); + TYPENAME(SQL_DOUBLE, "double"); + TYPENAME(SQL_BIT, "bit"); + TYPENAME(SQL_TINYINT, "tiny"); + TYPENAME(SQL_BIGINT, "bigint"); + + TYPENAME(SQL_BINARY, "binary"); + TYPENAME(SQL_VARBINARY, "varbinary"); + TYPENAME(SQL_LONGVARBINARY, "longvarbinary"); + + TYPENAME(SQL_TYPE_DATE, "date"); + TYPENAME(SQL_TYPE_TIME, "time"); + TYPENAME(SQL_TYPE_TIMESTAMP, "timestamp"); + + TYPENAME(SQL_GUID, "guid"); + default: + return "UNKNOWN"; + } +#undef TYPENAME +} + +const char * +sql_c_type_to_str(SQLSMALLINT type) +{ +#define TYPENAME(x,y) case x: return y; + switch (type) + { + TYPENAME(SQL_C_BIT, "bit"); + TYPENAME(SQL_C_CHAR, "char"); + TYPENAME(SQL_C_WCHAR, "wchar"); + TYPENAME(SQL_C_TINYINT, "tinyint"); + TYPENAME(SQL_C_SSHORT, "sshort"); + TYPENAME(SQL_C_SLONG, "slong"); + TYPENAME(SQL_C_SBIGINT, "sbigint"); + TYPENAME(SQL_C_FLOAT, "float"); + TYPENAME(SQL_C_DOUBLE, "double"); + TYPENAME(SQL_C_TYPE_DATE, "date struct"); + TYPENAME(SQL_C_TIMESTAMP, "timestamp struct"); + TYPENAME(SQL_C_TIME, "time struct"); + default: + return "UNKNOWN"; + } +#undef TYPENAME +} + +using namespace std; + +// Days per month, regular year and leap year +int _days_per_month_table[2][12] = { + { 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31 }, + { 31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31 } +}; + +// +// Returns 1 if the given year is a leap year, 0 otherwise. 
+// +int +is_leapyear(SQLSMALLINT year) +{ + return (year & 0x3) == 0 && /* year % 4 == 0 */ + ((year % 100) != 0 || + (year % 400) == 0); +} + +// +// Calculates the days offset from the 1970 epoch. +// +// Code strongly based on its NumPy counterpart. +// +npy_int64 +get_datestruct_days(const DATE_STRUCT *dts) +{ + int i, month; + npy_int64 year, days = 0; + int *month_lengths; + + year = dts->year - 1970; + days = year * 365; + + /* Adjust for leap years */ + if (days >= 0) { + /* + * 1968 is the closest leap year before 1970. + * Exclude the current year, so add 1. + */ + year += 1; + /* Add one day for each 4 years */ + days += year / 4; + /* 1900 is the closest previous year divisible by 100 */ + year += 68; + /* Subtract one day for each 100 years */ + days -= year / 100; + /* 1600 is the closest previous year divisible by 400 */ + year += 300; + /* Add one day for each 400 years */ + days += year / 400; + } + else { + /* + * 1972 is the closest later year after 1970. + * Include the current year, so subtract 2. + */ + year -= 2; + /* Subtract one day for each 4 years */ + days += year / 4; + /* 2000 is the closest later year divisible by 100 */ + year -= 28; + /* Add one day for each 100 years */ + days -= year / 100; + /* 2000 is also the closest later year divisible by 400 */ + /* Subtract one day for each 400 years */ + days += year / 400; + } + + month_lengths = _days_per_month_table[is_leapyear(dts->year)]; + month = dts->month - 1; + /* make sure month is in range. This prevents an illegal access + when bad input is passed to this function */ + month = month<0 || month>11 ? 0:month; + + /* Add the months */ + for (i = 0; i < month; ++i) { + days += month_lengths[i]; + } + + /* Add the days */ + days += dts->day - 1; + + return days; +} + +// +// Convert a datetime from a datetimestruct to a datetime64 based +// on some metadata. The date is assumed to be valid. +// +// This code is heavily based on NumPy 1.7 equivalent function. 
+// Only conversion to microseconds is supported here. +// +npy_datetime +convert_datetimestruct_to_datetime(const TIMESTAMP_STRUCT *dts) +{ + npy_datetime ret; + + // Calculate the number of days to start + npy_int64 days = get_datestruct_days((DATE_STRUCT*)dts); + ret = (((days * 24 + + dts->hour) * 60 + + dts->minute) * 60 + + dts->second) * 1000000 + + dts->fraction / 1000; // fraction is in ns (billionths of a second) + + return ret; +} + +// +// Convert a date from a datestruct to a datetime64 based +// on some metadata. The date is assumed to be valid. +// +npy_datetime +convert_datestruct_to_datetime(const DATE_STRUCT *dts) +{ + + // Calculate the number of days to start + npy_datetime days = get_datestruct_days(dts); + + return days; +} + +// +// Convert a time from a timestruct to a timedelta64 based +// on some metadata. The time is assumed to be valid. +// +npy_timedelta +convert_timestruct_to_timedelta(const TIME_STRUCT *dts) +{ + npy_timedelta seconds = (((dts->hour * 60) + dts->minute) * 60) + + dts->second; + + return seconds; +} + + +/* + * This is a debug helper function that allows dumping a memory buffer + * to a string for use within TRACE calls. It reuses an internal static + * buffer so it won't be thread safe and it will reuse the same memory + * in different calls, but it will be enough for debugging. + * Note: the string is valid until the next call of this function, as + * the buffer will be reused + */ +static +const char * +raw_buffer_as_print_string(const void *ptr, size_t len) +{ + static char _work_buffer[72]; + static char *hex_digit = "0123456789abcdef"; + const size_t max_bytes_to_dump = (sizeof(_work_buffer)/sizeof(_work_buffer[0]))/3; + size_t pre = len < max_bytes_to_dump ? len : max_bytes_to_dump - 4; + size_t post = len < max_bytes_to_dump ? 
0 : max_bytes_to_dump - pre - 1; + char *out = _work_buffer; + const unsigned char *in = reinterpret_cast(ptr); + if (len == 0) + return ""; + + for (size_t i=0; i>4) & 0xf]; + *out++ = hex_digit[c&0xf]; + *out++ = ' '; + } + + if (post) { + *out++ = '.'; + *out++ = '.'; + *out++ = ' '; + in += len - post; + for (size_t i = 0; i < post; i++) + { + unsigned char c = in[i]; + *out++ = hex_digit[(c>>4) & 0xf]; + *out++ = hex_digit[c&0xf]; + *out++ = ' '; + } + } + + out[-1] = '\0'; // overwrite last space + return _work_buffer; +} + + +/* + * Convert the SQLWCHAR array to ucs4. + * At most count elements will be present. + * + * src is assumed to be in utf16 encoding. If the driver manager uses + * utf32 (ucs4) this will not be called. + * + * note: in our context the number of characters is known an comes from + * the database schema. + */ +static void +convert_ucs4_from_utf16(void *dst, const void *src, size_t count) +{ + uint32_t *ucs4_dst = reinterpret_cast(dst); + const uint16_t *utf16_src = reinterpret_cast(src); + //run until we reach the maximum number of characters (count), + // or null-termination (*utf16_src) + for (size_t idx=0; idx < count && *utf16_src; ++idx) { + uint16_t ch = *utf16_src++; + uint32_t ucs4_ch; + if (ch >= 0xd800 && ch <= 0xdfff) { + // surrogate pair + uint32_t upper = 0x3ffu & ch; + uint32_t lower = 0x3ffu & (*utf16_src++); + ucs4_ch = (upper << 10) + lower; + } + else { + ucs4_ch = ch; + } + + ucs4_dst[idx] = ucs4_ch; + } +} + + +// +// Fill NA particular values depending on the NumPy type +// +// The only cases that need to be supported are the ones that can +// actually be generated from SQL types +static void +fill_NAvalue(void *value, PyArray_Descr *dtype) { + int nptype = dtype->type_num; + int elsize = dtype->elsize; + switch (nptype) + { + case NPY_BOOL: + ((npy_bool*)value)[0] = 0; // XXX False is a good default? 
+ break; + case NPY_INT8: + ((npy_int8*)value)[0] = NPY_MIN_INT8; + break; + case NPY_UINT8: + // For uint8 use max, as 0 is more likely to be valid data. + ((npy_uint8*)value)[0] = NPY_MAX_UINT8; + break; + case NPY_INT16: + ((npy_int16*)value)[0] = NPY_MIN_INT16; + break; + case NPY_INT32: + ((npy_int32*)value)[0] = NPY_MIN_INT32; + break; + case NPY_INT64: + ((npy_int64*)value)[0] = NPY_MIN_INT64; + break; + case NPY_FLOAT: + ((npy_float *)value)[0] = NPY_NANF; + break; + case NPY_DOUBLE: + ((npy_double *)value)[0] = NPY_NAN; + break; + case NPY_STRING: + case NPY_UNICODE: + memset(value, 0, static_cast(elsize)); + break; + case NPY_DATETIME: + ((npy_int64*)value)[0] = NPY_DATETIME_NAT; + break; + case NPY_TIMEDELTA: + ((npy_int64*)value)[0] = NPY_DATETIME_NAT; + break; + default: + RaiseErrorV(0, PyExc_TypeError, + "NumPy data type %d is not supported.", nptype); + } +} + +static int +fill_NAarray(PyArrayObject* array, PyArrayObject* array_nulls, SQLLEN* nulls, + size_t offset, size_t nrows) +{ + // Fill array with NA info in nullarray coming from ODBC + npy_intp elsize_array = PyArray_ITEMSIZE(array); + char *data_array = PyArray_BYTES(array); + SQLLEN *data_null = nulls; + + // Only the last nrows have to be updated + data_array += offset * elsize_array; + + if (array_nulls) { + char *data_array_nulls = PyArray_BYTES(array_nulls); + npy_intp elsize_array_nulls = PyArray_ITEMSIZE(array_nulls); + + data_array_nulls += offset * elsize_array_nulls; + + for (size_t i = 0; i < nrows; ++i) { + if (data_null[i] == SQL_NULL_DATA) { + *data_array_nulls = NPY_TRUE; + fill_NAvalue(data_array, PyArray_DESCR(array)); + } else + { + *data_array_nulls = NPY_FALSE; + } + data_array += elsize_array; + data_array_nulls += elsize_array_nulls; + } + } else + { + for (size_t i = 0; i < nrows; ++i) { + // If NULL are detected, don't show data in array + if (data_null[i] == SQL_NULL_DATA) + fill_NAvalue(data_array, PyArray_DESCR(array)); + data_array += elsize_array; + } + } + + 
+    return 0;
+}
+
+//
+// convert from ODBC format to NumPy format for selected types
+// only types that need conversion are handled.
+//
+static void
+convert_buffer(PyArrayObject* dst_array, void* src, int sql_c_type,
+               SQLLEN offset, npy_intp nrows)
+{
+    switch (sql_c_type)
+    {
+    case SQL_C_TYPE_DATE:
+        {
+            npy_datetime *dst = reinterpret_cast<npy_datetime*>(PyArray_DATA(dst_array)) +
+                offset;
+            DATE_STRUCT *dates = static_cast<DATE_STRUCT*>(src);
+            for (npy_intp i = 0; i < nrows; ++i) {
+                dst[i] = convert_datestruct_to_datetime(dates+i);
+            }
+        }
+        break;
+
+    case SQL_C_TYPE_TIMESTAMP:
+        {
+            npy_datetime *dst = reinterpret_cast<npy_datetime*>(PyArray_DATA(dst_array)) +
+                offset;
+            TIMESTAMP_STRUCT *timestamps = static_cast<TIMESTAMP_STRUCT*>(src);
+            for (npy_intp i = 0; i < nrows; ++i) {
+                dst[i] = convert_datetimestruct_to_datetime(timestamps+i);
+            }
+        }
+        break;
+
+    case SQL_C_TYPE_TIME:
+        {
+            npy_timedelta *dst = reinterpret_cast<npy_timedelta*>(PyArray_DATA(dst_array)) +
+                offset;
+            TIME_STRUCT *timestamps = static_cast<TIME_STRUCT*>(src);
+            for (npy_intp i = 0; i < nrows; ++i) {
+                dst[i] = convert_timestruct_to_timedelta(&timestamps[i]);
+            }
+        }
+        break;
+
+    case SQL_C_WCHAR:
+        {
+            // note that this conversion will only be called when using ucs2/utf16
+            const SQLWCHAR *utf16 = reinterpret_cast<const SQLWCHAR*>(src);
+            size_t len = PyArray_ITEMSIZE(dst_array)/sizeof(npy_ucs4);
+            npy_ucs4 *ucs4 = reinterpret_cast<npy_ucs4*>(PyArray_DATA(dst_array)) + offset*len;
+            for (npy_intp i = 0; i < nrows; ++i) {
+                const SQLWCHAR *src = utf16 + 2*len*i;
+                npy_ucs4 *dst = ucs4 + len*i;
+                TRACE_NOLOC("Converting utf-16 buffer at %p:\n'%s'\n", src,
+                            raw_buffer_as_print_string(src, 2*len*sizeof(src[0])));
+                convert_ucs4_from_utf16(dst, src, len);
+                TRACE_NOLOC("resulting in ucs4 buffer at %p:\n'%s'\n", dst,
+                            raw_buffer_as_print_string(dst, len*sizeof(dst[0])));
+            }
+        }
+        break;
+
+    default:
+        TRACE_NOLOC("WARN: unexpected conversion in fill_dictarray.\n");
+    }
+}
+
+//
+// Resize an array to a new length
+//
+// return 0 on success 1 on failure
+// on failure the returned array is unmodified
+static int +resize_array(PyArrayObject* array, npy_intp new_len) { + int elsize = PyArray_ITEMSIZE(array); + void *old_data = PyArray_DATA(array); + npy_intp old_len = PyArray_DIMS(array)[0]; + void* new_data = NULL; + + // The next test is made so as to avoid a problem with resizing to 0 + // (it seems that this is solved for NumPy 1.7 series though) + if (new_len > 0) { + new_data = PyDataMem_RENEW(old_data, new_len * elsize); + if (new_data == NULL) { + return 1; + } + } + else { + free(old_data); + } + + // this is far from ideal. We should probably be using internal buffers + // and then creating the NumPy array using that internal buffer. This should + // be possible and would be cleaner. +#if (NPY_API_VERSION >= 0x7) + ((PyArrayObject_fields *)array)->data = (char*)new_data; +#else + array->data = (char*)new_data; +#endif + if ((old_len < new_len) && PyArray_ISSTRING(array)) { + memset(PyArray_BYTES(array) + old_len*elsize, 0, (new_len-old_len)*elsize); + } + + PyArray_DIMS(array)[0] = new_len; + + return 0; +} + +namespace +{ + struct fetch_status + { + fetch_status(SQLHSTMT h, SQLULEN chunk_size); + ~fetch_status(); + + SQLLEN rows_read_; + + /* old stmtattr to restore on destruction */ + SQLHSTMT hstmt_; + SQLULEN old_row_bind_type_; + SQLULEN old_row_array_size_; + SQLULEN *old_rows_fetched_ptr_; + }; + + fetch_status::fetch_status(SQLHSTMT h, SQLULEN chunk_size) : hstmt_(h) + { + /* keep old stmt attr */ + SQLGetStmtAttr(hstmt_, SQL_ATTR_ROW_BIND_TYPE, + &old_row_bind_type_, SQL_IS_UINTEGER, 0); + SQLGetStmtAttr(hstmt_, SQL_ATTR_ROW_ARRAY_SIZE, + &old_row_array_size_, SQL_IS_UINTEGER, 0); + SQLGetStmtAttr(hstmt_, SQL_ATTR_ROWS_FETCHED_PTR, + &old_rows_fetched_ptr_, SQL_IS_POINTER, 0); + + /* configure our stmt attr */ + SQLSetStmtAttr(hstmt_, SQL_ATTR_ROW_BIND_TYPE, + SQL_BIND_BY_COLUMN, 0); + SQLSetStmtAttr(hstmt_, SQL_ATTR_ROW_ARRAY_SIZE, + (SQLPOINTER)chunk_size, 0); + SQLSetStmtAttr(hstmt_, SQL_ATTR_ROWS_FETCHED_PTR, + (SQLPOINTER)&rows_read_, 0); 
+ } + + fetch_status::~fetch_status() + { + /* unbind all cols */ + SQLFreeStmt(hstmt_, SQL_UNBIND); + /* restore stmt attr */ + SQLSetStmtAttr(hstmt_, SQL_ATTR_ROW_BIND_TYPE, + (SQLPOINTER)old_row_bind_type_, 0); + SQLSetStmtAttr(hstmt_, SQL_ATTR_ROW_ARRAY_SIZE, + (SQLPOINTER)old_row_array_size_, 0); + SQLSetStmtAttr(hstmt_, SQL_ATTR_ROWS_FETCHED_PTR, + (SQLPOINTER)old_rows_fetched_ptr_, 0); + hstmt_ = 0; + } + + //////////////////////////////////////////////////////////////////////// + + struct column_desc + { + column_desc(); + ~column_desc(); + + // fields coming from describe col + SQLCHAR sql_name_[300]; + SQLSMALLINT sql_type_; // type returned in SQLDescribeCol. + SQLULEN sql_size_; + SQLSMALLINT sql_decimal_; + SQLSMALLINT sql_nullable_; + + // type info + PyArray_Descr* npy_type_descr_; // type to be used in NumPy + int sql_c_type_; // c_type to be use when binding the column. + + // buffers used + PyArrayObject* npy_array_; // the numpy array that will hold the result + PyArrayObject* npy_array_nulls_; // the boolean numpy array holding null information + void* scratch_buffer_; // source buffer when it needs conversion + SQLLEN* null_buffer_; + SQLLEN element_buffer_size_; + }; + + column_desc::column_desc() : + npy_type_descr_(0), npy_array_(0), npy_array_nulls_(0), scratch_buffer_(0), null_buffer_(0), element_buffer_size_(0) + { + } + + column_desc::~column_desc() + { + if (null_buffer_) { + GUARDED_DEALLOC(null_buffer_); + } + + if (scratch_buffer_) { + GUARDED_DEALLOC(scratch_buffer_); + } + + Py_XDECREF(npy_array_nulls_); + Py_XDECREF(npy_array_); + Py_XDECREF(npy_type_descr_); + } + + + inline PyArray_Descr* + dtype_from_string(const char *dtype_str_spec) + /* + returns a dtype (PyArray_Descr) built from a string that describes it + */ + { + PyObject *python_str = Py_BuildValue("s", dtype_str_spec); + if (python_str) { + PyArray_Descr *dtype = 0; + PyArray_DescrConverter(python_str, &dtype); + Py_DECREF(python_str); + return dtype; + } + return 0; 
+ } + + inline PyArray_Descr* + string_dtype(size_t length) + { + PyArray_Descr* result = PyArray_DescrNewFromType(NPY_STRING); + if (result) + result->elsize = static_cast(length+1) * sizeof(char); + return result; + } + + inline PyArray_Descr* + unicode_dtype(size_t length) + { + PyArray_Descr* result = PyArray_DescrNewFromType(NPY_UNICODE); + if (result) + result->elsize = static_cast(length+1) * sizeof(npy_ucs4); + return result; + } + + int + map_column_desc_types(column_desc& cd, bool unicode) + /* + infer the NumPy dtype and the sql_c_type to use from the + sql_type. + + return 0 on success, 1 on failure + + remember to check support for any new NumPy type added in the function + that handles nulls (fill_NAvalue) + */ + { + PyArray_Descr* dtype = 0; + +#define MAP_SUCCESS(DTYPE, CTYPE) do { \ + cd.npy_type_descr_ = DTYPE; \ + cd.sql_c_type_ = CTYPE; \ + return 0; } while (0) + + + size_t sql_size = cd.sql_size_; + + + switch (cd.sql_type_) + { + // string types ------------------------------------------------ + case SQL_CHAR: + case SQL_VARCHAR: + case SQL_LONGVARCHAR: + case SQL_GUID: + case SQL_SS_XML: + if (!unicode) { + dtype = string_dtype(limit_text_size(sql_size)); + if (dtype) { + cd.element_buffer_size_ = dtype->elsize; + MAP_SUCCESS(dtype, SQL_C_CHAR); + } + break; + } + // else: fallthrough + + case SQL_WCHAR: + case SQL_WVARCHAR: + case SQL_WLONGVARCHAR: + { + dtype = unicode_dtype(limit_text_size(sql_size)); + if (dtype) { + cd.element_buffer_size_ = dtype->elsize; + MAP_SUCCESS(dtype, SQL_C_WCHAR); + } + } + break; + + // real types -------------------------------------------------- + case SQL_REAL: + dtype = PyArray_DescrFromType(NPY_FLOAT); + if (dtype) { + MAP_SUCCESS(dtype, SQL_C_FLOAT); + } + break; + + case SQL_FLOAT: + case SQL_DOUBLE: + dtype = PyArray_DescrFromType(NPY_DOUBLE); + if (dtype) { + MAP_SUCCESS(dtype, SQL_C_DOUBLE); + } + break; + + // integer types ----------------------------------------------- + case SQL_BIT: + dtype = 
PyArray_DescrFromType(NPY_BOOL); + if (dtype) { + MAP_SUCCESS(dtype, SQL_C_BIT); + } + break; + + case SQL_TINYINT: + dtype = PyArray_DescrFromType(NPY_UINT8); + if (dtype) { + MAP_SUCCESS(dtype, SQL_C_TINYINT); + } + break; + + case SQL_SMALLINT: + dtype = PyArray_DescrFromType(NPY_INT16); + if (dtype) { + MAP_SUCCESS(dtype, SQL_C_SSHORT); + } + break; + + case SQL_INTEGER: + dtype = PyArray_DescrFromType(NPY_INT32); + if (dtype) { + MAP_SUCCESS(dtype, SQL_C_SLONG); + } + break; + + case SQL_BIGINT: + dtype = PyArray_DescrFromType(NPY_INT64); + if (dtype) { + MAP_SUCCESS(dtype, SQL_C_SBIGINT); + } + break; + + // time related types ------------------------------------------ + case SQL_TYPE_DATE: + if (CAN_USE_DATETIME) { + dtype = dtype_from_string("M8[D]"); + if (dtype) { + MAP_SUCCESS(dtype, SQL_C_TYPE_DATE); + } + } + break; + + case SQL_TYPE_TIME: + case SQL_SS_TIME2: + if (CAN_USE_DATETIME) { + dtype = dtype_from_string("m8[s]"); + if (dtype) { + MAP_SUCCESS(dtype, SQL_C_TYPE_TIME); + } + } + break; + + case SQL_TYPE_TIMESTAMP: + if (CAN_USE_DATETIME) { + dtype = dtype_from_string("M8[us]"); + if (dtype) { + MAP_SUCCESS(dtype, SQL_C_TYPE_TIMESTAMP); + } + } + break; + + // decimal ----------------------------------------------------- + // Note: these are mapped as double as per a request + // this means precision may be lost. + case SQL_DECIMAL: + case SQL_NUMERIC: + dtype = PyArray_DescrFromType(NPY_DOUBLE); + if (dtype) { + MAP_SUCCESS(dtype, SQL_C_DOUBLE); + } + break; + + // unsupported types ------------------------------------------- + // this includes: + // blobs: + // SQL_BINARY, SQL_VARBINARY, SQL_LONGVARBINARY + default: + break; + + } +#undef MAP_SUCCESS + + TRACE_NOLOC("WARN: Failed translation of SQL\n\ttype: %s(%d)\n\tsize: %d\n\tuse_unicode: %s\n", + sql_type_to_str(cd.sql_type_), (int)cd.sql_type_, (int)cd.sql_size_, + unicode ? 
"Yes":"No"); + + return 1; + } + + struct query_desc + { + SQLRETURN init_from_statement(SQLHSTMT hstmt); + SQLRETURN bind_cols(); + + void lowercase_fields(); + int translate_types(bool use_unicode); + int ensure(); + void convert(size_t read); + void advance(size_t read); + + int allocate_buffers(size_t initial_result_count, size_t chunk_size, bool keep_nulls); + int resize(size_t new_count); + void cleanup(); + + void dump_column_mapping() const; + + query_desc(): allocated_results_count_(0), chunk_size_(0), offset_(0) {} + + std::vector columns_; + size_t allocated_results_count_; + size_t chunk_size_; + size_t offset_; + SQLHSTMT hstmt_; + }; + + SQLRETURN + query_desc::init_from_statement(SQLHSTMT hstmt) + /* + Fill the column descriptor from the sql statement handle hstmt. + + returns SQL_SUCCESS if successful, otherwise it returns the + SQLRESULT from the SQL command that failed. + */ + { + cleanup(); + + hstmt_ = hstmt; + + SQLRETURN ret; + SQLSMALLINT field_count = 0; + + ret = SQLNumResultCols(hstmt, &field_count); + + if (!SQL_SUCCEEDED(ret)) + return ret; + + columns_.resize(field_count); + // columns are 1 base on ODBC... 
+ for (SQLSMALLINT field = 1; field <= field_count; field++) + { + column_desc& c_desc = columns_[field-1]; + ret = SQLDescribeCol(hstmt, + field, + &c_desc.sql_name_[0], + _countof(c_desc.sql_name_), + NULL, + &c_desc.sql_type_, + &c_desc.sql_size_, + &c_desc.sql_decimal_, + &c_desc.sql_nullable_); + + if (!SQL_SUCCEEDED(ret)) + return ret; + } + + return SQL_SUCCESS; + } + + SQLRETURN + query_desc::bind_cols() + { + SQLUSMALLINT col_number = 1; + + TRACE_NOLOC("\nBinding columns:\n"); + for (std::vector::iterator it = columns_.begin(); + it < columns_.end(); ++it) + { + void *bind_ptr; + if (it->scratch_buffer_) { + bind_ptr = it->scratch_buffer_; + } + else { + PyArrayObject* array = it->npy_array_; + bind_ptr = static_cast(PyArray_BYTES(array) + + (offset_*PyArray_ITEMSIZE(array))); + } + + TRACE_NOLOC("\tcolumn:%-10.10s address:%p %s\n", + it->sql_name_, bind_ptr, + bind_ptr==it->scratch_buffer_?"(scratch)":""); + SQLRETURN status = SQLBindCol(hstmt_, col_number, it->sql_c_type_, + bind_ptr, it->element_buffer_size_ , + it->null_buffer_); + if (!SQL_SUCCEEDED(status)) { + return status; + } + + col_number++; + } + + return SQL_SUCCESS; + } + + void + query_desc::lowercase_fields() + /* + Converts all the field names to lowercase + */ + { + for (std::vector::iterator it = columns_.begin(); + it < columns_.end(); ++it) + { + _strlwr((char*)&it->sql_name_[0]); + } + } + + int + query_desc::translate_types(bool use_unicode) + /* + Performs the mapping of types from SQL to numpy dtype and C type. + returns a count with the number of failed translations + */ + { + int failed_translations = 0; + for (std::vector::iterator it = columns_.begin(); + it < columns_.end(); ++it) + { + failed_translations += map_column_desc_types(*it, use_unicode); + } + + return failed_translations; + } + + int + query_desc::allocate_buffers(size_t buffer_element_count, + size_t chunk_element_count, + bool keep_nulls) + /* + allocate buffers to execute the query. 
+ row_count: initial rows to preallocate for the results + chunk_row_count: rows to allocate for "per-chunk" buffers + + returns the number of failed allocations. + */ + { + int alloc_errors = 0; + npy_intp npy_array_count = static_cast(buffer_element_count); + + TRACE_NOLOC("\nAllocating arrays for column data:\n"); + for (std::vector::iterator it = columns_.begin(); + it < columns_.end(); ++it) + { + // Allocate the numpy buffer for the result + PyObject *arr = PyArray_SimpleNewFromDescr(1, &npy_array_count, + it->npy_type_descr_); + if (!arr) { + // failed to allocate mem_buffer + alloc_errors++; + continue; + } + PyArrayObject *array = reinterpret_cast(arr); + + if (PyArray_ISSTRING(array)) { + // clear memory on strings or undefined + memset(PyArray_BYTES(array), 0, buffer_element_count*PyArray_ITEMSIZE(array)); + } + + it->npy_array_ = array; + + if (!arr) + alloc_errors ++; + + TRACE_NOLOC("\tcolumn: %-10.10s address: %p\n", it->sql_name_, PyArray_DATA(array)); + // SimpleNewFromDescr steals the reference for the dtype + Py_INCREF(it->npy_type_descr_); + // if it is a type that needs to perform conversion, + // allocate a buffer for the data to be read in. + // + // TODO: make the type logic decide what size per element + // it needs (if any). this will make the logic about + // conversion simpler. 
+ switch (it->sql_c_type_) + { + case SQL_C_TYPE_DATE: + { + void *mem = GUARDED_ALLOC(chunk_element_count * + sizeof(DATE_STRUCT)); + it->scratch_buffer_ = mem; + if (!mem) + alloc_errors ++; + } + break; + case SQL_C_TYPE_TIMESTAMP: + { + void *mem = GUARDED_ALLOC(chunk_element_count * + sizeof(TIMESTAMP_STRUCT)); + it->scratch_buffer_ = mem; + if (!mem) + alloc_errors ++; + } + break; + case SQL_C_TYPE_TIME: + { + void *mem = GUARDED_ALLOC(chunk_element_count * + sizeof(TIME_STRUCT)); + it->scratch_buffer_ = mem; + if (!mem) + alloc_errors ++; + } + break; + case SQL_C_WCHAR: + { + // this case is quite special, as a scratch + // buffer/conversions will only be needed when the + // underlying ODBC manager does not use UCS4 for + // its unicode strings. + // + // - MS ODBC manager uses UTF-16, which may + // include surrogates (thus variable length encoded). + // + // - unixODBC seems to use UCS-2, which is + // compatible with UTF-16, but may not include + // surrogates limiting encoding to Basic + // Multilingual Plane (not sure about this, it + // will be handled using the same codepath as MS + // ODBC, so it will work even if it produces + // surrogates). + // + // - iODBC uses UCS-4 (UTF-32), so it shouldn't + // need any kind of translation. + // + // In order to check if no translation is needed, the + // size of SQLWCHAR is used. + if (sizeof(SQLWCHAR) == 2) { + TRACE_NOLOC("\tscratch memory for unicode conversion (sizeof(SQLWCHAR) is %d)\n", (int)sizeof(SQLWCHAR)); + + size_t item_count = PyArray_ITEMSIZE(it->npy_array_) / sizeof(npy_ucs4); + // 2 due to possibility of surrogate. + // doing the math, the final buffer could be used instead of a scratch + // buffer, but would require code that can do the conversion in-place. 
+ void *mem = GUARDED_ALLOC(chunk_element_count * item_count * + sizeof(SQLWCHAR) * 2); + it->scratch_buffer_ = mem; + if (!mem) + alloc_errors ++; + } + } + break; + default: + break; + } + + if (it->sql_nullable_) { + // if the type is nullable, allocate a buffer for null + // data (ODBC buffer, that has SQLLEN size) + void *mem = GUARDED_ALLOC(chunk_element_count * sizeof(SQLLEN)); + it->null_buffer_ = static_cast(mem); + if (!mem) + alloc_errors ++; + + if (keep_nulls) + { + // also allocate a numpy array for bools if null data is wanted + arr = PyArray_SimpleNew(1, &npy_array_count, NPY_BOOL); + it->npy_array_nulls_ = reinterpret_cast(arr); + if (!it->npy_array_nulls_) + alloc_errors++; + } + } + } + + if (!alloc_errors) + { + allocated_results_count_ = buffer_element_count; + chunk_size_ = chunk_element_count; + } + + return alloc_errors; + } + + int + query_desc::resize(size_t new_size) + /* + resize the numpy array elements to the new_size. + the chunk_size and associated buffers are to be preserved. + */ + { + int alloc_fail = 0; + npy_intp size = static_cast(new_size); + for (std::vector::iterator it = columns_.begin(); + it < columns_.end(); ++it) + { + void *old_data=PyArray_DATA(it->npy_array_); + int failed = resize_array(it->npy_array_, size); + void *new_data=PyArray_DATA(it->npy_array_); + + TRACE_NOLOC("Array for column %s moved. 
%p -> %p", it->sql_name_, old_data, new_data); + // if it has an array for nulls, resize it as well + if (it->npy_array_nulls_) + { + failed += resize_array(it->npy_array_nulls_, size); + } + + if (failed) + alloc_fail += failed; + } + + if (!alloc_fail) + allocated_results_count_ = new_size; + + return alloc_fail; + } + + int + query_desc::ensure() + /* + make sure there is space allocated for the next step + return 0 if everything ok, any other value means a problem was found + due to resizing + */ + { + if (allocated_results_count_ < offset_ + chunk_size_) + { + return resize(offset_ + chunk_size_); + } + + return 0; + } + + void + query_desc::convert(size_t count) + /* + Converts any column that requires conversion from the type returned + by odbc to the type expected in numpy. Right now this is only needed + for fields related to time. Note that odbc itself may handle other + conversions, like decimal->double with the appropriate SQLBindCol. + + The conversion also includes the handling of nulls. In the case of + NULL a default value is inserted in the resulting column. + */ + { + for (std::vector::iterator it = columns_.begin(); + it < columns_.end(); ++it) + { + // TODO: It should be possible to generalize this and make it + // more convenient to add types if a conversion function + // was placed in the column structure. + // Probably nulls could be handled by that conversion + // function as well. 
+ if (it->scratch_buffer_) { // a conversion is needed + CHECK_ALLOC_GUARDS(it->scratch_buffer_, + "scratch buffer for field %s\n", + it->sql_name_); + convert_buffer(it->npy_array_, + it->scratch_buffer_, it->sql_c_type_, + offset_, count); + } + + if (it->null_buffer_) { // nulls are present + CHECK_ALLOC_GUARDS(it->null_buffer_, + "null buffer for field %s\n", + it->sql_name_); + fill_NAarray(it->npy_array_, + it->npy_array_nulls_, + it->null_buffer_, + offset_, count); + } + } + } + + void + query_desc::advance(size_t count) + /* + Advance the current position + */ + { + offset_ += count; + } + + void + query_desc::cleanup() + { + std::vector tmp; + columns_.swap(tmp); + } + + void + query_desc::dump_column_mapping() const + { + const char* fmt_str_head = "%-20.20s %-15.15s %-10.10s %-8.8s %-20.20s\n"; + const char* fmt_str = "%-20.20s %-15.15s %-10u %-8.8s %-20.20s\n"; + const char* dashes = "----------------------------------------"; + TRACE_NOLOC(fmt_str_head, "name", "sql type", "size", "null?", "c type"); + TRACE_NOLOC(fmt_str_head, dashes, dashes, dashes, dashes, dashes); + for (std::vector::const_iterator it = columns_.begin(); + it < columns_.end(); ++it) + { + TRACE_NOLOC(fmt_str, it->sql_name_, sql_type_to_str(it->sql_type_), + it->sql_size_, + it->sql_nullable_?"null":"not null", + sql_c_type_to_str(it->sql_c_type_)); + } + } +} + +size_t +print_error_types(query_desc& qd, size_t err_count, char *buff, + size_t buff_size) +{ + size_t acc = snprintf(buff, buff_size, + "%d fields with unsupported types found:\n", + (int)err_count); + + for (std::vector::iterator it = qd.columns_.begin(); + it < qd.columns_.end(); ++it) + { + if (0 == it->npy_type_descr_) { + // if numpy type descr is empty means a failed translation. + acc += snprintf(buff+acc, acc < buff_size? 
buff_size - acc : 0, + "\t'%s' type: %s (%d) size: %d decimal: %d\n", + it->sql_name_, + sql_type_to_str(it->sql_type_), (int)it->sql_type_, + (int)it->sql_size_, (int)it->sql_decimal_); + } + } + + return acc; +} + +int +raise_unsupported_types_exception(int err_count, query_desc& qd) +{ + char error[4096]; + char *use_string = error; + size_t count = print_error_types(qd, err_count, error, sizeof(error)); + + if (count >= sizeof(error)) + { + // did not fit, truncated + char *error_alloc = (char*)GUARDED_ALLOC(count); + if (error_alloc) { + use_string = error_alloc; + print_error_types(qd, count, error_alloc, count); + } + } + + RaiseErrorV(0, PyExc_TypeError, use_string); + + if (use_string != error) { + // we had to allocate + GUARDED_DEALLOC(use_string); + } + return 0; +} + +/** +// Takes an ODBC cursor object and creates a Python dictionary of +// NumPy arrays. It also creates some helpers for the NULLS, and +// datetimes. +// +// This is called after the ODBC query is complete. +// +// @param cur The ODBC cursor object. +// +// @param nrows The number of rows that were returned by the query. +// +// @param lower If true, makes the column names in the NumPy dtype all +// lowercase. +// +// @returns 0 on success +*/ +static int +perform_array_query(query_desc& result, Cursor* cur, npy_intp nrows, bool lower, bool want_nulls) +{ + SQLRETURN rc; + /* XXX is true a good default? + was: bool use_unicode = cur->cnxn->unicode_results; */ + bool use_unicode = true; + size_t outsize, chunk_size; + + if (nrows < 0) { + // chunked, no know final size + outsize = DEFAULT_ROWS_TO_BE_ALLOCATED; + chunk_size = DEFAULT_ROWS_TO_BE_FETCHED; + } + else { + // all in one go + outsize = static_cast(nrows); + chunk_size = static_cast(nrows); + } + + I(cur->hstmt != SQL_NULL_HANDLE && cur->colinfos != 0); + + if (cur->cnxn->hdbc == SQL_NULL_HANDLE) + { + /* + Is this needed or just convenient? 
+ Won't ODBC fail gracefully (through an ODBC error code) when + trying to use a bad handle? + */ + return 0 == RaiseErrorV(0, ProgrammingError, + "The cursor's connection was closed."); + } + + { + PyNoGIL ctxt; + rc = result.init_from_statement(cur->hstmt); + } + + if (cur->cnxn->hdbc == SQL_NULL_HANDLE) + { + // The connection was closed by another thread in the + // ALLOW_THREADS block above. + return 0 == RaiseErrorV(0, ProgrammingError, + "The cursor's connection was closed."); + } + + if (!SQL_SUCCEEDED(rc)) + { + // Note: The SQL Server driver sometimes returns HY007 here if + // multiple statements (separated by ;) were submitted. This + // is not documented, but I've seen it with multiple + // successful inserts. + return 0 == RaiseErrorFromHandle(cur->cnxn, + "ODBC failed to describe the resulting columns", + cur->cnxn->hdbc, cur->hstmt); + } + + if (lower) + result.lowercase_fields(); + + int unsupported_fields = result.translate_types(use_unicode); + if (unsupported_fields) + { + // TODO: add better diagnosis, pointing out the fields and + // their types in a human readable form. 
+ return 0 == raise_unsupported_types_exception(unsupported_fields, result); + } + + if (pyodbc_tracing_enabled) + result.dump_column_mapping(); + + int allocation_errors = result.allocate_buffers(outsize, chunk_size, want_nulls); + if (allocation_errors) + { + return 0 == RaiseErrorV(0, PyExc_MemoryError, + "Can't allocate result buffers", + outsize); + } + + fetch_status status(cur->hstmt, result.chunk_size_); + do { + TRACE_NOLOC("Fetching %d rows..\n", result.chunk_size_); + int error = result.ensure(); + if (error) { + return 0 == RaiseErrorV(0, PyExc_MemoryError, + "Can't allocate result buffers"); + } + + rc = result.bind_cols(); + if (!SQL_SUCCEEDED(rc)) { + return 0 == RaiseErrorFromHandle(cur->cnxn, "ODBC failed when binding columns", + cur->cnxn->hdbc, cur->hstmt); + + } + + // Do the fetch + { + PyNoGIL ctxt; + rc = SQLFetchScroll(status.hstmt_, SQL_FETCH_NEXT, 0); + } + + + // Sometimes (test_exhaust_execute_buffer), the SQLite ODBC + // driver returns an error here, but it should not! I'm not + // sure that this solution is the correct one, but anyway. + if ((rc == SQL_NO_DATA) || (rc == -1)) { // XXX + //if (rc == SQL_NO_DATA) { + TRACE_NOLOC("No more data available (%d)\n", (int)rc); + break; + } + else if (rc < 0) { + PyErr_SetString(PyExc_RuntimeError, "error in SQLFetchScroll"); + return rc; + } + + // The next check creates false positives on SQLite, as the + // NumRowsFetched seems arbitrary (i.e. not set). Probably + // reveals a problem in the ODBC driver. 
+ if (status.rows_read_ > static_cast(result.chunk_size_)) { + // Let's reset its value to 0 instead (the most probable value here) + TRACE_NOLOC("WARN: rows read reported is greater than requested (Read: %d, Requested: %d)\n", + static_cast(status.rows_read_), + static_cast(result.chunk_size_)); + + status.rows_read_ = 0; + } + + TRACE_NOLOC("\nConverting %d row(s)\n", status.rows_read_); + result.convert(status.rows_read_); + result.advance(status.rows_read_); + + // This exits the loop when the amount of rows was known + // a-priori, so it is enough with a single call + if (nrows >= 0) + break; + + // We assume that when the number of rows read is lower than + // the number we asked for, this means we are done. + } while(status.rows_read_ == static_cast(result.chunk_size_)); + + // Finally, shrink size of final container, if needed + if (result.offset_ < result.allocated_results_count_) { + int alloc_failures = result.resize(result.offset_); + if (alloc_failures) { + // note that this shouldn't be happening, as a shrinking realloc + // should always succeed! + TRACE_NOLOC("WARN: Unexpected failure when trying to shrink arrays"); + return 0 == RaiseErrorV(0, PyExc_MemoryError, + "Can't allocate result buffers"); + } + } + + return 0; +} + + +static PyObject* +query_desc_to_dictarray(query_desc& qd, const char *null_suffix) +/* + Build a dictarray (dictionary of NumPy arrays) from the query_desc + + returns the python dictionary object, or 0 if an error occurred. In case + of an error the appropriate python exception is raised. 
+ */ +{ + PyObject *dictarray = PyDict_New(); + + if (dictarray) { + for (std::vector::iterator it = qd.columns_.begin(); + it < qd.columns_.end(); ++it) + { + int rv; + rv = PyDict_SetItemString(dictarray, + reinterpret_cast(it->sql_name_), + reinterpret_cast(it->npy_array_)); + + if (rv < 0) { + /* out of mem is very likely here */ + Py_DECREF(dictarray); + return 0; + } + + if (it->npy_array_nulls_) { + char column_nulls_name[350]; + snprintf(column_nulls_name, sizeof(column_nulls_name), "%s%s",it->sql_name_,null_suffix); + rv = PyDict_SetItemString(dictarray, + column_nulls_name, + reinterpret_cast(it->npy_array_nulls_)); + if (rv < 0) { + Py_DECREF(dictarray); + return 0; + } + } + } + } + + return dictarray; +} + +// +// Create and fill a dictarray out of a query +// +// arguments: +// cursor - cursor object to fetch the rows from +// nrows - number of rows to fetch, -1 for all rows +// null_suffix - suffix to add to the column name for the bool column holding the nulls. NULL means we don't want nulls. +static PyObject* +create_fill_dictarray(Cursor* cursor, npy_intp nrows, const char* null_suffix) +{ + int error; + query_desc qd; + + error = perform_array_query(qd, cursor, nrows, lowercase(), null_suffix != 0); + if (error) { + TRACE_NOLOC("WARN: perform_querydesc returned %d errors\n", error); + return 0; + } + + TRACE_NOLOC("\nBuilding dictarray.\n"); + PyObject *dictarray = query_desc_to_dictarray(qd, null_suffix); + if (!dictarray) { + TRACE_NOLOC("WARN: Failed to build dictarray from the query results.\n"); + return 0; + } + + return dictarray; +} + +// ----------------------------------------------------------------------------- +// Method implementation +// ----------------------------------------------------------------------------- +static char *Cursor_npfetch_kwnames[] = { + "size", // keyword to read the maximum number of rows. Defaults to all. 
+ "return_nulls", // keyword to make a given fetch to add boolean columns for nulls + "null_suffix", // keyword providing the string to use as suffix +}; + + +// +// The main cursor.fetchdict() method +// + +PyObject* +Cursor_fetchdictarray(PyObject* self, PyObject* args, PyObject *kwargs) +{ + Cursor* cursor = Cursor_Validate(self, CURSOR_REQUIRE_RESULTS | CURSOR_RAISE_ERROR); + if (!cursor) + return 0; + + /* + note: ssize_t is used as a type for parse tuple as it looks like + the integer in ParseTuple that is more likely to have the same size + as a npy_intp + */ + TRACE("\n\nParse tuple\n"); + ssize_t nrows = -1; + PyObject *return_nulls = NULL; + const char *null_suffix = "_isnull"; + if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|nOs", Cursor_npfetch_kwnames, + &nrows, &return_nulls, &null_suffix)) + return 0; + + bool preserve_nulls = return_nulls?PyObject_IsTrue(return_nulls):false; + TRACE("Foo\n"); + TRACE_NOLOC("\n\nCursor fetchdictarray\n\tnrows:%d\n\treturn_nulls:%s\n\tnull_suffix:%s\n\thandle:%p\n\tunicode_results:%s\n", + (int)nrows, preserve_nulls?"yes":"no", null_suffix, (void*)cursor->hstmt); + /*cursor->cnxn->unicode_results?"Yes":"No");*/ + npy_intp arg = nrows; + PyObject *rv = create_fill_dictarray(cursor, arg, preserve_nulls?null_suffix:0); + TRACE_NOLOC("\nCursor fetchdictarray done.\n\tdictarray: %p\n\n", rv); + return rv; +} + +char fetchdictarray_doc[] = + "fetchdictarray(size=-1, return_nulls=False, null_suffix='_isnull')\n" \ + " --> a dictionary of column arrays.\n" \ + "\n" + "Fetch as many rows as specified by size into a dictionary of NumPy\n" \ + "ndarrays (dictarray). The dictionary will contain a key for each column,\n"\ + "with its value being a NumPy ndarray holding its value for the fetched\n" \ + "rows. Optionally, extra columns will be added to signal nulls on\n" \ + "nullable columns.\n" \ + "\n" \ + "Parameters\n" \ + "----------\n" \ + "size : int, optional\n" \ + " The number of rows to fetch. 
Use -1 (the default) to fetch all\n" \ + " remaining rows.\n" \ + "return_nulls : boolean, optional\n" \ + " If True, information about null values will be included adding a\n" \ + " boolean array using as key a string built by concatenating the\n" \ + " column name and null_suffix.\n" \ + "null_suffix : string, optional\n" \ + " A string used as a suffix when building the key for null values.\n"\ + " Only used if return_nulls is True.\n" \ + "\n" \ + "Returns\n" \ + "-------\n" \ + "out: dict\n" \ + " A dictionary mapping column names to an ndarray holding its values\n" \ + " for the fetched rows. The dictionary will use the column name as\n" \ + " key for the ndarray containing values associated to that column.\n" \ + " Optionally, null information for nullable columns will be provided\n" \ + " by adding additional boolean columns named after the nullable column\n"\ + " concatenated to null_suffix\n" \ + "\n" \ + "Remarks\n" \ + "-------\n" \ + "Similar to fetchmany(size), but returning a dictionary of NumPy ndarrays\n" \ + "for the results instead of a Python list of tuples of objects, reducing\n" \ + "memory footprint as well as improving performance.\n" \ + "fetchdictarray is overall more efficient that fetchsarray.\n" \ + "\n" \ + "See Also\n" \ + "--------\n" \ + "fetchmany : Fetch rows into a Python list of rows.\n" \ + "fetchall : Fetch the remaining rows into a Python lis of rows.\n" \ + "\n"; + + +#if PY_VERSION_HEX >= 0x03000000 +int NpContainer_init() +#else +void NpContainer_init() +#endif +{ + import_array(); + // If the version of Numpy is >= API 7 (Numpy 1.7), + // then enable datetime features. This allows datetime + // to work even if pyodbc is built against Numpy 1.5. 
+ if (PyArray_GetNDArrayCFeatureVersion() >= 7) { + CAN_USE_DATETIME = true; + } + +#if PY_VERSION_HEX >= 0x03000000 + return 0; +#else + return; +#endif +} diff --git a/src/npcontainer.h b/src/npcontainer.h new file mode 100644 index 00000000..65adb0d7 --- /dev/null +++ b/src/npcontainer.h @@ -0,0 +1,17 @@ +#ifndef _NPCONTAINER_H_ +#define _NPCONTAINER_H_ + + +#if PY_VERSION_HEX >= 0x03000000 +int NpContainer_init(); +#else +void NpContainer_init(); +#endif + +PyObject *Cursor_fetchdictarray(PyObject *self, PyObject *args, PyObject *kwargs); + +extern char fetchdictarray_doc[]; + +extern Py_ssize_t iopro_text_limit; + +#endif // _NPCONTAINER_H_ diff --git a/src/params.cpp b/src/params.cpp index 4495252f..732f5daa 100644 --- a/src/params.cpp +++ b/src/params.cpp @@ -7,6 +7,11 @@ // // Column Size: "For character types, this is the length in characters of the data" +// NOTE: I have not ported the "fast executemany" code from 4.x to 5.x yet. Once 5.0 is +// complete, I'll port it in 5.1. My goal is to ensure it uses the exact same binding code +// between both code paths. I'll probably also rename the feature to something that describes +// it more precisely like "array binding". + #include "pyodbc.h" #include "wrapper.h" #include "textenc.h" @@ -14,7 +19,6 @@ #include "cursor.h" #include "params.h" #include "connection.h" -#include "buffer.h" #include "errors.h" #include "dbspecific.h" #include "row.h" @@ -26,6 +30,7 @@ inline Connection* GetConnection(Cursor* cursor) return (Connection*)cursor->cnxn; } +/* struct DAEParam { PyObject *cell; @@ -44,14 +49,6 @@ static int DetectCType(PyObject *cell, ParamInfo *pi) pi->ValueType = SQL_C_BIT; pi->BufferLength = 1; } -#if PY_MAJOR_VERSION < 3 - else if (PyInt_Check(cell)) - { - Type_Int: - pi->ValueType = sizeof(long) == 8 ? 
SQL_C_SBIGINT : SQL_C_LONG; - pi->BufferLength = sizeof(long); - } -#endif else if (PyLong_Check(cell)) { Type_Long: @@ -78,11 +75,7 @@ static int DetectCType(PyObject *cell, ParamInfo *pi) Type_Bytes: // Assume the SQL type is also character (2.x) or binary (3.x). // If it is a max-type (ColumnSize == 0), use DAE. -#if PY_MAJOR_VERSION < 3 - pi->ValueType = SQL_C_CHAR; -#else pi->ValueType = SQL_C_BINARY; -#endif pi->BufferLength = pi->ColumnSize ? pi->ColumnSize : sizeof(DAEParam); } else if (PyUnicode_Check(cell)) @@ -119,21 +112,12 @@ static int DetectCType(PyObject *cell, ParamInfo *pi) pi->BufferLength = sizeof(SQL_TIME_STRUCT); } } -#if PY_VERSION_HEX >= 0x02060000 else if (PyByteArray_Check(cell)) { Type_ByteArray: pi->ValueType = SQL_C_BINARY; pi->BufferLength = pi->ColumnSize ? pi->ColumnSize : sizeof(DAEParam); } -#endif -#if PY_MAJOR_VERSION < 3 - else if (PyBuffer_Check(cell)) - { - pi->ValueType = SQL_C_BINARY; - pi->BufferLength = pi->ColumnSize && PyBuffer_GetMemory(cell, 0) >= 0 ? pi->ColumnSize : sizeof(DAEParam); - } -#endif else if (cell == Py_None || cell == null_binary) { // Use the SQL type to guess what Nones should be inserted as here. @@ -155,11 +139,7 @@ static int DetectCType(PyObject *cell, ParamInfo *pi) case SQL_SMALLINT: case SQL_INTEGER: case SQL_TINYINT: -#if PY_MAJOR_VERSION < 3 - goto Type_Int; -#else goto Type_Long; -#endif case SQL_REAL: case SQL_FLOAT: case SQL_DOUBLE: @@ -169,11 +149,9 @@ static int DetectCType(PyObject *cell, ParamInfo *pi) case SQL_BINARY: case SQL_VARBINARY: case SQL_LONGVARBINARY: -#if PY_VERSION_HEX >= 0x02060000 - goto Type_ByteArray; -#else + // TODO: Shouldn't this be bytes? 
+ // goto Type_ByteArray; goto Type_Bytes; -#endif case SQL_TYPE_DATE: goto Type_Date; case SQL_SS_TIME2: @@ -224,14 +202,6 @@ static int PyToCType(Cursor *cur, unsigned char **outbuf, PyObject *cell, ParamI return false; WRITEOUT(char, outbuf, cell == Py_True, ind); } -#if PY_MAJOR_VERSION < 3 - else if (PyInt_Check(cell)) - { - if (pi->ValueType != (sizeof(long) == 8 ? SQL_C_SBIGINT : SQL_C_LONG)) - return false; - WRITEOUT(long, outbuf, PyInt_AS_LONG(cell), ind); - } -#endif else if (PyLong_Check(cell)) { if (pi->ValueType == SQL_C_SBIGINT) @@ -261,8 +231,8 @@ static int PyToCType(Cursor *cur, unsigned char **outbuf, PyObject *cell, ParamI if (!scaler_table[pi->DecimalDigits - 1]) { if (!tenObject) - tenObject = PyInt_FromLong(10); - PyObject *scaleObj = PyInt_FromLong(pi->DecimalDigits); + tenObject = PyLong_FromLong(10); + PyObject *scaleObj = PyLong_FromLong(pi->DecimalDigits); scaler_table[pi->DecimalDigits - 1] = PyNumber_Power(tenObject, scaleObj, Py_None); Py_XDECREF(scaleObj); } @@ -290,11 +260,7 @@ static int PyToCType(Cursor *cur, unsigned char **outbuf, PyObject *cell, ParamI } else if (PyBytes_Check(cell)) { -#if PY_MAJOR_VERSION < 3 - if (pi->ValueType != SQL_C_CHAR) -#else if (pi->ValueType != SQL_C_BINARY) -#endif return false; Py_ssize_t len = PyBytes_GET_SIZE(cell); if (!pi->ColumnSize) // DAE @@ -327,66 +293,38 @@ static int PyToCType(Cursor *cur, unsigned char **outbuf, PyObject *cell, ParamI // Same size Different size // DAE DAE only Convert + DAE // non-DAE Copy Convert + Copy - if (sizeof(Py_UNICODE) != sizeof(SQLWCHAR)) - { - const TextEnc& enc = cur->cnxn->unicode_enc; - Object encoded(PyCodec_Encode(cell, enc.name, "strict")); - if (!encoded) - return false; + const TextEnc& enc = cur->cnxn->unicode_enc; + Object encoded(PyCodec_Encode(cell, enc.name, "strict")); + if (!encoded) + return false; - if (enc.optenc == OPTENC_NONE && !PyBytes_CheckExact(encoded)) - { - PyErr_Format(PyExc_TypeError, "Unicode write encoding '%s' returned 
unexpected data type: %s", - enc.name, encoded.Get()->ob_type->tp_name); - return false; - } + if (enc.optenc == OPTENC_NONE && !PyBytes_CheckExact(encoded)) + { + PyErr_Format(PyExc_TypeError, "Unicode write encoding '%s' returned unexpected data type: %s", + enc.name, encoded.Get()->ob_type->tp_name); + return false; + } - len = PyBytes_GET_SIZE(encoded); - if (!pi->ColumnSize) - { - // DAE - DAEParam *pParam = (DAEParam*)*outbuf; - pParam->cell = encoded.Detach(); - pParam->maxlen = cur->cnxn->GetMaxLength(pi->ValueType); - *outbuf += sizeof(DAEParam); - ind = cur->cnxn->need_long_data_len ? SQL_LEN_DATA_AT_EXEC((SQLLEN)len) : SQL_DATA_AT_EXEC; - } - else - { - if (len > pi->BufferLength) - { - RaiseErrorV(0, ProgrammingError, "String data, right truncation: length %u buffer %u", len, pi->BufferLength); - return false; - } - memcpy(*outbuf, PyBytes_AS_STRING((PyObject*)encoded), len); - *outbuf += pi->BufferLength; - ind = len; - } + len = PyBytes_GET_SIZE(encoded); + if (!pi->ColumnSize) + { + // DAE + DAEParam *pParam = (DAEParam*)*outbuf; + pParam->cell = encoded.Detach(); + pParam->maxlen = cur->cnxn->GetMaxLength(pi->ValueType); + *outbuf += sizeof(DAEParam); + ind = cur->cnxn->need_long_data_len ? SQL_LEN_DATA_AT_EXEC((SQLLEN)len) : SQL_DATA_AT_EXEC; } else { - len *= sizeof(SQLWCHAR); - - if (!pi->ColumnSize) // DAE - { - Py_INCREF(cell); - DAEParam *pParam = (DAEParam*)*outbuf; - pParam->cell = cell; - pParam->maxlen= cur->cnxn->GetMaxLength(pi->ValueType); - *outbuf += sizeof(DAEParam); - ind = cur->cnxn->need_long_data_len ? 
SQL_LEN_DATA_AT_EXEC((SQLLEN)len) : SQL_DATA_AT_EXEC; - } - else + if (len > pi->BufferLength) { - if (len > pi->BufferLength) - { - RaiseErrorV(0, ProgrammingError, "String data, right truncation: length %u buffer %u", len, pi->BufferLength); - return false; - } - memcpy(*outbuf, PyUnicode_AS_DATA(cell), len); - *outbuf += pi->BufferLength; - ind = len; + RaiseErrorV(0, ProgrammingError, "String data, right truncation: length %u buffer %u", len, pi->BufferLength); + return false; } + memcpy(*outbuf, PyBytes_AS_STRING((PyObject*)encoded), len); + *outbuf += pi->BufferLength; + ind = len; } } else if (PyDateTime_Check(cell)) @@ -448,7 +386,6 @@ static int PyToCType(Cursor *cur, unsigned char **outbuf, PyObject *cell, ParamI ind = sizeof(SQL_TIME_STRUCT); } } -#if PY_VERSION_HEX >= 0x02060000 else if (PyByteArray_Check(cell)) { if (pi->ValueType != SQL_C_BINARY) @@ -475,38 +412,6 @@ static int PyToCType(Cursor *cur, unsigned char **outbuf, PyObject *cell, ParamI ind = len; } } -#endif -#if PY_MAJOR_VERSION < 3 - else if (PyBuffer_Check(cell)) - { - if (pi->ValueType != SQL_C_BINARY) - return false; - const char* pb; - Py_ssize_t len = PyBuffer_GetMemory(cell, &pb); - if (len < 0) - { - // DAE - DAEParam *pParam = (DAEParam*)*outbuf; - len = PyBuffer_Size(cell); - Py_INCREF(cell); - pParam->cell = cell; - pParam->maxlen = cur->cnxn->GetMaxLength(pi->ValueType); - *outbuf += pi->BufferLength; - ind = cur->cnxn->need_long_data_len ? 
SQL_LEN_DATA_AT_EXEC((SQLLEN)len) : SQL_DATA_AT_EXEC; - } - else - { - if (len > pi->BufferLength) - { - RaiseErrorV(0, ProgrammingError, "String data, right truncation: row %u column %u", 0 /* TODO */, 0 /* TODO */); - return false; - } - memcpy(*outbuf, pb, len); - *outbuf += pi->BufferLength; - ind = len; - } - } -#endif else if (IsInstanceForThread(cell, "uuid", "UUID", &cls) && cls) { if (pi->ValueType != SQL_C_GUID) @@ -535,9 +440,9 @@ static int PyToCType(Cursor *cur, unsigned char **outbuf, PyObject *cell, ParamI Py_XDECREF(normCell); SQL_NUMERIC_STRUCT *pNum = (SQL_NUMERIC_STRUCT*)*outbuf; - pNum->sign = !PyInt_AsLong(PyTuple_GET_ITEM(cellParts, 0)); + pNum->sign = !PyLong_AsLong(PyTuple_GET_ITEM(cellParts, 0)); PyObject* digits = PyTuple_GET_ITEM(cellParts, 1); - long exp = PyInt_AsLong(PyTuple_GET_ITEM(cellParts, 2)); + long exp = PyLong_AsLong(PyTuple_GET_ITEM(cellParts, 2)); Py_ssize_t numDigits = PyTuple_GET_SIZE(digits); // PyDecimal is digits * 10**exp = digits / 10**-exp @@ -553,11 +458,11 @@ static int PyToCType(Cursor *cur, unsigned char **outbuf, PyObject *cell, ParamI PyObject *newDigits = PyTuple_New(numDigits + scaleDiff); for (Py_ssize_t i = 0; i < numDigits; i++) { - PyTuple_SET_ITEM(newDigits, i, PyInt_FromLong(PyNumber_AsSsize_t(PyTuple_GET_ITEM(digits, i), 0))); + PyTuple_SET_ITEM(newDigits, i, PyLong_FromLong(PyNumber_AsSsize_t(PyTuple_GET_ITEM(digits, i), 0))); } for (Py_ssize_t i = numDigits; i < scaleDiff + numDigits; i++) { - PyTuple_SET_ITEM(newDigits, i, PyInt_FromLong(0)); + PyTuple_SET_ITEM(newDigits, i, PyLong_FromLong(0)); } PyObject *args = Py_BuildValue("((iOi))", 0, newDigits, 0); PyObject *scaledDecimal = PyObject_CallObject((PyObject*)cell->ob_type, args); @@ -597,6 +502,8 @@ static int PyToCType(Cursor *cur, unsigned char **outbuf, PyObject *cell, ParamI return true; } +*/ + static bool GetParamType(Cursor* cur, Py_ssize_t iParam, SQLSMALLINT& type); static void FreeInfos(ParamInfo* a, Py_ssize_t count) @@ -604,12 
+511,12 @@ static void FreeInfos(ParamInfo* a, Py_ssize_t count) for (Py_ssize_t i = 0; i < count; i++) { if (a[i].allocated) - pyodbc_free(a[i].ParameterValuePtr); + PyMem_Free(a[i].ParameterValuePtr); if (a[i].ParameterType == SQL_SS_TABLE && a[i].nested) FreeInfos(a[i].nested, a[i].maxlength); Py_XDECREF(a[i].pObject); } - pyodbc_free(a); + PyMem_Free(a); } static bool GetNullInfo(Cursor* cur, Py_ssize_t index, ParamInfo& info) @@ -634,7 +541,6 @@ static bool GetNullBinaryInfo(Cursor* cur, Py_ssize_t index, ParamInfo& info) } -#if PY_MAJOR_VERSION >= 3 static bool GetBytesInfo(Cursor* cur, Py_ssize_t index, PyObject* param, ParamInfo& info, bool isTVP) { // The Python 3 version that writes bytes as binary data. @@ -666,67 +572,6 @@ static bool GetBytesInfo(Cursor* cur, Py_ssize_t index, PyObject* param, ParamIn return true; } -#endif - -#if PY_MAJOR_VERSION < 3 -static bool GetStrInfo(Cursor* cur, Py_ssize_t index, PyObject* param, ParamInfo& info, bool isTVP) -{ - const TextEnc& enc = cur->cnxn->str_enc; - - info.ValueType = enc.ctype; - - Py_ssize_t cch = PyString_GET_SIZE(param); - - info.ColumnSize = isTVP ? 0 : (SQLUINTEGER)max(cch, 1); - - Object encoded; - - if (enc.optenc == OPTENC_RAW) - { - // Take the text as-is. This is not really a good idea since users will need to make - // sure the encoding over the wire matches their system encoding, but it will be wanted - // and it is fast when you get it to work. - encoded = param; - } - else - { - // Encode the text with the user's encoding. - encoded = PyCodec_Encode(param, enc.name, "strict"); - if (!encoded) - return false; - - if (!PyBytes_CheckExact(encoded)) - { - // Not all encodings return bytes. 
- PyErr_Format(PyExc_TypeError, "Unicode read encoding '%s' returned unexpected data type: %s", - enc.name, encoded.Get()->ob_type->tp_name); - return false; - } - } - - Py_ssize_t cb = PyBytes_GET_SIZE(encoded); - info.pObject = encoded.Detach(); - - SQLLEN maxlength = cur->cnxn->GetMaxLength(info.ValueType); - if (maxlength == 0 || cb <= maxlength || isTVP) - { - info.ParameterType = (enc.ctype == SQL_C_CHAR) ? SQL_VARCHAR : SQL_WVARCHAR; - info.ParameterValuePtr = PyBytes_AS_STRING(info.pObject); - info.StrLen_or_Ind = (SQLINTEGER)cb; - } - else - { - // Too long to pass all at once, so we'll provide the data at execute. - info.ParameterType = (enc.ctype == SQL_C_CHAR) ? SQL_LONGVARCHAR : SQL_WLONGVARCHAR; - info.ParameterValuePtr = &info; - info.BufferLength = sizeof(ParamInfo*); - info.StrLen_or_Ind = cur->cnxn->need_long_data_len ? SQL_LEN_DATA_AT_EXEC((SQLINTEGER)cb) : SQL_DATA_AT_EXEC; - info.maxlength = maxlength; - } - - return true; -} -#endif static bool GetUnicodeInfo(Cursor* cur, Py_ssize_t index, PyObject* param, ParamInfo& info, bool isTVP) @@ -866,33 +711,6 @@ inline bool NeedsBigInt(long long ll) return ll < -2147483647 || ll > 2147483647; } -#if PY_MAJOR_VERSION < 3 -static bool GetIntInfo(Cursor* cur, Py_ssize_t index, PyObject* param, ParamInfo& info, bool isTVP) -{ - long long value = PyLong_AsLongLong(param); - if (PyErr_Occurred()) - return false; - - if (isTVP || NeedsBigInt(value)) - { - info.Data.i64 = (INT64)value; - info.ValueType = SQL_C_SBIGINT; - info.ParameterType = SQL_BIGINT; - info.ParameterValuePtr = &info.Data.i64; - info.StrLen_or_Ind = 8; - } - else - { - info.Data.i32 = (int)value; - info.ValueType = SQL_C_LONG; - info.ParameterType = SQL_INTEGER; - info.ParameterValuePtr = &info.Data.i32; - info.StrLen_or_Ind = 4; - } - return true; -} -#endif - static bool GetLongInfo(Cursor* cur, Py_ssize_t index, PyObject* param, ParamInfo& info, bool isTVP) { // Since some drivers like Access don't support BIGINT, we use INTEGER when 
possible. @@ -955,14 +773,14 @@ static char* CreateDecimalString(long sign, PyObject* digits, long exp) // (1 2 3) exp = 2 --> '12300' len = sign + count + exp + 1; // 1: NULL - pch = (char*)pyodbc_malloc((size_t)len); + pch = (char*)PyMem_Malloc((size_t)len); if (pch) { char* p = pch; if (sign) *p++ = '-'; for (long i = 0; i < count; i++) - *p++ = (char)('0' + PyInt_AS_LONG(PyTuple_GET_ITEM(digits, i))); + *p++ = (char)('0' + PyLong_AS_LONG(PyTuple_GET_ITEM(digits, i))); for (long i = 0; i < exp; i++) *p++ = '0'; *p = 0; @@ -973,7 +791,7 @@ static char* CreateDecimalString(long sign, PyObject* digits, long exp) // (1 2 3) exp = -2 --> 1.23 : prec = 3, scale = 2 len = sign + count + 2; // 2: decimal + NULL - pch = (char*)pyodbc_malloc((size_t)len); + pch = (char*)PyMem_Malloc((size_t)len); if (pch) { char* p = pch; @@ -981,10 +799,10 @@ static char* CreateDecimalString(long sign, PyObject* digits, long exp) *p++ = '-'; int i = 0; for (; i < (count + exp); i++) - *p++ = (char)('0' + PyInt_AS_LONG(PyTuple_GET_ITEM(digits, i))); + *p++ = (char)('0' + PyLong_AS_LONG(PyTuple_GET_ITEM(digits, i))); *p++ = '.'; for (; i < count; i++) - *p++ = (char)('0' + PyInt_AS_LONG(PyTuple_GET_ITEM(digits, i))); + *p++ = (char)('0' + PyLong_AS_LONG(PyTuple_GET_ITEM(digits, i))); *p++ = 0; } } @@ -994,7 +812,7 @@ static char* CreateDecimalString(long sign, PyObject* digits, long exp) len = sign + -exp + 3; // 3: leading zero + decimal + NULL - pch = (char*)pyodbc_malloc((size_t)len); + pch = (char*)PyMem_Malloc((size_t)len); if (pch) { char* p = pch; @@ -1007,12 +825,12 @@ static char* CreateDecimalString(long sign, PyObject* digits, long exp) *p++ = '0'; for (int i = 0; i < count; i++) - *p++ = (char)('0' + PyInt_AS_LONG(PyTuple_GET_ITEM(digits, i))); + *p++ = (char)('0' + PyLong_AS_LONG(PyTuple_GET_ITEM(digits, i))); *p++ = 0; } } - I(pch == 0 || (int)(strlen(pch) + 1) == len); + assert(pch == 0 || (int)(strlen(pch) + 1) == len); return pch; } @@ -1027,7 +845,7 @@ static bool 
GetUUIDInfo(Cursor* cur, Py_ssize_t index, PyObject* param, ParamInf info.ColumnSize = 16; info.allocated = true; - info.ParameterValuePtr = pyodbc_malloc(sizeof(SQLGUID)); + info.ParameterValuePtr = PyMem_Malloc(sizeof(SQLGUID)); if (!info.ParameterValuePtr) { PyErr_NoMemory(); @@ -1057,9 +875,9 @@ static bool GetDecimalInfo(Cursor* cur, Py_ssize_t index, PyObject* param, Param if (!t) return false; - long sign = PyInt_AsLong(PyTuple_GET_ITEM(t.Get(), 0)); + long sign = PyLong_AsLong(PyTuple_GET_ITEM(t.Get(), 0)); PyObject* digits = PyTuple_GET_ITEM(t.Get(), 1); - long exp = PyInt_AsLong(PyTuple_GET_ITEM(t.Get(), 2)); + long exp = PyLong_AsLong(PyTuple_GET_ITEM(t.Get(), 2)); Py_ssize_t count = PyTuple_GET_SIZE(digits); @@ -1087,7 +905,7 @@ static bool GetDecimalInfo(Cursor* cur, Py_ssize_t index, PyObject* param, Param info.DecimalDigits = (SQLSMALLINT)info.ColumnSize; } - I(info.ColumnSize >= (SQLULEN)info.DecimalDigits); + assert(info.ColumnSize >= (SQLULEN)info.DecimalDigits); info.ParameterValuePtr = CreateDecimalString(sign, digits, exp); if (!info.ParameterValuePtr) @@ -1102,46 +920,7 @@ static bool GetDecimalInfo(Cursor* cur, Py_ssize_t index, PyObject* param, Param return true; } -#if PY_MAJOR_VERSION < 3 -static bool GetBufferInfo(Cursor* cur, Py_ssize_t index, PyObject* param, ParamInfo& info) -{ - info.ValueType = SQL_C_BINARY; - - const char* pb; - Py_ssize_t cb = PyBuffer_GetMemory(param, &pb); - - SQLLEN maxlength = cur->cnxn->GetMaxLength(info.ValueType); - if (maxlength == 0 || cb <= maxlength) - { - // There is one segment, so we can bind directly into the buffer object. - - info.ParameterType = SQL_VARBINARY; - info.ParameterValuePtr = (SQLPOINTER)pb; - info.BufferLength = (SQLINTEGER)cb; - info.ColumnSize = (SQLUINTEGER)max(cb, 1); - info.StrLen_or_Ind = (SQLINTEGER)cb; - } - else - { - // There are multiple segments, so we'll provide the data at execution time. 
Pass the PyObject pointer as - // the parameter value which will be passed back to us when the data is needed. (If we release threads, we - // need to up the refcount!) - - info.ParameterType = SQL_LONGVARBINARY; - info.ParameterValuePtr = &info; - info.BufferLength = sizeof(ParamInfo*); - info.ColumnSize = (SQLUINTEGER)PyBuffer_Size(param); - info.StrLen_or_Ind = cur->cnxn->need_long_data_len ? SQL_LEN_DATA_AT_EXEC((SQLLEN)PyBuffer_Size(param)) : SQL_DATA_AT_EXEC; - info.pObject = param; - Py_INCREF(info.pObject); - info.maxlength = maxlength; - } - - return true; -} -#endif -#if PY_VERSION_HEX >= 0x02060000 static bool GetByteArrayInfo(Cursor* cur, Py_ssize_t index, PyObject* param, ParamInfo& info, bool isTVP) { info.ValueType = SQL_C_BINARY; @@ -1171,7 +950,6 @@ static bool GetByteArrayInfo(Cursor* cur, Py_ssize_t index, PyObject* param, Par } return true; } -#endif // TVP @@ -1182,6 +960,10 @@ static bool GetTableInfo(Cursor *cur, Py_ssize_t index, PyObject* param, ParamIn if (nrows > 0) { PyObject *cell0 = PySequence_GetItem(param, 0); + if (cell0 == NULL) + { + return false; + } Py_XDECREF(cell0); if (PyBytes_Check(cell0) || PyUnicode_Check(cell0)) { @@ -1229,13 +1011,8 @@ bool GetParameterInfo(Cursor* cur, Py_ssize_t index, PyObject* param, ParamInfo& if (param == null_binary) return GetNullBinaryInfo(cur, index, info); -#if PY_MAJOR_VERSION >= 3 if (PyBytes_Check(param)) return GetBytesInfo(cur, index, param, info, isTVP); -#else - if (PyBytes_Check(param)) - return GetStrInfo(cur, index, param, info, isTVP); -#endif if (PyUnicode_Check(param)) return GetUnicodeInfo(cur, index, param, info, isTVP); @@ -1258,20 +1035,8 @@ bool GetParameterInfo(Cursor* cur, Py_ssize_t index, PyObject* param, ParamInfo& if (PyFloat_Check(param)) return GetFloatInfo(cur, index, param, info); -#if PY_VERSION_HEX >= 0x02060000 if (PyByteArray_Check(param)) return GetByteArrayInfo(cur, index, param, info, isTVP); -#endif - -#if PY_MAJOR_VERSION < 3 - if (PyInt_Check(param)) - 
return GetIntInfo(cur, index, param, info, isTVP); - - if (PyBuffer_Check(param)) - return GetBufferInfo(cur, index, param, info); -#endif - - // Decimal PyObject* cls = 0; if (!IsInstanceForThread(param, "decimal", "Decimal", &cls)) @@ -1280,8 +1045,6 @@ bool GetParameterInfo(Cursor* cur, Py_ssize_t index, PyObject* param, ParamInfo& if (cls != 0) return GetDecimalInfo(cur, index, param, info, cls); - // UUID - if (!IsInstanceForThread(param, "uuid", "UUID", &cls)) return false; @@ -1300,14 +1063,6 @@ static bool getObjectValue(PyObject *pObject, long& nValue) if (pObject == NULL) return false; -#if PY_MAJOR_VERSION < 3 - if (PyInt_Check(pObject)) - { - nValue = PyInt_AS_LONG(pObject); - return true; - } - -#endif if (PyLong_Check(pObject)) { nValue = PyLong_AsLong(pObject); @@ -1496,7 +1251,7 @@ bool BindParameter(Cursor* cur, Py_ssize_t index, ParamInfo& info) PyObject *row = PySequence_GetItem(info.pObject, PySequence_Size(info.pObject) - info.ColumnSize); Py_XDECREF(row); - info.nested = (ParamInfo*)pyodbc_malloc(ncols * sizeof(ParamInfo)); + info.nested = (ParamInfo*)PyMem_Malloc(ncols * sizeof(ParamInfo)); info.maxlength = ncols; memset(info.nested, 0, ncols * sizeof(ParamInfo)); @@ -1569,7 +1324,7 @@ void FreeParameterInfo(Cursor* cur) // since this information is also freed in the less granular free_results function that clears everything. Py_XDECREF(cur->pPreparedSQL); - pyodbc_free(cur->paramtypes); + PyMem_Free(cur->paramtypes); cur->pPreparedSQL = 0; cur->paramtypes = 0; cur->paramcount = 0; @@ -1577,14 +1332,6 @@ void FreeParameterInfo(Cursor* cur) bool Prepare(Cursor* cur, PyObject* pSql) { -#if PY_MAJOR_VERSION >= 3 - if (!PyUnicode_Check(pSql)) - { - PyErr_SetString(PyExc_TypeError, "SQL must be a Unicode string"); - return false; - } -#endif - // // Prepare the SQL if necessary. 
// @@ -1597,17 +1344,7 @@ bool Prepare(Cursor* cur, PyObject* pSql) const char* szErrorFunc = "SQLPrepare"; const TextEnc* penc; - -#if PY_MAJOR_VERSION < 3 - if (PyBytes_Check(pSql)) - { - penc = &cur->cnxn->str_enc; - } - else -#endif - { - penc = &cur->cnxn->unicode_enc; - } + penc = &cur->cnxn->unicode_enc; Object query(penc->Encode(pSql)); if (!query) @@ -1616,7 +1353,7 @@ bool Prepare(Cursor* cur, PyObject* pSql) bool isWide = (penc->ctype == SQL_C_WCHAR); const char* pch = PyBytes_AS_STRING(query.Get()); - SQLINTEGER cch = (SQLINTEGER)(PyBytes_GET_SIZE(query.Get()) / (isWide ? sizeof(ODBCCHAR) : 1)); + SQLINTEGER cch = (SQLINTEGER)(PyBytes_GET_SIZE(query.Get()) / (isWide ? sizeof(uint16_t) : 1)); TRACE("SQLPrepare(%s)\n", pch); @@ -1675,7 +1412,7 @@ bool PrepareAndBind(Cursor* cur, PyObject* pSql, PyObject* original_params, bool return false; } - cur->paramInfos = (ParamInfo*)pyodbc_malloc(sizeof(ParamInfo) * cParams); + cur->paramInfos = (ParamInfo*)PyMem_Malloc(sizeof(ParamInfo) * cParams); if (cur->paramInfos == 0) { PyErr_NoMemory(); @@ -1710,6 +1447,7 @@ bool PrepareAndBind(Cursor* cur, PyObject* pSql, PyObject* original_params, bool return true; } +/* bool ExecuteMulti(Cursor* cur, PyObject* pSql, PyObject* paramArrayObj) { bool ret = true; @@ -1718,7 +1456,7 @@ bool ExecuteMulti(Cursor* cur, PyObject* pSql, PyObject* paramArrayObj) if (!Prepare(cur, pSql)) return false; - if (!(cur->paramInfos = (ParamInfo*)pyodbc_malloc(sizeof(ParamInfo) * cur->paramcount))) + if (!(cur->paramInfos = (ParamInfo*)PyMem_Malloc(sizeof(ParamInfo) * cur->paramcount))) { PyErr_NoMemory(); return 0; @@ -1818,7 +1556,7 @@ bool ExecuteMulti(Cursor* cur, PyObject* pSql, PyObject* paramArrayObj) // Assume parameters are homogeneous between rows in the common case, to avoid // another rescan for determining the array height. // Subtract number of rows processed as an upper bound. 
- if (!(cur->paramArray = (unsigned char*)pyodbc_malloc(rowlen * (rowcount - r)))) + if (!(cur->paramArray = (unsigned char*)PyMem_Malloc(rowlen * (rowcount - r)))) { PyErr_NoMemory(); goto ErrorRet4; @@ -1857,7 +1595,7 @@ bool ExecuteMulti(Cursor* cur, PyObject* pSql, PyObject* paramArrayObj) if (!colseq) { ErrorRet5: - pyodbc_free(cur->paramArray); + PyMem_Free(cur->paramArray); cur->paramArray = 0; goto ErrorRet4; } @@ -1954,26 +1692,6 @@ bool ExecuteMulti(Cursor* cur, PyObject* pSql, PyObject* paramArrayObj) const TextEnc& enc = cur->cnxn->sqlwchar_enc; PyObject* bytes = NULL; -#if PY_MAJOR_VERSION < 3 - int cb = PyUnicode_GET_DATA_SIZE(objCell) / 2; - const Py_UNICODE* source = PyUnicode_AS_UNICODE(objCell); - - switch (enc.optenc) - { - case OPTENC_UTF8: - bytes = PyUnicode_EncodeUTF8(source, cb, "strict"); - break; - case OPTENC_UTF16: - bytes = PyUnicode_EncodeUTF16(source, cb, "strict", BYTEORDER_NATIVE); - break; - case OPTENC_UTF16LE: - bytes = PyUnicode_EncodeUTF16(source, cb, "strict", BYTEORDER_LE); - break; - case OPTENC_UTF16BE: - bytes = PyUnicode_EncodeUTF16(source, cb, "strict", BYTEORDER_BE); - break; - } -#else switch (enc.optenc) { case OPTENC_UTF8: @@ -1989,7 +1707,6 @@ bool ExecuteMulti(Cursor* cur, PyObject* pSql, PyObject* paramArrayObj) bytes = PyUnicode_AsEncodedString(objCell, "utf_16_be", NULL); break; } -#endif if (bytes && PyBytes_Check(bytes)) { objCell = bytes; @@ -1998,22 +1715,16 @@ bool ExecuteMulti(Cursor* cur, PyObject* pSql, PyObject* paramArrayObj) } szLastFunction = "SQLPutData"; - if (PyBytes_Check(objCell) - #if PY_VERSION_HEX >= 0x02060000 - || PyByteArray_Check(objCell) - #endif - ) + if (PyBytes_Check(objCell) || PyByteArray_Check(objCell)) { char *(*pGetPtr)(PyObject*); Py_ssize_t (*pGetLen)(PyObject*); - #if PY_VERSION_HEX >= 0x02060000 if (PyByteArray_Check(objCell)) { pGetPtr = PyByteArray_AsString; pGetLen = PyByteArray_Size; } else - #endif { pGetPtr = PyBytes_AsString; pGetLen = PyBytes_Size; @@ -2042,25 +1753,6 
@@ bool ExecuteMulti(Cursor* cur, PyObject* pSql, PyObject* paramArrayObj) Py_XDECREF(objCell); } } - #if PY_MAJOR_VERSION < 3 - else if (PyBuffer_Check(objCell)) - { - // Buffers can have multiple segments, so we might need multiple writes. Looping through buffers isn't - // difficult, but we've wrapped it up in an iterator object to keep this loop simple. - - BufferSegmentIterator it(objCell); - byte* pb; - SQLLEN cb; - while (it.Next(pb, cb)) - { - Py_BEGIN_ALLOW_THREADS - rc = SQLPutData(cur->hstmt, pb, cb); - Py_END_ALLOW_THREADS - if (!SQL_SUCCEEDED(rc)) - return RaiseErrorFromHandle(cur->cnxn, "SQLPutData", cur->cnxn->hdbc, cur->hstmt) != NULL; - } - } - #endif Py_XDECREF(pInfo->cell); rc = SQL_NEED_DATA; } @@ -2071,7 +1763,7 @@ bool ExecuteMulti(Cursor* cur, PyObject* pSql, PyObject* paramArrayObj) SQLSetStmtAttr(cur->hstmt, SQL_ATTR_PARAMSET_SIZE, (SQLPOINTER)1, SQL_IS_UINTEGER); SQLSetStmtAttr(cur->hstmt, SQL_ATTR_PARAM_BIND_OFFSET_PTR, 0, SQL_IS_POINTER); - pyodbc_free(cur->paramArray); + PyMem_Free(cur->paramArray); cur->paramArray = 0; } @@ -2079,6 +1771,7 @@ bool ExecuteMulti(Cursor* cur, PyObject* pSql, PyObject* paramArrayObj) FreeParameterData(cur); return ret; } +*/ static bool GetParamType(Cursor* cur, Py_ssize_t index, SQLSMALLINT& type) { @@ -2099,7 +1792,7 @@ static bool GetParamType(Cursor* cur, Py_ssize_t index, SQLSMALLINT& type) if (cur->paramtypes == 0) { - cur->paramtypes = reinterpret_cast(pyodbc_malloc(sizeof(SQLSMALLINT) * cur->paramcount)); + cur->paramtypes = reinterpret_cast(PyMem_Malloc(sizeof(SQLSMALLINT) * cur->paramcount)); if (cur->paramtypes == 0) { PyErr_NoMemory(); diff --git a/src/params.h b/src/params.h index c8e77de2..2965ff30 100644 --- a/src/params.h +++ b/src/params.h @@ -7,7 +7,7 @@ bool Params_init(); struct Cursor; bool PrepareAndBind(Cursor* cur, PyObject* pSql, PyObject* params, bool skip_first); -bool ExecuteMulti(Cursor* cur, PyObject* pSql, PyObject* paramArrayObj); +/* bool ExecuteMulti(Cursor* cur, PyObject* 
pSql, PyObject* paramArrayObj); */ bool GetParameterInfo(Cursor* cur, Py_ssize_t index, PyObject* param, ParamInfo& info, bool isTVP); void FreeParameterData(Cursor* cur); void FreeParameterInfo(Cursor* cur); diff --git a/src/pyodbc.h b/src/pyodbc.h index d2de93b0..a18529e9 100644 --- a/src/pyodbc.h +++ b/src/pyodbc.h @@ -28,13 +28,8 @@ typedef long long INT64; typedef unsigned long long UINT64; #define _strcmpi strcasecmp #define _strdup strdup -#ifdef __MINGW32__ - #include - #include -#else inline int max(int lhs, int rhs) { return (rhs > lhs) ? rhs : lhs; } #endif -#endif #ifdef __SUN__ #include @@ -48,6 +43,7 @@ typedef unsigned long long UINT64; #include #include #include +#include #ifdef __CYGWIN__ #include @@ -56,16 +52,6 @@ typedef unsigned long long UINT64; #include #include -#if PY_VERSION_HEX < 0x02050000 && !defined(PY_SSIZE_T_MIN) -typedef int Py_ssize_t; -#define PY_SSIZE_T_MAX INT_MAX -#define PY_SSIZE_T_MIN INT_MIN -#define PyInt_AsSsize_t PyInt_AsLong -#define lenfunc inquiry -#define ssizeargfunc intargfunc -#define ssizeobjargproc intobjargproc -#endif - #ifndef _countof #define _countof(a) (sizeof(a) / sizeof(a[0])) #endif @@ -122,23 +108,6 @@ inline void _strlwr(char* name) #define STRINGIFY(x) #x #define TOSTRING(x) STRINGIFY(x) -// Building an actual debug version of Python is so much of a pain that it never happens. I'm providing release-build -// versions of assertions. 
- -#if defined(PYODBC_ASSERT) && defined(_MSC_VER) - #include - inline void FailAssert(const char* szFile, size_t line, const char* szExpr) - { - printf("assertion failed: %s(%d)\n%s\n", szFile, (int)line, szExpr); - __debugbreak(); // _CrtDbgBreak(); - } - #define I(expr) if (!(expr)) FailAssert(__FILE__, __LINE__, #expr); - #define N(expr) if (expr) FailAssert(__FILE__, __LINE__, #expr); -#else - #define I(expr) - #define N(expr) -#endif - #ifdef PYODBC_TRACE void DebugTrace(const char* szFmt, ...); #else @@ -146,21 +115,11 @@ inline void DebugTrace(const char* szFmt, ...) { UNUSED(szFmt); } #endif #define TRACE DebugTrace -// #ifdef PYODBC_LEAK_CHECK -// #define pyodbc_malloc(len) _pyodbc_malloc(__FILE__, __LINE__, len) -// void* _pyodbc_malloc(const char* filename, int lineno, size_t len); -// void pyodbc_free(void* p); -// void pyodbc_leak_check(); -// #else -#define pyodbc_malloc malloc -#define pyodbc_free free -// #endif - // issue #880: entry missing from iODBC sqltypes.h #ifndef BYTE typedef unsigned char BYTE; #endif -bool pyodbc_realloc(BYTE** pp, size_t newlen); +bool PyMem_Realloc(BYTE** pp, size_t newlen); // A wrapper around realloc with a safer interface. If it is successful, *pp is updated to the // new pointer value. If not successful, it is not modified. (It is easy to forget and lose // the old pointer value with realloc.) @@ -169,8 +128,4 @@ void PrintBytes(void* p, size_t len); const char* CTypeName(SQLSMALLINT n); const char* SqlTypeName(SQLSMALLINT n); -#include "pyodbccompat.h" - -#define HERE printf("%s(%d)\n", __FILE__, __LINE__) - #endif // pyodbc_h diff --git a/src/pyodbccompat.cpp b/src/pyodbccompat.cpp deleted file mode 100644 index 1cc8a407..00000000 --- a/src/pyodbccompat.cpp +++ /dev/null @@ -1,45 +0,0 @@ -#include "pyodbc.h" - -bool Text_EqualsI(PyObject* lhs, const char* rhs) -{ -#if PY_MAJOR_VERSION < 3 - // In Python 2, allow ANSI strings. 
- if (lhs && PyString_Check(lhs)) - return _strcmpi(PyString_AS_STRING(lhs), rhs) == 0; -#endif - - if (lhs == 0 || !PyUnicode_Check(lhs)) - return false; - - Py_ssize_t cchLHS = PyUnicode_GET_SIZE(lhs); - Py_ssize_t cchRHS = (Py_ssize_t)strlen(rhs); - if (cchLHS != cchRHS) - return false; - - Py_UNICODE* p = PyUnicode_AS_UNICODE(lhs); - for (Py_ssize_t i = 0; i < cchLHS; i++) - { - int chL = (int)Py_UNICODE_TOUPPER(p[i]); - int chR = (int)toupper(rhs[i]); - if (chL != chR) - return false; - } - - return true; -} - -#if PY_MAJOR_VERSION < 3 -int PyCodec_KnownEncoding(const char *encoding) -{ - PyObject* codec = _PyCodec_Lookup(encoding); - - if (codec) - { - Py_DECREF(codec); - return 1; - } - - PyErr_Clear(); - return 0; -} -#endif diff --git a/src/pyodbccompat.h b/src/pyodbccompat.h deleted file mode 100644 index 74067095..00000000 --- a/src/pyodbccompat.h +++ /dev/null @@ -1,158 +0,0 @@ -#ifndef PYODBCCOMPAT_H -#define PYODBCCOMPAT_H - -// Macros and functions to ease compatibility with Python 2 and Python 3. - -#if PY_VERSION_HEX >= 0x03000000 && PY_VERSION_HEX < 0x03010000 -#error Python 3.0 is not supported. Please use 3.1 and higher. -#endif - -// Macros introduced in 2.6, backported for 2.4 and 2.5. -#ifndef PyVarObject_HEAD_INIT -#define PyVarObject_HEAD_INIT(type, size) PyObject_HEAD_INIT(type) size, -#endif -#ifndef Py_TYPE -#define Py_TYPE(ob) (((PyObject*)(ob))->ob_type) -#endif - -// Macros were introduced in 2.6 to map "bytes" to "str" in Python 2. Back port to 2.5. -#if PY_VERSION_HEX >= 0x02060000 - #include -#else - #define PyBytes_AS_STRING PyString_AS_STRING - #define PyBytes_Check PyString_Check - #define PyBytes_CheckExact PyString_CheckExact - #define PyBytes_FromStringAndSize PyString_FromStringAndSize - #define PyBytes_GET_SIZE PyString_GET_SIZE - #define PyBytes_Size PyString_Size - #define _PyBytes_Resize _PyString_Resize -#endif - -// Used for items that are ANSI in Python 2 and Unicode in Python 3 or in int 2 and long in 3. 
- -#if PY_MAJOR_VERSION >= 3 - #define PyString_FromString PyUnicode_FromString - #define PyString_FromStringAndSize PyUnicode_FromStringAndSize - #define PyString_Check PyUnicode_Check - #define PyString_Type PyUnicode_Type - #define PyString_Size PyUnicode_Size - #define PyInt_FromLong PyLong_FromLong - #define PyInt_AsLong PyLong_AsLong - #define PyInt_AS_LONG PyLong_AS_LONG - #define PyInt_Type PyLong_Type - #define PyString_FromFormatV PyUnicode_FromFormatV - #define PyString_FromFormat PyUnicode_FromFormat - #define Py_TPFLAGS_HAVE_ITER 0 - #define PyString_AsString PyUnicode_AsString - - #define TEXT_T Py_UNICODE - - #define PyString_Join PyUnicode_Join - -inline void PyString_ConcatAndDel(PyObject** lhs, PyObject* rhs) -{ - PyUnicode_Concat(*lhs, rhs); - Py_DECREF(rhs); -} - -#else - #include - #include - #include - - #define TEXT_T char - - #define PyString_Join _PyString_Join - -#endif - -inline PyObject* Text_New(Py_ssize_t length) -{ - // Returns a new, uninitialized String (Python 2) or Unicode object (Python 3) object. -#if PY_MAJOR_VERSION < 3 - return PyString_FromStringAndSize(0, length); -#else - return PyUnicode_FromUnicode(0, length); -#endif -} - -inline TEXT_T* Text_Buffer(PyObject* o) -{ -#if PY_MAJOR_VERSION < 3 - I(PyString_Check(o)); - return PyString_AS_STRING(o); -#else - I(PyUnicode_Check(o)); - return PyUnicode_AS_UNICODE(o); -#endif -} - - -inline bool IntOrLong_Check(PyObject* o) -{ - // A compatibility function to check for an int or long. Python 3 doesn't differentate - // anymore. - // A compatibility function that determines if the object is a string, based on the version of Python. - // For Python 2, an ASCII or Unicode string is allowed. For Python 3, it must be a Unicode object. 
-#if PY_MAJOR_VERSION < 3 - if (o && PyInt_Check(o)) - return true; -#endif - return o && PyLong_Check(o); -} - -inline bool Text_Check(PyObject* o) -{ - // A compatibility function that determines if the object is a string, based on the version of Python. - // For Python 2, an ASCII or Unicode string is allowed. For Python 3, it must be a Unicode object. -#if PY_MAJOR_VERSION < 3 - if (o && PyString_Check(o)) - return true; -#endif - return o && PyUnicode_Check(o); -} - -bool Text_EqualsI(PyObject* lhs, const char* rhs); -// Case-insensitive comparison for a Python string object (Unicode in Python 3, ASCII or Unicode in Python 2) against -// an ASCII string. If lhs is 0 or None, false is returned. - - -inline Py_ssize_t Text_Size(PyObject* o) -{ -#if PY_MAJOR_VERSION < 3 - if (o && PyString_Check(o)) - return PyString_GET_SIZE(o); -#endif - return (o && PyUnicode_Check(o)) ? PyUnicode_GET_SIZE(o) : 0; -} - -inline Py_ssize_t TextCopyToUnicode(Py_UNICODE* buffer, PyObject* o) -{ - // Copies a String or Unicode object to a Unicode buffer and returns the number of characters copied. - // No NULL terminator is appended! - -#if PY_MAJOR_VERSION < 3 - if (PyBytes_Check(o)) - { - const Py_ssize_t cch = PyBytes_GET_SIZE(o); - const char * pch = PyBytes_AS_STRING(o); - for (Py_ssize_t i = 0; i < cch; i++) - *buffer++ = (Py_UNICODE)*pch++; - return cch; - } - else - { -#endif - Py_ssize_t cch = PyUnicode_GET_SIZE(o); - memcpy(buffer, PyUnicode_AS_UNICODE(o), cch * sizeof(Py_UNICODE)); - return cch; -#if PY_MAJOR_VERSION < 3 - } -#endif -} - -#if PY_MAJOR_VERSION < 3 -int PyCodec_KnownEncoding(const char *encoding); -#endif - -#endif // PYODBCCOMPAT_H diff --git a/src/pyodbcdbg.cpp b/src/pyodbcdbg.cpp index cdcdedb2..574ab863 100644 --- a/src/pyodbcdbg.cpp +++ b/src/pyodbcdbg.cpp @@ -100,91 +100,3 @@ void DebugTrace(const char* szFmt, ...) } #endif -#ifdef PYODBC_LEAK_CHECK - -// THIS IS NOT THREAD SAFE: This is only designed for the single-threaded unit tests! 
- -struct Allocation -{ - const char* filename; - int lineno; - size_t len; - void* pointer; - int counter; -}; - -static Allocation* allocs = 0; -static int bufsize = 0; -static int count = 0; -static int allocCounter = 0; - -void* _pyodbc_malloc(const char* filename, int lineno, size_t len) -{ - void* p = malloc(len); - if (p == 0) - return 0; - - if (count == bufsize) - { - allocs = (Allocation*)realloc(allocs, (bufsize + 20) * sizeof(Allocation)); - if (allocs == 0) - { - // Yes we just lost the original pointer, but we don't care since everything is about to fail. This is a - // debug leak check, not a production malloc that needs to be robust in low memory. - bufsize = 0; - count = 0; - return 0; - } - bufsize += 20; - } - - allocs[count].filename = filename; - allocs[count].lineno = lineno; - allocs[count].len = len; - allocs[count].pointer = p; - allocs[count].counter = allocCounter++; - - printf("malloc(%d): %s(%d) %d %p\n", allocs[count].counter, filename, lineno, (int)len, p); - - count += 1; - - return p; -} - -void pyodbc_free(void* p) -{ - if (p == 0) - return; - - for (int i = 0; i < count; i++) - { - if (allocs[i].pointer == p) - { - printf("free(%d): %s(%d) %d %p i=%d\n", allocs[i].counter, allocs[i].filename, allocs[i].lineno, (int)allocs[i].len, allocs[i].pointer, i); - memmove(&allocs[i], &allocs[i + 1], sizeof(Allocation) * (count - i - 1)); - count -= 1; - free(p); - return; - } - } - - printf("FREE FAILED: %p\n", p); - free(p); -} - -void pyodbc_leak_check() -{ - if (count == 0) - { - printf("NO LEAKS\n"); - } - else - { - printf("********************************************************************************\n"); - printf("%d leaks\n", count); - for (int i = 0; i < count; i++) - printf("LEAK: %d %s(%d) len=%d\n", allocs[i].counter, allocs[i].filename, allocs[i].lineno, allocs[i].len); - } -} - -#endif diff --git a/src/pyodbcmodule.cpp b/src/pyodbcmodule.cpp index d0b69c58..4c5ededb 100644 --- a/src/pyodbcmodule.cpp +++ 
b/src/pyodbcmodule.cpp @@ -26,6 +26,10 @@ #include #include +#ifdef WITH_NUMPY +#include "npcontainer.h" +#endif + static PyObject* MakeConnectionString(PyObject* existing, PyObject* parts); PyObject* pModule = 0; @@ -127,13 +131,13 @@ static ExcInfo aExcInfos[] = { }; -bool pyodbc_realloc(BYTE** pp, size_t newlen) +bool PyMem_Realloc(BYTE** pp, size_t newlen) { // A wrapper around realloc with a safer interface. If it is successful, *pp is updated to the // new pointer value. If not successful, it is not modified. (It is easy to forget and lose // the old pointer value with realloc.) - BYTE* pT = (BYTE*)realloc(*pp, newlen); + BYTE* pT = (BYTE*)PyMem_Realloc(*pp, newlen); if (pT == 0) return false; *pp = pT; @@ -163,7 +167,7 @@ PyObject* GetClassForThread(const char* szModule, const char* szClass) // modules.) PyObject* dict = PyThreadState_GetDict(); - I(dict); + assert(dict); if (dict == 0) { // I don't know why there wouldn't be thread state so I'm going to raise an exception @@ -271,6 +275,9 @@ static bool import_types() if (!InitializeDecimal()) return false; +#ifdef WITH_NUMPY + NpContainer_init(); +#endif return true; } @@ -307,13 +314,8 @@ static bool AllocateEnv() static bool CheckAttrsVal(PyObject *val, bool allowSeq) { - if (IntOrLong_Check(val) -#if PY_MAJOR_VERSION < 3 - || PyBuffer_Check(val) -#endif -#if PY_VERSION_HEX >= 0x02060000 + if (PyLong_Check(val) || PyByteArray_Check(val) -#endif || PyBytes_Check(val) || PyUnicode_Check(val)) return true; @@ -352,7 +354,7 @@ static PyObject* _CheckAttrsDict(PyObject* attrs) PyObject* value = 0; while (PyDict_Next(attrs, &pos, &key, &value)) { - if (!IntOrLong_Check(key)) + if (!PyLong_Check(key)) return PyErr_Format(PyExc_TypeError, "Attribute dictionary keys must be integers"); if (!CheckAttrsVal(value, true)) @@ -386,7 +388,6 @@ static PyObject* mod_connect(PyObject* self, PyObject* args, PyObject* kwargs) Object pConnectString; int fAutoCommit = 0; - int fAnsi = 0; // force ansi int fReadOnly = 0; long 
timeout = 0; Object encoding; @@ -403,7 +404,7 @@ static PyObject* mod_connect(PyObject* self, PyObject* args, PyObject* kwargs) if (size == 1) { - if (!PyString_Check(PyTuple_GET_ITEM(args, 0)) && !PyUnicode_Check(PyTuple_GET_ITEM(args, 0))) + if (!PyUnicode_Check(PyTuple_GET_ITEM(args, 0)) && !PyUnicode_Check(PyTuple_GET_ITEM(args, 0))) return PyErr_Format(PyExc_TypeError, "argument 1 must be a string or unicode object"); pConnectString.Attach(PyUnicode_FromObject(PyTuple_GetItem(args, 0))); @@ -425,53 +426,43 @@ static PyObject* mod_connect(PyObject* self, PyObject* args, PyObject* kwargs) while (PyDict_Next(kwargs, &pos, &key, &value)) { - if (!Text_Check(key)) + if (!PyUnicode_Check(key)) return PyErr_Format(PyExc_TypeError, "Dictionary keys passed to connect must be strings"); // // Note: key and value are *borrowed*. // // // Check for the two non-connection string keywords we accept. (If we get many more of these, create something // // table driven. Are we sure there isn't a Python function to parse keywords but leave those it doesn't know?) 
- // const char* szKey = PyString_AsString(key); + // const char* szKey = PyUnicode_AsString(key); - if (Text_EqualsI(key, "autocommit")) + if (PyUnicode_CompareWithASCIIString(key, "autocommit") == 0) { fAutoCommit = PyObject_IsTrue(value); continue; } - if (Text_EqualsI(key, "ansi")) - { - fAnsi = PyObject_IsTrue(value); - continue; - } - if (Text_EqualsI(key, "timeout")) + if (PyUnicode_CompareWithASCIIString(key, "timeout") == 0) { - timeout = PyInt_AsLong(value); + timeout = PyLong_AsLong(value); if (PyErr_Occurred()) return 0; continue; } - if (Text_EqualsI(key, "readonly")) + if (PyUnicode_CompareWithASCIIString(key, "readonly") == 0) { fReadOnly = PyObject_IsTrue(value); continue; } - if (Text_EqualsI(key, "attrs_before") && PyDict_Check(value)) + if (PyUnicode_CompareWithASCIIString(key, "attrs_before") == 0 && PyDict_Check(value)) { attrs_before = _CheckAttrsDict(value); if (PyErr_Occurred()) return 0; continue; } - if (Text_EqualsI(key, "encoding")) + if (PyUnicode_CompareWithASCIIString(key, "encoding") == 0) { -#if PY_MAJOR_VERSION < 3 - if (!PyString_Check(value) && !PyUnicode_Check(value)) - return PyErr_Format(PyExc_TypeError, "encoding must be a string or unicode object"); -#else if (!PyUnicode_Check(value)) return PyErr_Format(PyExc_TypeError, "encoding must be a string"); -#endif encoding = value; continue; } @@ -480,11 +471,11 @@ static PyObject* mod_connect(PyObject* self, PyObject* args, PyObject* kwargs) for (size_t i = 0; i < _countof(keywordmaps); i++) { - if (Text_EqualsI(key, keywordmaps[i].oldname)) + if (PyUnicode_CompareWithASCIIString(key, keywordmaps[i].oldname) == 0) { if (keywordmaps[i].newnameObject == 0) { - keywordmaps[i].newnameObject = PyString_FromString(keywordmaps[i].newname); + keywordmaps[i].newnameObject = PyUnicode_FromString(keywordmaps[i].newname); if (keywordmaps[i].newnameObject == 0) return 0; } @@ -520,7 +511,7 @@ static PyObject* mod_connect(PyObject* self, PyObject* args, PyObject* kwargs) return 0; } - return 
(PyObject*)Connection_New(pConnectString.Get(), fAutoCommit != 0, fAnsi != 0, timeout, + return (PyObject*)Connection_New(pConnectString.Get(), fAutoCommit != 0, timeout, fReadOnly != 0, attrs_before.Detach(), encoding); } @@ -553,7 +544,7 @@ static PyObject* mod_drivers(PyObject* self) // REVIEW: This is another reason why we really need a factory that we can use. At this // point we don't have a global text encoding that we can assume for this. Somehow it // seems to be working to use UTF-8, even on Windows. - Object name(PyString_FromString((const char*)szDriverDesc)); + Object name(PyUnicode_FromString((const char*)szDriverDesc)); if (!name) return 0; @@ -620,8 +611,8 @@ static PyObject* mod_datasources(PyObject* self) if (!SQL_SUCCEEDED(ret)) break; - PyObject* key = PyString_FromString((const char*)szDSN); - PyObject* val = PyString_FromString((const char*)szDesc); + PyObject* key = PyUnicode_FromString((const char*)szDSN); + PyObject* val = PyUnicode_FromString((const char*)szDesc); #endif if(key && val) @@ -700,7 +691,7 @@ static PyObject* mod_getdecimalsep(PyObject* self) } static char connect_doc[] = - "connect(str, autocommit=False, ansi=False, timeout=0, **kwargs) --> Connection\n" + "connect(str, autocommit=False, timeout=0, **kwargs) --> Connection\n" "\n" "Accepts an ODBC connection string and returns a new Connection object.\n" "\n" @@ -718,7 +709,7 @@ static char connect_doc[] = "documentation or the documentation of your ODBC driver for details.\n" "\n" "The connection string can be passed as the string `str`, as a list of keywords,\n" - "or a combination of the two. Any keywords except autocommit, ansi, and timeout\n" + "or a combination of the two. Any keywords except autocommit and timeout\n" "(see below) are simply added to the connection string.\n" "\n" " connect('server=localhost;user=me')\n" @@ -740,15 +731,6 @@ static char connect_doc[] = " defined in the DB API 2. 
If True or non-zero, the connection is put into\n" " ODBC autocommit mode and statements are committed automatically.\n" " \n" - " ansi\n" - " By default, pyodbc first attempts to connect using the Unicode version of\n" - " SQLDriverConnectW. If the driver returns IM001 indicating it does not\n" - " support the Unicode version, the ANSI version is tried. Any other SQLSTATE\n" - " is turned into an exception. Setting ansi to true skips the Unicode\n" - " attempt and only connects using the ANSI version. This is useful for\n" - " drivers that return the wrong SQLSTATE (or if pyodbc is out of date and\n" - " should support other SQLSTATEs).\n" - " \n" " timeout\n" " An integer login timeout in seconds, used to set the SQL_ATTR_LOGIN_TIMEOUT\n" " attribute of the connection. The default is 0 which means the database's\n" @@ -796,16 +778,6 @@ static char getdecimalsep_doc[] = "Gets the decimal separator character used when parsing NUMERIC from the database."; -#ifdef PYODBC_LEAK_CHECK -static PyObject* mod_leakcheck(PyObject* self, PyObject* args) -{ - UNUSED(self, args); - pyodbc_leak_check(); - Py_RETURN_NONE; -} -#endif - - static PyMethodDef pyodbc_methods[] = { { "connect", (PyCFunction)mod_connect, METH_VARARGS|METH_KEYWORDS, connect_doc }, @@ -816,11 +788,6 @@ static PyMethodDef pyodbc_methods[] = { "TimestampFromTicks", (PyCFunction)mod_timestampfromticks, METH_VARARGS, timestampfromticks_doc }, { "drivers", (PyCFunction)mod_drivers, METH_NOARGS, drivers_doc }, { "dataSources", (PyCFunction)mod_datasources, METH_NOARGS, datasources_doc }, - -#ifdef PYODBC_LEAK_CHECK - { "leakcheck", (PyCFunction)mod_leakcheck, METH_NOARGS, 0 }, -#endif - { 0, 0, 0, 0 } }; @@ -1148,7 +1115,7 @@ static bool CreateExceptions() if (!classdict) return false; - PyObject* doc = PyString_FromString(info.szDoc); + PyObject* doc = PyUnicode_FromString(info.szDoc); if (!doc) { Py_DECREF(classdict); @@ -1174,7 +1141,6 @@ static bool CreateExceptions() return true; } -#if PY_MAJOR_VERSION >= 
3 static struct PyModuleDef moduledef = { PyModuleDef_HEAD_INIT, "pyodbc", // m_name @@ -1185,36 +1151,24 @@ static struct PyModuleDef moduledef = { 0, // m_traverse 0, // m_clear 0, // m_free - }; - #define MODRETURN(v) v -#else - #define MODRETURN(v) -#endif +}; -PyMODINIT_FUNC -#if PY_MAJOR_VERSION >= 3 -PyInit_pyodbc() -#else -initpyodbc(void) -#endif + +PyMODINIT_FUNC PyInit_pyodbc() { ErrorInit(); if (PyType_Ready(&ConnectionType) < 0 || PyType_Ready(&CursorType) < 0 || PyType_Ready(&RowType) < 0 || PyType_Ready(&CnxnInfoType) < 0) - return MODRETURN(0); + return 0; Object module; -#if PY_MAJOR_VERSION >= 3 module.Attach(PyModule_Create(&moduledef)); -#else - module.Attach(Py_InitModule4("pyodbc", pyodbc_methods, module_doc, NULL, PYTHON_API_VERSION)); -#endif pModule = module.Get(); if (!module || !import_types() || !CreateExceptions()) - return MODRETURN(0); + return 0; const char* szVersion = TOSTRING(PYODBC_VERSION); PyModule_AddStringConstant(module, "version", (char*)szVersion); @@ -1248,28 +1202,23 @@ initpyodbc(void) Py_INCREF((PyObject*)PyDateTimeAPI->DateTimeType); PyModule_AddObject(module, "DATETIME", (PyObject*)PyDateTimeAPI->DateTimeType); Py_INCREF((PyObject*)PyDateTimeAPI->DateTimeType); - PyModule_AddObject(module, "STRING", (PyObject*)&PyString_Type); - Py_INCREF((PyObject*)&PyString_Type); + PyModule_AddObject(module, "STRING", (PyObject*)&PyUnicode_Type); + Py_INCREF((PyObject*)&PyUnicode_Type); PyModule_AddObject(module, "NUMBER", (PyObject*)&PyFloat_Type); Py_INCREF((PyObject*)&PyFloat_Type); - PyModule_AddObject(module, "ROWID", (PyObject*)&PyInt_Type); - Py_INCREF((PyObject*)&PyInt_Type); + PyModule_AddObject(module, "ROWID", (PyObject*)&PyLong_Type); + Py_INCREF((PyObject*)&PyLong_Type); PyObject* binary_type; -#if PY_VERSION_HEX >= 0x02060000 binary_type = (PyObject*)&PyByteArray_Type; -#else - binary_type = (PyObject*)&PyBuffer_Type; -#endif PyModule_AddObject(module, "BINARY", binary_type); Py_INCREF(binary_type); 
PyModule_AddObject(module, "Binary", binary_type); Py_INCREF(binary_type); - I(null_binary != 0); // must be initialized first + assert(null_binary != 0); // must be initialized first PyModule_AddObject(module, "BinaryNull", null_binary); - PyModule_AddIntConstant(module, "UNICODE_SIZE", sizeof(Py_UNICODE)); PyModule_AddIntConstant(module, "SQLWCHAR_SIZE", sizeof(SQLWCHAR)); if (!PyErr_Occurred()) @@ -1281,21 +1230,19 @@ initpyodbc(void) ErrorCleanup(); } - return MODRETURN(pModule); + return pModule; } + #ifdef WINVER -BOOL WINAPI DllMain( - HINSTANCE hMod, - DWORD fdwReason, - LPVOID lpvReserved - ) +BOOL WINAPI DllMain(HINSTANCE hMod, DWORD fdwReason, LPVOID lpvReserved) { UNUSED(hMod, fdwReason, lpvReserved); return TRUE; } #endif + static PyObject* MakeConnectionString(PyObject* existing, PyObject* parts) { // Creates a connection string from an optional existing connection string plus a dictionary of keyword value @@ -1310,44 +1257,12 @@ static PyObject* MakeConnectionString(PyObject* existing, PyObject* parts) // parts // A dictionary of text keywords and text values that will be appended. 
- I(PyUnicode_Check(existing)); + assert(PyUnicode_Check(existing)); Py_ssize_t pos = 0; PyObject* key = 0; PyObject* value = 0; Py_ssize_t length = 0; // length in *characters* -#if PY_MAJOR_VERSION < 3 - if (existing) - length = Text_Size(existing) + 1; // + 1 to add a trailing semicolon - - while (PyDict_Next(parts, &pos, &key, &value)) - { - length += Text_Size(key) + 1 + Text_Size(value) + 1; // key=value; - } - - PyObject* result = PyUnicode_FromUnicode(0, length); - if (!result) - return 0; - - Py_UNICODE* buffer = PyUnicode_AS_UNICODE(result); - Py_ssize_t offset = 0; - - if (existing) - { - offset += TextCopyToUnicode(&buffer[offset], existing); - buffer[offset++] = (Py_UNICODE)';'; - } - - pos = 0; - while (PyDict_Next(parts, &pos, &key, &value)) - { - offset += TextCopyToUnicode(&buffer[offset], key); - buffer[offset++] = (Py_UNICODE)'='; - - offset += TextCopyToUnicode(&buffer[offset], value); - buffer[offset++] = (Py_UNICODE)';'; - } -#else // >= Python 3.3 int result_kind = PyUnicode_1BYTE_KIND; if (existing) { length = PyUnicode_GET_LENGTH(existing) + 1; // + 1 to add a trailing semicolon @@ -1406,8 +1321,8 @@ static PyObject* MakeConnectionString(PyObject* existing, PyObject* parts) offset += count; PyUnicode_WriteChar(result, offset++, (Py_UCS4)';'); } -#endif - I(offset == length); + + assert(offset == length); return result; } diff --git a/src/row.cpp b/src/row.cpp index d18acbb2..405637e3 100644 --- a/src/row.cpp +++ b/src/row.cpp @@ -45,7 +45,7 @@ void FreeRowValues(Py_ssize_t cValues, PyObject** apValues) { for (Py_ssize_t i = 0; i < cValues; i++) Py_XDECREF(apValues[i]); - pyodbc_free(apValues); + PyMem_Free(apValues); } } @@ -74,17 +74,17 @@ static PyObject* Row_getstate(PyObject* self) if (row->description == 0) return PyTuple_New(0); - Tuple state(PyTuple_New(2 + row->cValues)); + Object state(PyTuple_New(2 + row->cValues)); if (!state.IsValid()) return 0; - state[0] = row->description; - state[1] = row->map_name_to_index; + 
PyTuple_SET_ITEM(state, 0, row->description); + PyTuple_SET_ITEM(state, 1, row->map_name_to_index); for (int i = 0; i < row->cValues; i++) - state[i+2] = row->apValues[i]; + PyTuple_SET_ITEM(state, i+2, row->apValues[i]); - for (int i = 0; i < 2 + row->cValues; i++) - Py_XINCREF(state[i]); + for (int i = 0; i < PyTuple_GET_SIZE(state); i++) + Py_XINCREF(PyTuple_GET_ITEM(state, i)); return state.Detach(); } @@ -110,7 +110,7 @@ static PyObject* new_check(PyObject* args) if (PyDict_Size(map) != cols || PyTuple_GET_SIZE(args) - 2 != cols) return 0; - PyObject** apValues = (PyObject**)pyodbc_malloc(sizeof(PyObject*) * cols); + PyObject** apValues = (PyObject**)PyMem_Malloc(sizeof(PyObject*) * cols); if (!apValues) return 0; @@ -264,16 +264,16 @@ static PyObject* Row_repr(PyObject* o) Row* self = (Row*)o; - Object tmp(PyTuple_New(self->cValues)); - if (!tmp) - return 0; + Object t(PyTuple_New(self->cValues)); + if (!t) + return 0; for (Py_ssize_t i = 0; i < self->cValues; i++) { Py_INCREF(self->apValues[i]); - PyTuple_SET_ITEM(tmp.Get(), i, self->apValues[i]); + PyTuple_SET_ITEM(t.Get(), i, self->apValues[i]); } - return PyObject_Repr(tmp); + return PyObject_Repr(t); } static PyObject* Row_richcompare(PyObject* olhs, PyObject* orhs, int op) @@ -352,13 +352,8 @@ static PyObject* Row_subscript(PyObject* o, PyObject* key) if (PySlice_Check(key)) { Py_ssize_t start, stop, step, slicelength; -#if PY_VERSION_HEX >= 0x03020000 if (PySlice_GetIndicesEx(key, row->cValues, &start, &stop, &step, &slicelength) < 0) return 0; -#else - if (PySlice_GetIndicesEx((PySliceObject*)key, row->cValues, &start, &stop, &step, &slicelength) < 0) - return 0; -#endif if (slicelength <= 0) return PyTuple_New(0); diff --git a/src/textenc.cpp b/src/textenc.cpp index ca209c2d..725265f0 100644 --- a/src/textenc.cpp +++ b/src/textenc.cpp @@ -5,9 +5,9 @@ void SQLWChar::init(PyObject* src, const TextEnc& enc) { - // Initialization code common to all of the constructors. 
- // - // Convert `src` to SQLWCHAR. + // Initialization code common to all of the constructors. + // + // Convert `src` to SQLWCHAR. static PyObject* nulls = NULL; @@ -21,37 +21,14 @@ void SQLWChar::init(PyObject* src, const TextEnc& enc) isNone = false; // If there are optimized encodings that don't require a temporary object, use them. -#if PY_MAJOR_VERSION < 3 - if (enc.optenc == OPTENC_RAW && PyString_Check(src)) - { - psz = (SQLWCHAR*)PyString_AS_STRING(src); - return; - } -#endif - -#if PY_MAJOR_VERSION >= 3 if (enc.optenc == OPTENC_UTF8 && PyUnicode_Check(src)) { psz = (SQLWCHAR*)PyUnicode_AsUTF8(src); return; } -#endif PyObject* pb = 0; -#if PY_MAJOR_VERSION == 2 - if (PyBytes_Check(src)) - { - // If this is Python 2, the string could already be encoded as bytes. If the encoding is - // different than what we want, we have to decode to Unicode and then re-encode. - - - PyObject* u = PyString_AsDecodedObject(src, 0, "strict"); - if (u) - src = u; - } -#endif - if (!pb && PyUnicode_Check(src)) pb = PyUnicode_AsEncodedString(src, enc.name, "strict"); @@ -76,10 +53,10 @@ void SQLWChar::init(PyObject* src, const TextEnc& enc) return; } } else { - // If the encoding failed (possibly due to "strict"), it will generate an exception, but - // we're going to continue. - PyErr_Clear(); - psz = 0; + // If the encoding failed (possibly due to "strict"), it will generate an exception, but + // we're going to continue. 
+ PyErr_Clear(); + psz = 0; } if (pb) { @@ -91,14 +68,6 @@ void SQLWChar::init(PyObject* src, const TextEnc& enc) PyObject* TextEnc::Encode(PyObject* obj) const { -#if PY_MAJOR_VERSION < 3 - if (optenc == OPTENC_RAW || PyBytes_Size(obj) == 0) - { - Py_INCREF(obj); - return obj; - } -#endif - PyObject* bytes = PyCodec_Encode(obj, name, "strict"); if (bytes && PyErr_Occurred()) @@ -119,34 +88,6 @@ PyObject* TextEnc::Encode(PyObject* obj) const } -#if PY_MAJOR_VERSION < 3 -PyObject* EncodeStr(PyObject* str, const TextEnc& enc) -{ - if (enc.optenc == OPTENC_RAW || PyBytes_Size(str) == 0) - { - // No conversion. - Py_INCREF(str); - return str; - } - else - { - // Encode the text with the user's encoding. - Object encoded(PyCodec_Encode(str, enc.name, "strict")); - if (!encoded) - return 0; - - if (!PyBytes_CheckExact(encoded)) - { - // Not all encodings return bytes. - PyErr_Format(PyExc_TypeError, "Unicode read encoding '%s' returned unexpected data type: %s", - enc.name, encoded.Get()->ob_type->tp_name); - return 0; - } - - return encoded.Detach(); - } -} -#endif PyObject* TextBufferToObject(const TextEnc& enc, const byte* pbData, Py_ssize_t cbData) { @@ -158,86 +99,35 @@ PyObject* TextBufferToObject(const TextEnc& enc, const byte* pbData, Py_ssize_t // first pointed this out with shift_jis. I'm not sure if it is a fault in the // implementation of this codec or if others will have it also. - PyObject* str; + // PyObject* str; -#if PY_MAJOR_VERSION < 3 - // The Unicode paths use the same code. 
- if (enc.to == TO_UNICODE) + if (cbData == 0) + return PyUnicode_FromStringAndSize("", 0); + + switch (enc.optenc) { -#endif - if (cbData == 0) - { - str = PyUnicode_FromStringAndSize("", 0); + case OPTENC_UTF8: + return PyUnicode_DecodeUTF8((char*)pbData, cbData, "strict"); + + case OPTENC_UTF16: { + int byteorder = BYTEORDER_NATIVE; + return PyUnicode_DecodeUTF16((char*)pbData, cbData, "strict", &byteorder); } - else - { - int byteorder = 0; - switch (enc.optenc) - { - case OPTENC_UTF8: - str = PyUnicode_DecodeUTF8((char*)pbData, cbData, "strict"); - break; - case OPTENC_UTF16: - byteorder = BYTEORDER_NATIVE; - str = PyUnicode_DecodeUTF16((char*)pbData, cbData, "strict", &byteorder); - break; - case OPTENC_UTF16LE: - byteorder = BYTEORDER_LE; - str = PyUnicode_DecodeUTF16((char*)pbData, cbData, "strict", &byteorder); - break; - case OPTENC_UTF16BE: - byteorder = BYTEORDER_BE; - str = PyUnicode_DecodeUTF16((char*)pbData, cbData, "strict", &byteorder); - break; - case OPTENC_LATIN1: - str = PyUnicode_DecodeLatin1((char*)pbData, cbData, "strict"); - break; - default: - // The user set an encoding by name. - str = PyUnicode_Decode((char*)pbData, cbData, enc.name, "strict"); - break; - } + + case OPTENC_UTF16LE: { + int byteorder = BYTEORDER_LE; + return PyUnicode_DecodeUTF16((char*)pbData, cbData, "strict", &byteorder); } -#if PY_MAJOR_VERSION < 3 - } - else if (cbData == 0) - { - str = PyString_FromStringAndSize("", 0); - } - else if (enc.optenc == OPTENC_RAW) - { - // No conversion. - str = PyString_FromStringAndSize((char*)pbData, cbData); - } - else - { - // The user has requested a string object. Unfortunately we don't have - // str versions of all of the optimized functions. 
- const char* encoding; - switch (enc.optenc) - { - case OPTENC_UTF8: - encoding = "utf-8"; - break; - case OPTENC_UTF16: - encoding = "utf-16"; - break; - case OPTENC_UTF16LE: - encoding = "utf-16-le"; - break; - case OPTENC_UTF16BE: - encoding = "utf-16-be"; - break; - case OPTENC_LATIN1: - encoding = "latin-1"; - break; - default: - encoding = enc.name; + + case OPTENC_UTF16BE: { + int byteorder = BYTEORDER_BE; + return PyUnicode_DecodeUTF16((char*)pbData, cbData, "strict", &byteorder); } - str = PyString_Decode((char*)pbData, cbData, encoding, "strict"); + case OPTENC_LATIN1: + return PyUnicode_DecodeLatin1((char*)pbData, cbData, "strict"); } -#endif - return str; + // The user set an encoding by name. + return PyUnicode_Decode((char*)pbData, cbData, enc.name, "strict"); } diff --git a/src/textenc.h b/src/textenc.h index 1a8bed91..08557da6 100644 --- a/src/textenc.h +++ b/src/textenc.h @@ -7,20 +7,14 @@ enum { BYTEORDER_BE = 1, OPTENC_NONE = 0, // No optimized encoding - use the named encoding - OPTENC_RAW = 1, // In Python 2, pass bytes directly to string - no decoder - OPTENC_UTF8 = 2, - OPTENC_UTF16 = 3, // "Native", so check for BOM and default to BE - OPTENC_UTF16BE = 4, - OPTENC_UTF16LE = 5, - OPTENC_LATIN1 = 6, - OPTENC_UTF32 = 7, - OPTENC_UTF32LE = 8, - OPTENC_UTF32BE = 9, - -#if PY_MAJOR_VERSION < 3 - TO_UNICODE = 1, - TO_STR = 2 -#endif + OPTENC_UTF8 = 1, + OPTENC_UTF16 = 2, // "Native", so check for BOM and default to BE + OPTENC_UTF16BE = 3, + OPTENC_UTF16LE = 4, + OPTENC_LATIN1 = 5, + OPTENC_UTF32 = 6, + OPTENC_UTF32LE = 7, + OPTENC_UTF32BE = 8, }; #ifdef WORDS_BIGENDIAN @@ -31,16 +25,6 @@ enum { # define ENCSTR_UTF16NE "utf-16le" #endif -typedef unsigned short ODBCCHAR; -// I'm not sure why, but unixODBC seems to define SQLWCHAR as wchar_t even with -// the size is incorrect. So we might get 4-byte SQLWCHAR on 64-bit Linux even -// though it requires 2-byte characters. We have to define our own type to -// operate on. 
- -enum { - ODBCCHAR_SIZE = 2 -}; - struct TextEnc { // Holds encoding information for reading or writing text. Since some drivers / databases @@ -50,12 +34,10 @@ struct TextEnc // * reading SQL_CHAR // * reading SQL_WCHAR // * writing unicode strings - // * writing non-unicode strings (Python 2.7 only) - -#if PY_MAJOR_VERSION < 3 - int to; - // The type of object to return if reading from the database: str or unicode. -#endif + // * reading metadata like column names + // + // I would have expected the metadata to follow the SQLCHAR / SQLWCHAR based on whether the + // ANSI or wide API was called, but it does not. int optenc; // Set to one of the OPTENC constants to indicate whether an optimized encoding is to be @@ -71,48 +53,39 @@ struct TextEnc // SQL_WCHAR data even when configured for UTF-8 which is better suited for SQL_C_CHAR. PyObject* Encode(PyObject*) const; - // Given a string (unicode or str for 2.7), return a bytes object encoded. This is used - // for encoding a Python object for passing to a function expecting SQLCHAR* or SQLWCHAR*. + // Given a string, return a bytes object encoded. This is used for encoding a Python + // object for passing to a function expecting SQLCHAR* or SQLWCHAR*. }; -struct SQLWChar +class SQLWChar { - // Encodes a Python string to a SQLWCHAR pointer. This should eventually replace the - // SQLWchar structure. + // A convenience object that encodes a Unicode string to a given encoding. It can be cast + // to a SQLWCHAR* to return the pointer. // - // Note: This does *not* increment the refcount! - - // IMPORTANT: I've made the conscious decision *not* to determine the character count. If - // we only had to follow the ODBC specification, it would simply be the number of - // characters in the string and would be the bytelen / 2. The problem is drivers that - // don't follow the specification and expect things like UTF-8. What length do these - // drivers expect? 
Very, very likely they want the number of *bytes*, not the actual - // number of characters. I'm simply going to null terminate and pass SQL_NTS. + // This is designed to be created on the stack, perform the conversion, and cleanup any + // temporary objects in the destructor. // - // This is a performance penalty when using utf16 since we have to copy the string just to - // add the null terminator bytes, but we don't use it very often. If this becomes a - // bottleneck, we'll have to revisit this design. - - SQLWCHAR* psz; - bool isNone; + // The SQLWCHAR pointer is *only* valid during the lifetime of this object. It may point + // into a temporary `bytes` object that is deleted by the constructor. - Object bytes; - // A temporary object holding the decoded bytes if we can't use a pointer into the original - // object. +public: + SQLWChar() + { + psz = 0; + isNone = true; + } SQLWChar(PyObject* src, const char* szEncoding) { - TextEnc enc; - enc.name = szEncoding; - enc.ctype = SQL_C_WCHAR; - enc.optenc = (strcmp(szEncoding, "raw") == 0) ? OPTENC_RAW : OPTENC_NONE; - init(src, enc); + psz = 0; + isNone = true; + set(src, szEncoding); } SQLWChar(PyObject* src, const TextEnc* penc) + : SQLWChar(src, *penc) { - init(src, *penc); } SQLWChar(PyObject* src, const TextEnc& enc) @@ -131,7 +104,30 @@ struct SQLWChar return psz != 0; } + void set(PyObject* src, const char* szEncoding) { + bytes.Attach(0); // free old, if any + psz = 0; + isNone = true; + + TextEnc enc; + enc.name = szEncoding; + enc.ctype = SQL_C_WCHAR; + enc.optenc = OPTENC_NONE; + init(src, enc); + } + + SQLWCHAR* get() { return psz; } + + operator SQLWCHAR*() { return psz; } + private: + SQLWCHAR* psz; + bool isNone; + + Object bytes; + // A temporary object holding the decoded bytes if we can't use a pointer into the original + // object. 
+ void init(PyObject* src, const TextEnc& enc); SQLWChar(const SQLWChar&) {} @@ -142,7 +138,8 @@ struct SQLWChar PyObject* TextBufferToObject(const TextEnc& enc, const byte* p, Py_ssize_t len); // Convert a text buffer to a Python object using the given encoding. // -// The buffer can be a SQLCHAR array or SQLWCHAR array. The text encoding -// should match it. +// - pbData :: The buffer, which is an array of SQLCHAR or SQLWCHAR. We treat it as bytes here +// since the encoding `enc` tells us how to treat it. +// - cbData :: The length of `pbData` in *bytes*. #endif // _TEXTENC_H diff --git a/src/wrapper.h b/src/wrapper.h index df7526bd..8807a0de 100644 --- a/src/wrapper.h +++ b/src/wrapper.h @@ -57,6 +57,13 @@ class Object return p; } + operator PyTupleObject*() + { + // This is a bit weird. I'm surprised the PyTuple_ functions and macros don't just use + // PyObject. + return (PyTupleObject*)p; + } + operator PyVarObject*() { return (PyVarObject*)p; } operator const bool() { return p != 0; } @@ -68,35 +75,6 @@ class Object }; -class Tuple - : public Object -{ -private: - Tuple(const Tuple& other) {} - void operator=(const Tuple& other) {} - -public: - - Tuple(PyObject* _p = 0) - : Object(_p) - { - } - - operator PyTupleObject*() - { - return (PyTupleObject*)p; - } - - PyObject*& operator[](int i) - { - I(p != 0); - return PyTuple_GET_ITEM(p, i); - } - - Py_ssize_t size() { return p ? PyTuple_GET_SIZE(p) : 0; } -}; - - #ifdef WINVER struct RegKey { diff --git a/tests/__init__.py b/tests/__init__.py new file mode 100644 index 00000000..f6f4c05b --- /dev/null +++ b/tests/__init__.py @@ -0,0 +1,5 @@ +# This file is required to simplify running pytest. 
+# +# Build pyodbc into the project root by running: python setup.py build_ext --inplace +# +# Then run pytest from the root: pytest tests/postgresql_test.py diff --git a/tests/mysql_test.py b/tests/mysql_test.py new file mode 100644 index 00000000..2750bd94 --- /dev/null +++ b/tests/mysql_test.py @@ -0,0 +1,491 @@ +""" +pytest unit tests for MySQL. Uses a DNS name 'mysql' and uses UTF-8 +""" +# -*- coding: utf-8 -*- + +import os +from decimal import Decimal +from datetime import date, datetime +from functools import lru_cache + +import pyodbc, pytest + + +CNXNSTR = os.environ.get('PYODBC_MYSQL', 'DSN=mysql;charset=utf8mb4') + + +def connect(autocommit=False, attrs_before=None): + c = pyodbc.connect(CNXNSTR, autocommit=autocommit, attrs_before=attrs_before) + + # As of libmyodbc5w 5.3 SQLGetTypeInfo returns absurdly small sizes + # leading to slow writes. Override them: + c.maxwrite = 1024 * 1024 * 1024 + + # My MySQL configuration (and I think the default) sends *everything* + # in UTF-8. The pyodbc default is to send Unicode as UTF-16 and to + # decode WCHAR via UTF-16. Change them both to UTF-8. 
+ c.setdecoding(pyodbc.SQL_CHAR, encoding='utf-8') + c.setdecoding(pyodbc.SQL_WCHAR, encoding='utf-8') + c.setencoding(encoding='utf-8') + + return c + + +@pytest.fixture +def cursor(): + cnxn = connect() + + cur = cnxn.cursor() + + cur.execute("drop table if exists t1") + cur.execute("drop table if exists t2") + cur.execute("drop table if exists t3") + cnxn.commit() + + yield cur + + if not cnxn.closed: + cur.close() + cnxn.close() + + +def test_text(cursor): + _test_vartype(cursor, 'text') + + +def test_varchar(cursor): + _test_vartype(cursor, 'varchar') + + +def test_varbinary(cursor): + _test_vartype(cursor, 'varbinary') + + +def test_blob(cursor): + _test_vartype(cursor, 'blob') + + +def _test_vartype(cursor, datatype): + cursor.execute(f"create table t1(c1 {datatype}(4000))") + + for length in [None, 0, 100, 1000, 4000]: + cursor.execute("delete from t1") + + encoding = (datatype in ('blob', 'varbinary')) and 'utf8' or None + value = _generate_str(length, encoding=encoding) + + cursor.execute("insert into t1 values(?)", value) + v = cursor.execute("select * from t1").fetchone()[0] + assert v == value + + +def test_char(cursor): + value = "testing" + cursor.execute("create table t1(s char(7))") + cursor.execute("insert into t1 values(?)", "testing") + v = cursor.execute("select * from t1").fetchone()[0] + assert v == value + + +def test_int(cursor): + _test_scalar(cursor, 'int', [None, -1, 0, 1, 12345678]) + + +def test_bigint(cursor): + _test_scalar(cursor, 'bigint', [None, -1, 0, 1, 0x123456789, 0x7FFFFFFF, 0xFFFFFFFF, + 0x123456789]) + + +def test_float(cursor): + _test_scalar(cursor, 'float', [None, -1, 0, 1, 1234.5, -200]) + + +def _test_scalar(cursor, datatype, values): + cursor.execute(f"create table t1(c1 {datatype})") + for value in values: + print('value:', value) + cursor.execute("delete from t1") + cursor.execute("insert into t1 values (?)", value) + v = cursor.execute("select c1 from t1").fetchone()[0] + assert v == value + + +def 
test_decimal(cursor): + tests = [ + ('100010', '19'), # The ODBC docs tell us how the bytes should look in the C struct + ('1000.10', '20,6'), + ('-10.0010', '19,4') + ] + + for value, prec in tests: + value = Decimal(value) + cursor.execute("drop table if exists t1") + cursor.execute(f"create table t1(c1 numeric({prec}))") + cursor.execute("insert into t1 values (?)", value) + v = cursor.execute("select c1 from t1").fetchone()[0] + assert v == value + + +def test_multiple_bindings(cursor): + "More than one bind and select on a cursor" + cursor.execute("create table t1(n int)") + cursor.execute("insert into t1 values (?)", 1) + cursor.execute("insert into t1 values (?)", 2) + cursor.execute("insert into t1 values (?)", 3) + for i in range(3): + cursor.execute("select n from t1 where n < ?", 10) + cursor.execute("select n from t1 where n < 3") + + +def test_different_bindings(cursor): + cursor.execute("create table t1(n int)") + cursor.execute("create table t2(d datetime)") + cursor.execute("insert into t1 values (?)", 1) + cursor.execute("insert into t2 values (?)", datetime.now()) + + +def test_drivers(): + p = pyodbc.drivers() + assert isinstance(p, list) + + +def test_datasources(): + p = pyodbc.dataSources() + assert isinstance(p, dict) + + +def test_getinfo_string(): + cnxn = connect() + value = cnxn.getinfo(pyodbc.SQL_CATALOG_NAME_SEPARATOR) + assert isinstance(value, str) + + +def test_getinfo_bool(): + cnxn = connect() + value = cnxn.getinfo(pyodbc.SQL_ACCESSIBLE_TABLES) + assert isinstance(value, bool) + + +def test_getinfo_int(): + cnxn = connect() + value = cnxn.getinfo(pyodbc.SQL_DEFAULT_TXN_ISOLATION) + assert isinstance(value, int) + + +def test_getinfo_smallint(): + cnxn = connect() + value = cnxn.getinfo(pyodbc.SQL_CONCAT_NULL_BEHAVIOR) + assert isinstance(value, int) + + +def test_subquery_params(cursor): + """Ensure parameter markers work in a subquery""" + cursor.execute("create table t1(id integer, s varchar(20))") + cursor.execute("insert into 
t1 values (?,?)", 1, 'test') + row = cursor.execute(""" + select x.id + from ( + select id + from t1 + where s = ? + and id between ? and ? + ) x + """, 'test', 1, 10).fetchone() + assert row[0] == 1 + + +def test_close_cnxn(): + """Make sure using a Cursor after closing its connection doesn't crash.""" + + cnxn = connect() + cursor = cnxn.cursor() + + cursor.execute("drop table if exists t1") + cursor.execute("create table t1(id integer, s varchar(20))") + cursor.execute("insert into t1 values (?,?)", 1, 'test') + cursor.execute("select * from t1") + + cnxn.close() + + # Now that the connection is closed, we expect an exception. (If the code attempts to use + # the HSTMT, we'll get an access violation instead.) + with pytest.raises(pyodbc.ProgrammingError): + cursor.execute("select * from t1") + + +def test_negative_row_index(cursor): + cursor.execute("create table t1(s varchar(20))") + cursor.execute("insert into t1 values(?)", "1") + row = cursor.execute("select * from t1").fetchone() + assert row[0] == "1" + assert row[-1] == "1" + + +def test_version(cursor): + assert 3 == len(pyodbc.version.split('.')) # 1.3.1 etc. + + +def test_date(cursor): + value = date(2001, 1, 1) + + cursor.execute("create table t1(dt date)") + cursor.execute("insert into t1 values (?)", value) + + result = cursor.execute("select dt from t1").fetchone()[0] + assert type(result) == type(value) + assert result == value + + +def test_time(cursor): + value = datetime.now().time() + + # We aren't yet writing values using the new extended time type so the value written to the + # database is only down to the second. 
+ value = value.replace(microsecond=0) + + cursor.execute("create table t1(t time)") + cursor.execute("insert into t1 values (?)", value) + + result = cursor.execute("select t from t1").fetchone()[0] + assert value == result + + +def test_datetime(cursor): + value = datetime(2007, 1, 15, 3, 4, 5) + + cursor.execute("create table t1(dt datetime)") + cursor.execute("insert into t1 values (?)", value) + + result = cursor.execute("select dt from t1").fetchone()[0] + assert value == result + + +def test_rowcount_delete(cursor): + cursor.execute("create table t1(i int)") + count = 4 + for i in range(count): + cursor.execute("insert into t1 values (?)", i) + cursor.execute("delete from t1") + assert cursor.rowcount == count + + +def test_rowcount_nodata(cursor): + """ + This represents a different code path than a delete that deleted something. + + The return value is SQL_NO_DATA and code after it was causing an error. We could use + SQL_NO_DATA to step over the code that errors out and drop down to the same SQLRowCount + code. On the other hand, we could hardcode a zero return value. + """ + cursor.execute("create table t1(i int)") + # This is a different code path internally. + cursor.execute("delete from t1") + assert cursor.rowcount == 0 + + +def test_rowcount_select(cursor): + """ + Ensure Cursor.rowcount is set properly after a select statement. + + pyodbc calls SQLRowCount after each execute and sets Cursor.rowcount. Databases can return + the actual rowcount or they can return -1 if it would help performance. MySQL seems to + always return the correct rowcount. 
+ """ + cursor.execute("create table t1(i int)") + count = 4 + for i in range(count): + cursor.execute("insert into t1 values (?)", i) + cursor.execute("select * from t1") + assert cursor.rowcount == count + + rows = cursor.fetchall() + assert len(rows) == count + assert cursor.rowcount == count + + +def test_rowcount_reset(cursor): + "Ensure rowcount is reset to -1" + + # The Python DB API says that rowcount should be set to -1 and most ODBC drivers let us + # know there are no records. MySQL always returns 0, however. Without parsing the SQL + # (which we are not going to do), I'm not sure how we can tell the difference and set the + # value to -1. For now, I'll have this test check for 0. + + cursor.execute("create table t1(i int)") + count = 4 + for i in range(count): + cursor.execute("insert into t1 values (?)", i) + assert cursor.rowcount == 1 + + cursor.execute("create table t2(i int)") + assert cursor.rowcount == 0 + + +def test_lower_case(): + "Ensure pyodbc.lowercase forces returned column names to lowercase." + + # Has to be set before creating the cursor + cnxn = connect() + pyodbc.lowercase = True + cursor = cnxn.cursor() + + cursor.execute("drop table if exists t1") + + cursor.execute("create table t1(Abc int, dEf int)") + cursor.execute("select * from t1") + + names = [t[0] for t in cursor.description] + names.sort() + + assert names == ["abc", "def"] + + # Put it back so other tests don't fail. + pyodbc.lowercase = False + + +def test_row_description(cursor): + """ + Ensure Cursor.description is accessible as Row.cursor_description. 
+ """ + cursor.execute("create table t1(a int, b char(3))") + cursor.execute("insert into t1 values(1, 'abc')") + row = cursor.execute("select * from t1").fetchone() + assert cursor.description == row.cursor_description + + +def test_executemany(cursor): + cursor.execute("create table t1(a int, b varchar(10))") + + params = [(i, str(i)) for i in range(1, 6)] + + cursor.executemany("insert into t1(a, b) values (?,?)", params) + + count = cursor.execute("select count(*) from t1").fetchone()[0] + assert count == len(params) + + cursor.execute("select a, b from t1 order by a") + rows = cursor.fetchall() + assert count == len(rows) + + for param, row in zip(params, rows): + assert param[0] == row[0] + assert param[1] == row[1] + + +def test_executemany_one(cursor): + "Pass executemany a single sequence" + cursor.execute("create table t1(a int, b varchar(10))") + + params = [(1, "test")] + + cursor.executemany("insert into t1(a, b) values (?,?)", params) + + count = cursor.execute("select count(*) from t1").fetchone()[0] + assert count == len(params) + + cursor.execute("select a, b from t1 order by a") + rows = cursor.fetchall() + assert count == len(rows) + + for param, row in zip(params, rows): + assert param[0] == row[0] + assert param[1] == row[1] + + +def test_row_slicing(cursor): + cursor.execute("create table t1(a int, b int, c int, d int)") + cursor.execute("insert into t1 values(1,2,3,4)") + + row = cursor.execute("select * from t1").fetchone() + + result = row[:] + assert result is row + + result = row[:-1] + assert result == (1, 2, 3) + + result = row[0:4] + assert result is row + + +def test_row_repr(cursor): + cursor.execute("create table t1(a int, b int, c int, d int)") + cursor.execute("insert into t1 values(1,2,3,4)") + + row = cursor.execute("select * from t1").fetchone() + + result = str(row) + assert result == "(1, 2, 3, 4)" + + result = str(row[:-1]) + assert result == "(1, 2, 3)" + + result = str(row[:1]) + assert result == "(1,)" + + +def 
test_emoticons_as_parameter(cursor): + # https://github.com/mkleehammer/pyodbc/issues/423 + # + # When sending a varchar parameter, pyodbc is supposed to set ColumnSize to the number + # of characters. Ensure it works even with 4-byte characters. + # + # http://www.fileformat.info/info/unicode/char/1f31c/index.htm + + v = "x \U0001F31C z" + + cursor.execute("CREATE TABLE t1(s varchar(100)) DEFAULT CHARSET=utf8mb4") + cursor.execute("insert into t1 values (?)", v) + + result = cursor.execute("select s from t1").fetchone()[0] + + assert result == v + + +def test_emoticons_as_literal(cursor): + # https://github.com/mkleehammer/pyodbc/issues/630 + + v = "x \U0001F31C z" + + cursor.execute("CREATE TABLE t1(s varchar(100)) DEFAULT CHARSET=utf8mb4") + cursor.execute("insert into t1 values ('%s')" % v) + + result = cursor.execute("select s from t1").fetchone()[0] + + assert result == v + + +@lru_cache() +def _generate_str(length, encoding=None): + """ + Returns either a string or bytes, depending on whether encoding is provided, + that is `length` elements long. + + If length is None, None is returned. This simplifies the tests by letting us put None into + an array of other lengths and pass them here, moving the special case check into one place. + """ + if length is None: + return None + + # Put non-ASCII characters at the front so we don't end up chopping one in half in a + # multi-byte encoding like UTF-8. + + v = 'á' + + remaining = max(0, length - len(v)) + if remaining: + seed = '0123456789-abcdefghijklmnopqrstuvwxyz-' + + if remaining <= len(seed): + v += seed + else: + c = (remaining + len(seed) - 1 // len(seed)) + v += seed * c + + if encoding: + v = v.encode(encoding) + + # We chop *after* encoding because if we are encoding then we want bytes. 
+ v = v[:length] + + return v diff --git a/tests/old/README.md b/tests/old/README.md new file mode 100644 index 00000000..697a2f89 --- /dev/null +++ b/tests/old/README.md @@ -0,0 +1,2 @@ +These tests have not been ported to pytest. If you want to help, please do so and move up to +the tests directory. diff --git a/tests3/accesstests.py b/tests/old/accesstests.py similarity index 100% rename from tests3/accesstests.py rename to tests/old/accesstests.py diff --git a/tests2/empty.accdb b/tests/old/empty.accdb similarity index 100% rename from tests2/empty.accdb rename to tests/old/empty.accdb diff --git a/tests2/empty.mdb b/tests/old/empty.mdb similarity index 100% rename from tests2/empty.mdb rename to tests/old/empty.mdb diff --git a/tests3/exceltests.py b/tests/old/exceltests.py similarity index 100% rename from tests3/exceltests.py rename to tests/old/exceltests.py diff --git a/tests3/informixtests.py b/tests/old/informix_test.py similarity index 100% rename from tests3/informixtests.py rename to tests/old/informix_test.py diff --git a/tests3/sparktests.py b/tests/old/sparktests.py similarity index 100% rename from tests3/sparktests.py rename to tests/old/sparktests.py diff --git a/tests3/sqldwtests.py b/tests/old/sqldwtests.py similarity index 100% rename from tests3/sqldwtests.py rename to tests/old/sqldwtests.py diff --git a/tests3/sqlitetests.py b/tests/old/sqlitetests.py similarity index 100% rename from tests3/sqlitetests.py rename to tests/old/sqlitetests.py diff --git a/tests/postgresql_test.py b/tests/postgresql_test.py new file mode 100644 index 00000000..58a46b9e --- /dev/null +++ b/tests/postgresql_test.py @@ -0,0 +1,594 @@ +""" +Unit tests for PostgreSQL +""" +# -*- coding: utf-8 -*- + +import os, uuid +from decimal import Decimal + +import pyodbc, pytest + + +CNXNSTR = os.environ.get('PYODBC_POSTGRESQL', 'DSN=pyodbc-postgres') + + +def connect(autocommit=False, attrs_before=None): + return pyodbc.connect(CNXNSTR, autocommit=autocommit, 
attrs_before=attrs_before) + + +@pytest.fixture +def cursor(): + cnxn = connect() + cur = cnxn.cursor() + + cur.execute("drop table if exists t1") + cur.execute("drop table if exists t2") + cur.execute("drop table if exists t3") + cnxn.commit() + + yield cur + + if not cnxn.closed: + cur.close() + cnxn.close() + + +def _generate_str(length, encoding=None): + """ + Returns either a string or bytes, depending on whether encoding is provided, + that is `length` elements long. + + If length is None, None is returned. This simplifies the tests by letting us put None into + an array of other lengths and pass them here, moving the special case check into one place. + """ + if length is None: + return None + + seed = '0123456789-abcdefghijklmnopqrstuvwxyz-' + + if length <= len(seed): + v = seed + else: + c = (length + len(seed) - 1 // len(seed)) + v = seed * c + + v = v[:length] + if encoding: + v = v.encode(encoding) + + return v + + +def test_text(cursor): + cursor.execute("create table t1(col text)") + + # Two different read code paths exist based on the length. Using 100 and 4000 will ensure + # both are tested. + for length in [None, 0, 100, 1000, 4000]: + cursor.execute("truncate table t1") + param = _generate_str(length) + cursor.execute("insert into t1 values (?)", param) + result = cursor.execute("select col from t1").fetchval() + assert result == param + + +def test_text_many(cursor): + + # This shouldn't make a difference, but we'll ensure we can read and write from multiple + # columns at the same time. 
+ + cursor.execute("create table t1(col1 text, col2 text, col3 text)") + + v1 = 'ABCDEFGHIJ' * 30 + v2 = '0123456789' * 30 + v3 = '9876543210' * 30 + + cursor.execute("insert into t1(col1, col2, col3) values (?,?,?)", v1, v2, v3) + row = cursor.execute("select col1, col2, col3 from t1").fetchone() + + assert v1 == row.col1 + assert v2 == row.col2 + assert v3 == row.col3 + + +def test_chinese(cursor): + v = '我的' + row = cursor.execute("SELECT N'我的' AS name").fetchone() + assert row[0] == v + + rows = cursor.execute("SELECT N'我的' AS name").fetchall() + assert rows[0][0] == v + + +def test_bytea(cursor): + cursor.execute("create table t1(col bytea)") + + for length in [None, 0, 100, 1000, 4000]: + cursor.execute("truncate table t1") + param = _generate_str(length, 'utf8') + cursor.execute("insert into t1 values (?)", param) + result = cursor.execute("select col from t1").fetchval() + assert result == param + + +def test_bytearray(cursor): + """ + We will accept a bytearray and treat it like bytes, but when reading we'll still + get bytes back. + """ + cursor.execute("create table t1(col bytea)") + + # Two different read code paths exist based on the length. Using 100 and 4000 will ensure + # both are tested. 
+ for length in [0, 100, 1000, 4000]: + cursor.execute("truncate table t1") + bytes = _generate_str(length, 'utf8') + param = bytearray(bytes) + cursor.execute("insert into t1 values (?)", param) + result = cursor.execute("select col from t1").fetchval() + assert result == bytes + + +def test_int(cursor): + cursor.execute("create table t1(col int)") + for param in [None, -1, 0, 1, 0x7FFFFFFF]: + cursor.execute("truncate table t1") + cursor.execute("insert into t1 values (?)", param) + result = cursor.execute("select col from t1").fetchval() + assert result == param + + +def test_bigint(cursor): + cursor.execute("create table t1(col bigint)") + for param in [None, -1, 0, 1, 0x7FFFFFFF, 0xFFFFFFFF, 0x123456789]: + cursor.execute("truncate table t1") + cursor.execute("insert into t1 values (?)", param) + result = cursor.execute("select col from t1").fetchval() + assert result == param + + +def test_float(cursor): + cursor.execute("create table t1(col float)") + for param in [None, -1, 0, 1, -200, 20000]: + cursor.execute("truncate table t1") + cursor.execute("insert into t1 values (?)", param) + result = cursor.execute("select col from t1").fetchval() + assert result == param + + +def test_decimal(cursor): + cursor.execute("create table t1(col decimal(20,6))") + + # Note: Use strings to initialize the decimals to eliminate floating point rounding. + # + # Also, the ODBC docs show the value 100010 in the C struct, so I've included it here, + # along with a couple of shifted versions. 
+ params = [Decimal(n) for n in "-1000.10 -1234.56 -1 0 1 1000.10 1234.56 100010 123456789.21".split()] + params.append(None) + + for param in params: + cursor.execute("truncate table t1") + cursor.execute("insert into t1 values (?)", param) + result = cursor.execute("select col from t1").fetchval() + assert result == param + + +def test_numeric(cursor): + cursor.execute("create table t1(col numeric(20,6))") + + # Note: Use strings to initialize the decimals to eliminate floating point rounding. + params = [Decimal(n) for n in "-1234.56 -1 0 1 1234.56 123456789.21".split()] + params.append(None) + + for param in params: + cursor.execute("truncate table t1") + cursor.execute("insert into t1 values (?)", param) + result = cursor.execute("select col from t1").fetchval() + assert result == param + + +def test_maxwrite(cursor): + # If we write more than `maxwrite` bytes, pyodbc will switch from binding the data all at + # once to providing it at execute time with SQLPutData. The default maxwrite is 1GB so + # this is rarely needed in PostgreSQL but I need to test the functionality somewhere. + cursor.connection.maxwrite = 300 + + cursor.execute("create table t1(col text)") + param = _generate_str(400) + cursor.execute("insert into t1 values (?)", param) + result = cursor.execute("select col from t1").fetchval() + assert result == param + + +def test_nonnative_uuid(cursor): + pyodbc.native_uuid = False + + param = uuid.uuid4() + cursor.execute("create table t1(n uuid)") + cursor.execute("insert into t1 values (?)", param) + + result = cursor.execute("select n from t1").fetchval() + assert isinstance(result, str) + assert result == str(param).upper() + + +def test_native_uuid(cursor): + pyodbc.native_uuid = True + # When true, we should return a uuid.UUID object. 
+ + param = uuid.uuid4() + cursor.execute("create table t1(n uuid)") + cursor.execute("insert into t1 values (?)", param) + + result = cursor.execute("select n from t1").fetchval() + assert isinstance(result, uuid.UUID) + assert param == result + + +def test_close_cnxn(cursor): + """Make sure using a Cursor after closing its connection doesn't crash.""" + + cursor.execute("create table t1(id integer, s varchar(20))") + cursor.execute("insert into t1 values (?,?)", 1, 'test') + cursor.execute("select * from t1") + + cursor.connection.close() + + # Now that the connection is closed, we expect an exception. (If the code attempts to use + # the HSTMT, we'll get an access violation instead.) + + with pytest.raises(pyodbc.ProgrammingError): + cursor.execute("select * from t1") + + +def test_version(): + assert len(pyodbc.version.split('.')) == 3 + + +def test_rowcount(cursor): + assert cursor.rowcount == -1 + # The spec says it should be -1 when not in use. + + cursor.execute("create table t1(col int)") + count = 4 + for i in range(count): + cursor.execute("insert into t1 values (?)", i) + + cursor.execute("select * from t1") + assert cursor.rowcount == count + + cursor.execute("update t1 set col=col+1") + assert cursor.rowcount == count + + cursor.execute("delete from t1") + assert cursor.rowcount == count + + # This is a different code path - the value internally is SQL_NO_DATA instead of an empty + # result set. Just make sure it doesn't crash. + cursor.execute("delete from t1") + assert cursor.rowcount == 0 + + # IMPORTANT: The ODBC spec says it should be -1 after the create table, but the PostgreSQL + # driver is telling pyodbc the rowcount is 0. Since we have no way of knowing when to + # override it, we'll just update the test to ensure it is consistently zero. + + cursor.execute("create table t2(i int)") + assert cursor.rowcount == 0 + + +def test_row_description(cursor): + """ + Ensure Cursor.description is accessible as Row.cursor_description. 
+ """ + cursor.execute("create table t1(col1 int, col2 char(3))") + cursor.execute("insert into t1 values(1, 'abc')") + + row = cursor.execute("select col1, col2 from t1").fetchone() + + assert row.cursor_description == cursor.description + + +def test_lower_case(cursor): + "Ensure pyodbc.lowercase forces returned column names to lowercase." + + try: + pyodbc.lowercase = True + + cursor.execute("create table t1(Abc int, dEf int)") + cursor.execute("select * from t1") + + names = {t[0] for t in cursor.description} + assert names == {'abc', 'def'} + finally: + pyodbc.lowercase = False + + +def test_executemany(cursor): + + cursor.execute("create table t1(col1 int, col2 varchar(10))") + params = [(i, str(i)) for i in range(1, 6)] + + # Without fast_executemany + + cursor.executemany("insert into t1(col1, col2) values (?,?)", params) + cursor.execute("select col1, col2 from t1 order by col1") + results = [tuple(row) for row in cursor] + assert results == params + + # With fast_executemany + + try: + pyodbc.fast_executemany = True + cursor.execute("truncate table t1") + cursor.executemany("insert into t1(col1, col2) values (?,?)", params) + cursor.execute("select col1, col2 from t1 order by col1") + results = [tuple(row) for row in cursor] + assert results == params + finally: + pyodbc.fast_executemany = False + + +def test_executemany_failure(cursor): + """ + Ensure that an exception is raised if one query in an executemany fails. 
+ """ + cursor.execute("create table t1(a int, b varchar(10))") + + params = [ (1, 'good'), + ('error', 'not an int'), + (3, 'good') ] + + with pytest.raises(pyodbc.Error): + cursor.executemany("insert into t1(a, b) value (?, ?)", params) + + +def test_row_slicing(cursor): + cursor.execute("create table t1(a int, b int, c int, d int)") + cursor.execute("insert into t1 values(1,2,3,4)") + + row = cursor.execute("select * from t1").fetchone() + + result = row[:] + assert result is row # returned as is + + result = row[:-1] + assert result == (1, 2, 3) # returned as tuple + + result = row[0:4] + assert result is row + + +def test_drivers(): + p = pyodbc.drivers() + assert isinstance(p, list) + + +def test_datasources(): + p = pyodbc.dataSources() + assert isinstance(p, dict) + + +def test_getinfo_string(cursor): + value = cursor.connection.getinfo(pyodbc.SQL_CATALOG_NAME_SEPARATOR) + assert isinstance(value, str) + + +def test_getinfo_bool(cursor): + value = cursor.connection.getinfo(pyodbc.SQL_ACCESSIBLE_TABLES) + assert isinstance(value, bool) + + +def test_getinfo_int(cursor): + value = cursor.connection.getinfo(pyodbc.SQL_DEFAULT_TXN_ISOLATION) + assert isinstance(value, int) + + +def test_getinfo_smallint(cursor): + value = cursor.connection.getinfo(pyodbc.SQL_CONCAT_NULL_BEHAVIOR) + assert isinstance(value, int) + +def test_cnxn_execute_error(cursor): + """ + Make sure that Connection.execute (not Cursor) errors are not "eaten". 
+ + GitHub issue #74 + """ + cursor.execute("create table t1(a int primary key)") + cursor.execute("insert into t1 values (1)") + with pytest.raises(pyodbc.Error): + cursor.connection.execute("insert into t1 values (1)") + +def test_row_repr(cursor): + cursor.execute("create table t1(a int, b int, c int, d int)") + cursor.execute("insert into t1 values(1,2,3,4)") + + row = cursor.execute("select * from t1").fetchone() + + result = str(row) + assert result == "(1, 2, 3, 4)" + + result = str(row[:-1]) + assert result == "(1, 2, 3)" + + result = str(row[:1]) + assert result == "(1,)" + + +def test_autocommit(cursor): + assert cursor.connection.autocommit is False + othercnxn = connect(autocommit=True) + assert othercnxn.autocommit is True + othercnxn.autocommit = False + assert othercnxn.autocommit is False + +def test_exc_integrity(cursor): + "Make sure an IntegretyError is raised" + # This is really making sure we are properly encoding and comparing the SQLSTATEs. + cursor.execute("create table t1(s1 varchar(10) primary key)") + cursor.execute("insert into t1 values ('one')") + with pytest.raises(pyodbc.IntegrityError): + cursor.execute("insert into t1 values ('one')") + + +def test_cnxn_set_attr_before(): + # I don't have a getattr right now since I don't have a table telling me what kind of + # value to expect. For now just make sure it doesn't crash. + # From the unixODBC sqlext.h header file. + SQL_ATTR_PACKET_SIZE = 112 + _cnxn = connect(attrs_before={ SQL_ATTR_PACKET_SIZE : 1024 * 32 }) + + +def test_cnxn_set_attr(cursor): + # I don't have a getattr right now since I don't have a table telling me what kind of + # value to expect. For now just make sure it doesn't crash. + # From the unixODBC sqlext.h header file. 
+ SQL_ATTR_ACCESS_MODE = 101 + SQL_MODE_READ_ONLY = 1 + cursor.connection.set_attr(SQL_ATTR_ACCESS_MODE, SQL_MODE_READ_ONLY) + + +def test_columns(cursor): + driver_version = tuple( + int(x) for x in cursor.connection.getinfo(pyodbc.SQL_DRIVER_VER).split(".") + ) + + def _get_column_size(row): + # the driver changed the name of the returned columns in version 13.02. + # see https://odbc.postgresql.org/docs/release.html, release 13.02.0000, change 6. + return row.column_size if driver_version >= (13, 2, 0) else row.precision + + # When using aiohttp, `await cursor.primaryKeys('t1')` was raising the error + # + # Error: TypeError: argument 2 must be str, not None + # + # I'm not sure why, but PyArg_ParseTupleAndKeywords fails if you use "|s" for an + # optional string keyword when calling indirectly. + + cursor.execute("create table t1(a int, b varchar(3), xΏz varchar(4))") + + cursor.columns('t1') + results = {row.column_name: row for row in cursor} + row = results['a'] + assert row.type_name == 'int4', row.type_name + row = results['b'] + assert row.type_name == 'varchar' + assert _get_column_size(row) == 3, _get_column_size(row) + row = results['xΏz'] + assert row.type_name == 'varchar' + assert _get_column_size(row) == 4, _get_column_size(row) + + # Now do the same, but specifically pass in None to one of the keywords. Old versions + # were parsing arguments incorrectly and would raise an error. (This crops up when + # calling indirectly like columns(*args, **kwargs) which aiodbc does.) + + cursor.columns('t1', schema=None, catalog=None) + results = {row.column_name: row for row in cursor} + row = results['a'] + assert row.type_name == 'int4', row.type_name + row = results['b'] + assert row.type_name == 'varchar' + assert _get_column_size(row) == 3 + +def test_cancel(cursor): + # I'm not sure how to reliably cause a hang to cancel, so for now we'll settle with + # making sure SQLCancel is called correctly. 
+ cursor.execute("select 1") + cursor.cancel() + +def test_emoticons_as_parameter(cursor): + # https://github.com/mkleehammer/pyodbc/issues/423 + # + # When sending a varchar parameter, pyodbc is supposed to set ColumnSize to the number + # of characters. Ensure it works even with 4-byte characters. + # + # http://www.fileformat.info/info/unicode/char/1f31c/index.htm + + v = "x \U0001F31C z" + + cursor.execute("CREATE TABLE t1(s varchar(100))") + cursor.execute("insert into t1 values (?)", v) + + result = cursor.execute("select s from t1").fetchone()[0] + + assert result == v + +def test_emoticons_as_literal(cursor): + # https://github.com/mkleehammer/pyodbc/issues/630 + + v = "x \U0001F31C z" + + cursor.execute("CREATE TABLE t1(s varchar(100))") + cursor.execute(f"insert into t1 values ('{v}')") + + result = cursor.execute("select s from t1").fetchone()[0] + + assert result == v + + +def test_cursor_messages(cursor): + """ + Test the Cursor.messages attribute. + """ + # Using INFO message level because they are always sent to the client regardless of + + # client_min_messages: https://www.postgresql.org/docs/11/runtime-config-client.html + for msg in ('hello world', 'ABCDEFGHIJ' * 800): + cursor.execute(f""" + CREATE OR REPLACE PROCEDURE test_cursor_messages() + LANGUAGE plpgsql + AS $$ + BEGIN + RAISE INFO '{msg}' USING ERRCODE = '01000'; + END; + $$; + """) + cursor.execute("CALL test_cursor_messages();") + messages = cursor.messages + + # There is a maximum size for these so the second msg will actually generate a bunch of + # messages. To make it easier to compare, we'll stitch them back together. + + if len(messages) > 1: + concat = ''.join(t[1] for t in messages) + messages = [(messages[0][0], concat)] + + assert messages == [('[01000] (-1)', f'INFO: {msg}')] + + +def test_output_conversion(cursor): + # Note the use of SQL_WVARCHAR, not SQL_VARCHAR. + + def convert(value): + # The value is the raw bytes (as a bytes object) read from the + # database. 
We'll simply add an X at the beginning at the end. + return 'X' + value.decode('latin1') + 'X' + + cursor.execute("create table t1(n int, v varchar(10))") + cursor.execute("insert into t1 values (1, '123.45')") + + cursor.connection.add_output_converter(pyodbc.SQL_WVARCHAR, convert) + value = cursor.execute("select v from t1").fetchone()[0] + assert value == 'X123.45X' + + # Clear all conversions and try again. There should be no Xs this time. + cursor.connection.clear_output_converters() + value = cursor.execute("select v from t1").fetchone()[0] + assert value == '123.45' + + # Same but clear using remove_output_converter. + cursor.connection.add_output_converter(pyodbc.SQL_WVARCHAR, convert) + value = cursor.execute("select v from t1").fetchone()[0] + assert value == 'X123.45X' + + cursor.connection.remove_output_converter(pyodbc.SQL_WVARCHAR) + value = cursor.execute("select v from t1").fetchone()[0] + assert value == '123.45' + + # And lastly, clear by passing None for the converter. + cursor.connection.add_output_converter(pyodbc.SQL_WVARCHAR, convert) + value = cursor.execute("select v from t1").fetchone()[0] + assert value == 'X123.45X' + + cursor.connection.add_output_converter(pyodbc.SQL_WVARCHAR, None) + value = cursor.execute("select v from t1").fetchone()[0] + assert value == '123.45' diff --git a/tests/sqlserver_test.py b/tests/sqlserver_test.py new file mode 100755 index 00000000..980bc0f6 --- /dev/null +++ b/tests/sqlserver_test.py @@ -0,0 +1,1665 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +import os, uuid, re, sys +from decimal import Decimal +from datetime import date, time, datetime +from functools import lru_cache + +import pyodbc, pytest + + +# WARNING: Wow Microsoft always manages to do the stupidest thing possible always trying to be +# smarter than everyone. I worked with their APIs for since before "OLE" and it has always +# been a nanny state. They won't read the UID and PWD from odbc.ini because it isn't secure. +# Really? 
Less secure than what? The next hack someone is going to use. Do the straight +# forward thing and explain how to secure it. it isn't their business how I deploy and secure. +# +# For every other DB we use a single default DSN but you can pass your own via an environment +# variable. For SS, we can't just use a default DSN unless you want to go trusted. (Which is +# more secure? No.) It'll be put into .bashrc most likely. Way to go. Now I'll go rename +# all of the others to DB specific names instead of PYODBC_CNXNSTR. Hot garbage as usual. + +CNXNSTR = os.environ.get('PYODBC_SQLSERVER', 'DSN=pyodbc-sqlserver') + + +def connect(autocommit=False, attrs_before=None): + return pyodbc.connect(CNXNSTR, autocommit=autocommit, attrs_before=attrs_before) + + +DRIVER = connect().getinfo(pyodbc.SQL_DRIVER_NAME) + +IS_FREEDTS = bool(re.search('tsodbc', DRIVER, flags=re.IGNORECASE)) +IS_MSODBCSQL = bool(re.search(r'(msodbcsql|sqlncli|sqlsrv32\.dll)', DRIVER, re.IGNORECASE)) + + +def _get_sqlserver_year(): + """ + Returns the release year of the current version of SQL Server, used to skip tests for + features that are not supported. If the current DB is not SQL Server, 0 is returned. + """ + # We used to use the major version, but most documentation on the web refers to the year + # (e.g. SQL Server 2019) so we'll use that for skipping tests that do not apply. 
+ if not IS_MSODBCSQL: + return 0 + cnxn = connect() + cursor = cnxn.cursor() + row = cursor.execute("exec master..xp_msver 'ProductVersion'").fetchone() + major = row.Character_Value.split('.', 1)[0] + return { + # https://sqlserverbuilds.blogspot.com/ + '8': 2000, '9': 2005, '10': 2008, '11': 2012, '12': 2014, + '13': 2016, '14': 2017, '15': 2019, '16': 2022 + }[major] + + +SQLSERVER_YEAR = _get_sqlserver_year() + + +@pytest.fixture +def cursor(): + cnxn = connect() + cur = cnxn.cursor() + + cur.execute("drop table if exists t1") + cur.execute("drop table if exists t2") + cur.execute("drop table if exists t3") + cnxn.commit() + + yield cur + + if not cnxn.closed: + cur.close() + cnxn.close() + + +def test_text(cursor): + _test_vartype(cursor, 'text') + + +def test_varchar(cursor): + _test_vartype(cursor, 'varchar') + + +def test_nvarchar(cursor): + _test_vartype(cursor, 'nvarchar') + + +def test_varbinary(cursor): + _test_vartype(cursor, 'varbinary') + + +@pytest.mark.skipif(SQLSERVER_YEAR < 2005, reason='(max) not supported until 2005') +def test_unicode_longmax(cursor): + # Issue 188: Segfault when fetching NVARCHAR(MAX) data over 511 bytes + cursor.execute("select cast(replicate(N'x', 512) as nvarchar(max))") + + +def test_char(cursor): + value = "testing" + cursor.execute("create table t1(s char(7))") + cursor.execute("insert into t1 values(?)", "testing") + v = cursor.execute("select * from t1").fetchone()[0] + assert v == value + + +def test_int(cursor): + _test_scalar(cursor, 'int', [None, -1, 0, 1, 12345678]) + + +def test_bigint(cursor): + _test_scalar(cursor, 'bigint', [None, -1, 0, 1, 0x123456789, 0x7FFFFFFF, 0xFFFFFFFF, + 0x123456789]) + + +def test_overflow_int(cursor): + # python allows integers of any size, bigger than an 8 byte int can contain + input = 9999999999999999999999999999999999999 + cursor.execute("create table t1(d bigint)") + with pytest.raises(OverflowError): + cursor.execute("insert into t1 values (?)", input) + result = 
cursor.execute("select * from t1").fetchall()
+ assert result == []
+
+
+def test_float(cursor):
+ _test_scalar(cursor, 'float', [None, -200, -1, 0, 1, 1234.5, -200, .00012345])
+
+
+def test_non_numeric_float(cursor):
+ cursor.execute("create table t1(d float)")
+ for input in (float('+Infinity'), float('-Infinity'), float('NaN')):
+ with pytest.raises(pyodbc.ProgrammingError):
+ cursor.execute("insert into t1 values (?)", input)
+
+
+def test_drivers():
+ p = pyodbc.drivers()
+ assert isinstance(p, list)
+
+
+def test_datasources():
+ p = pyodbc.dataSources()
+ assert isinstance(p, dict)
+
+
+def test_getinfo_string():
+ cnxn = connect()
+ value = cnxn.getinfo(pyodbc.SQL_CATALOG_NAME_SEPARATOR)
+ assert isinstance(value, str)
+
+
+def test_getinfo_bool():
+ cnxn = connect()
+ value = cnxn.getinfo(pyodbc.SQL_ACCESSIBLE_TABLES)
+ assert isinstance(value, bool)
+
+
+def test_getinfo_int():
+ cnxn = connect()
+ value = cnxn.getinfo(pyodbc.SQL_DEFAULT_TXN_ISOLATION)
+ assert isinstance(value, int)
+
+
+def test_getinfo_smallint():
+ cnxn = connect()
+ value = cnxn.getinfo(pyodbc.SQL_CONCAT_NULL_BEHAVIOR)
+ assert isinstance(value, int)
+
+
+def test_no_fetch(cursor):
+ # Issue 89 with FreeTDS: Multiple selects (or catalog functions that issue selects) without
+ # fetches seem to confuse the driver.
+ cursor.execute('select 1')
+ cursor.execute('select 1')
+ cursor.execute('select 1')
+
+
+def test_decode_meta(cursor):
+ """
+ Ensure column names with non-ASCII characters are converted using the configured encodings.
+ """
+ # This is from GitHub issue #190
+ cursor.execute("create table t1(a int)")
+ cursor.execute("insert into t1 values (1)")
+ cursor.execute('select a as "Tipología" from t1')
+ assert cursor.description[0][0] == "Tipología"
+
+
+def test_exc_integrity(cursor):
+ "Make sure an IntegrityError is raised"
+ # This is really making sure we are properly encoding and comparing the SQLSTATEs.
+ cursor.execute("create table t1(s1 varchar(10) primary key)") + cursor.execute("insert into t1 values ('one')") + with pytest.raises(pyodbc.IntegrityError): + cursor.execute("insert into t1 values ('one')") + + +def test_multiple_bindings(cursor): + "More than one bind and select on a cursor" + cursor.execute("create table t1(n int)") + cursor.execute("insert into t1 values (?)", 1) + cursor.execute("insert into t1 values (?)", 2) + cursor.execute("insert into t1 values (?)", 3) + for _ in range(3): + cursor.execute("select n from t1 where n < ?", 10) + cursor.execute("select n from t1 where n < 3") + + +def test_different_bindings(cursor): + cursor.execute("create table t1(n int)") + cursor.execute("create table t2(d datetime)") + cursor.execute("insert into t1 values (?)", 1) + cursor.execute("insert into t2 values (?)", datetime.now()) + + +SMALL_FENCEPOST_SIZES = [None, 0, 1, 255, 256, 510, 511, 512, 1023, 1024, 2047, 2048, 4000] +LARGE_FENCEPOST_SIZES = SMALL_FENCEPOST_SIZES + [4095, 4096, 4097, 10 * 1024, 20 * 1024] + + +def _test_vartype(cursor, datatype): + + if datatype == 'text': + lengths = LARGE_FENCEPOST_SIZES + else: + lengths = SMALL_FENCEPOST_SIZES + + if datatype == 'text': + cursor.execute(f"create table t1(c1 {datatype})") + else: + maxlen = lengths[-1] + cursor.execute(f"create table t1(c1 {datatype}({maxlen}))") + + for length in lengths: + cursor.execute("delete from t1") + + encoding = (datatype in ('blob', 'varbinary')) and 'utf8' or None + value = _generate_str(length, encoding=encoding) + + try: + cursor.execute("insert into t1 values(?)", value) + except pyodbc.Error as ex: + msg = f'{datatype} insert failed: length={length} len={len(value)}' + raise Exception(msg) from ex + + v = cursor.execute("select * from t1").fetchone()[0] + assert v == value + + +def _test_scalar(cursor, datatype, values): + """ + A simple test wrapper for types that are identical when written and read. 
+ """ + cursor.execute(f"create table t1(c1 {datatype})") + for value in values: + print('value:', value) + cursor.execute("delete from t1") + cursor.execute("insert into t1 values (?)", value) + v = cursor.execute("select c1 from t1").fetchone()[0] + assert v == value + + +def test_noscan(cursor): + assert cursor.noscan is False + cursor.noscan = True + assert cursor.noscan is True + + +def test_nonnative_uuid(cursor): + # The default is False meaning we should return a string. Note that + # SQL Server seems to always return uppercase. + value = uuid.uuid4() + cursor.execute("create table t1(n uniqueidentifier)") + cursor.execute("insert into t1 values (?)", value) + + pyodbc.native_uuid = False + result = cursor.execute("select n from t1").fetchval() + assert isinstance(result, str) + assert result == str(value).upper() + + +def test_native_uuid(cursor): + # When true, we should return a uuid.UUID object. + value = uuid.uuid4() + cursor.execute("create table t1(n uniqueidentifier)") + cursor.execute("insert into t1 values (?)", value) + + pyodbc.native_uuid = True + result = cursor.execute("select n from t1").fetchval() + assert isinstance(result, uuid.UUID) + assert value == result + + +def test_nextset(cursor): + cursor.execute("create table t1(i int)") + for i in range(4): + cursor.execute("insert into t1(i) values(?)", i) + + cursor.execute( + """ + select i from t1 where i < 2 order by i; + select i from t1 where i >= 2 order by i + """) + + for i, row in enumerate(cursor): + assert i == row.i + + assert cursor.nextset() + + for i, row in enumerate(cursor): + assert i + 2 == row.i + + +@pytest.mark.skipif(IS_FREEDTS, reason='https://github.com/FreeTDS/freetds/issues/230') +def test_nextset_with_raiserror(cursor): + cursor.execute("select i = 1; RAISERROR('c', 16, 1);") + row = next(cursor) + assert 1 == row.i + with pytest.raises(pyodbc.ProgrammingError): + cursor.nextset() + + +def test_fixed_unicode(cursor): + value = "t\xebsting" + cursor.execute("create 
table t1(s nchar(7))") + cursor.execute("insert into t1 values(?)", "t\xebsting") + v = cursor.execute("select * from t1").fetchone()[0] + assert isinstance(v, str) + assert len(v) == len(value) + # If we alloc'd wrong, the test below might work because of an embedded NULL + assert v == value + + +def test_chinese(cursor): + v = '我的' + cursor.execute("SELECT N'我的' AS [Name]") + row = cursor.fetchone() + assert row[0] == v + + cursor.execute("SELECT N'我的' AS [Name]") + rows = cursor.fetchall() + assert rows[0][0] == v + + +def test_bit(cursor): + value = True + cursor.execute("create table t1(b bit)") + cursor.execute("insert into t1 values (?)", value) + v = cursor.execute("select b from t1").fetchone()[0] + assert isinstance(v, bool) + assert v == value + + +def test_decimal(cursor): + # From test provided by planders (thanks!) in Issue 91 + + for (precision, scale, negative) in [ + (1, 0, False), (1, 0, True), (6, 0, False), (6, 2, False), (6, 4, True), + (6, 6, True), (38, 0, False), (38, 10, False), (38, 38, False), (38, 0, True), + (38, 10, True), (38, 38, True)]: + + try: + cursor.execute("drop table t1") + except: + pass + + cursor.execute(f"create table t1(d decimal({precision}, {scale}))") + + # Construct a decimal that uses the maximum precision and scale. + sign = negative and '-' or '' + before = '9' * (precision - scale) + after = scale and ('.' 
+ '9' * scale) or '' + decStr = f'{sign}{before}{after}' + value = Decimal(decStr) + + cursor.execute("insert into t1 values(?)", value) + + v = cursor.execute("select d from t1").fetchone()[0] + assert v == value + + +def test_decimal_e(cursor): + """Ensure exponential notation decimals are properly handled""" + value = Decimal((0, (1, 2, 3), 5)) # prints as 1.23E+7 + cursor.execute("create table t1(d decimal(10, 2))") + cursor.execute("insert into t1 values (?)", value) + result = cursor.execute("select * from t1").fetchone()[0] + assert result == value + + +def test_subquery_params(cursor): + """Ensure parameter markers work in a subquery""" + cursor.execute("create table t1(id integer, s varchar(20))") + cursor.execute("insert into t1 values (?,?)", 1, 'test') + row = cursor.execute(""" + select x.id + from ( + select id + from t1 + where s = ? + and id between ? and ? + ) x + """, 'test', 1, 10).fetchone() + assert row is not None + assert row[0] == 1 + + +def test_close_cnxn(): + """Make sure using a Cursor after closing its connection doesn't crash.""" + + cnxn = connect() + cursor = cnxn.cursor() + + cursor.execute("drop table if exists t1") + cursor.execute("create table t1(id integer, s varchar(20))") + cursor.execute("insert into t1 values (?,?)", 1, 'test') + cursor.execute("select * from t1") + + cnxn.close() + + # Now that the connection is closed, we expect an exception. (If the code attempts to use + # the HSTMT, we'll get an access violation instead.) 
+ with pytest.raises(pyodbc.ProgrammingError): + cursor.execute("select * from t1") + + +def test_empty_string(cursor): + cursor.execute("create table t1(s varchar(20))") + cursor.execute("insert into t1 values(?)", "") + + +def test_empty_string_encoding(): + cnxn = connect() + cnxn.setdecoding(pyodbc.SQL_CHAR, encoding='shift_jis') + value = "" + cursor = cnxn.cursor() + cursor.execute("create table t1(s varchar(20))") + cursor.execute("insert into t1 values(?)", value) + v = cursor.execute("select * from t1").fetchone()[0] + assert v == value + + +def test_fixed_str(cursor): + value = "testing" + cursor.execute("create table t1(s char(7))") + cursor.execute("insert into t1 values(?)", value) + v = cursor.execute("select * from t1").fetchone()[0] + assert isinstance(v, str) + assert len(v) == len(value) + # If we alloc'd wrong, the test below might work because of an embedded NULL + assert v == value + + +def test_empty_unicode(cursor): + cursor.execute("create table t1(s nvarchar(20))") + cursor.execute("insert into t1 values(?)", "") + + +def test_empty_unicode_encoding(): + cnxn = connect() + cnxn.setdecoding(pyodbc.SQL_CHAR, encoding='shift_jis') + value = "" + cursor = cnxn.cursor() + cursor.execute("create table t1(s nvarchar(20))") + cursor.execute("insert into t1 values(?)", value) + v = cursor.execute("select * from t1").fetchone()[0] + assert v == value + + +def test_negative_row_index(cursor): + cursor.execute("create table t1(s varchar(20))") + cursor.execute("insert into t1 values(?)", "1") + row = cursor.execute("select * from t1").fetchone() + assert row[0] == "1" + assert row[-1] == "1" + + +def test_version(): + assert 3 == len(pyodbc.version.split('.')) # 1.3.1 etc. 
+ + +@pytest.mark.skipif(IS_MSODBCSQL and SQLSERVER_YEAR < 2008, + reason='Date not supported until 2008?') +def test_date(cursor): + value = date.today() + + cursor.execute("create table t1(d date)") + cursor.execute("insert into t1 values (?)", value) + + result = cursor.execute("select d from t1").fetchone()[0] + assert isinstance(result, date) + assert value == result + + +@pytest.mark.skipif(IS_MSODBCSQL and SQLSERVER_YEAR < 2008, + reason='Time not supported until 2008?') +def test_time(cursor): + value = datetime.now().time() + + # We aren't yet writing values using the new extended time type so the value written to the + # database is only down to the second. + value = value.replace(microsecond=0) + + cursor.execute("create table t1(t time)") + cursor.execute("insert into t1 values (?)", value) + + result = cursor.execute("select t from t1").fetchone()[0] + assert isinstance(result, time) + assert value == result + + +def test_datetime(cursor): + value = datetime(2007, 1, 15, 3, 4, 5) + + cursor.execute("create table t1(dt datetime)") + cursor.execute("insert into t1 values (?)", value) + + result = cursor.execute("select dt from t1").fetchone()[0] + assert isinstance(result, datetime) + assert value == result + + +def test_datetime_fraction(cursor): + # SQL Server supports milliseconds, but Python's datetime supports nanoseconds, so the most + # granular datetime supported is xxx000. + + value = datetime(2007, 1, 15, 3, 4, 5, 123000) + + cursor.execute("create table t1(dt datetime)") + cursor.execute("insert into t1 values (?)", value) + + result = cursor.execute("select dt from t1").fetchone()[0] + assert isinstance(result, datetime) + assert value == result + + +def test_datetime_fraction_rounded(cursor): + # SQL Server supports milliseconds, but Python's datetime supports nanoseconds. pyodbc + # rounds down to what the database supports. 
+ + full = datetime(2007, 1, 15, 3, 4, 5, 123456) + rounded = datetime(2007, 1, 15, 3, 4, 5, 123000) + + cursor.execute("create table t1(dt datetime)") + cursor.execute("insert into t1 values (?)", full) + + result = cursor.execute("select dt from t1").fetchone()[0] + assert isinstance(result, datetime) + assert rounded == result + + +def test_datetime2(cursor): + value = datetime(2007, 1, 15, 3, 4, 5) + + cursor.execute("create table t1(dt datetime2)") + cursor.execute("insert into t1 values (?)", value) + + result = cursor.execute("select dt from t1").fetchone()[0] + assert isinstance(result, datetime) + assert value == result + + +def test_sp_results(cursor): + cursor.execute( + """ + Create procedure proc1 + AS + select top 10 name, id, xtype, refdate + from sysobjects + """) + rows = cursor.execute("exec proc1").fetchall() + assert isinstance(rows, list) + assert len(rows) == 10 # there has to be at least 10 items in sysobjects + assert isinstance(rows[0].refdate, datetime) + + +def test_sp_results_from_temp(cursor): + + # Note: I've used "set nocount on" so that we don't get the number of rows deleted from + # #tmptable. If you don't do this, you'd need to call nextset() once to skip it. 
+ + cursor.execute( + """ + Create procedure proc1 + AS + set nocount on + select top 10 name, id, xtype, refdate + into #tmptable + from sysobjects + + select * from #tmptable + """) + cursor.execute("exec proc1") + assert cursor.description is not None + assert len(cursor.description) == 4 + + rows = cursor.fetchall() + assert isinstance(rows, list) + assert len(rows) == 10 # there has to be at least 10 items in sysobjects + assert isinstance(rows[0].refdate, datetime) + + +def test_sp_results_from_vartbl(cursor): + cursor.execute( + """ + Create procedure proc1 + AS + set nocount on + declare @tmptbl table(name varchar(100), id int, xtype varchar(4), refdate datetime) + + insert into @tmptbl + select top 10 name, id, xtype, refdate + from sysobjects + + select * from @tmptbl + """) + cursor.execute("exec proc1") + rows = cursor.fetchall() + assert isinstance(rows, list) + assert len(rows) == 10 # there has to be at least 10 items in sysobjects + assert isinstance(rows[0].refdate, datetime) + + +def test_sp_with_dates(cursor): + # Reported in the forums that passing two datetimes to a stored procedure doesn't work. + cursor.execute( + """ + if exists (select * from dbo.sysobjects where id = object_id(N'[test_sp]') + and OBJECTPROPERTY(id, N'IsProcedure') = 1) + drop procedure [dbo].[test_sp] + """) + cursor.execute( + """ + create procedure test_sp(@d1 datetime, @d2 datetime) + AS + declare @d as int + set @d = datediff(year, @d1, @d2) + select @d + """) + cursor.execute("exec test_sp ?, ?", datetime.now(), datetime.now()) + rows = cursor.fetchall() + assert rows is not None + assert rows[0][0] == 0 # 0 years apart + + +def test_sp_with_none(cursor): + # Reported in the forums that passing None caused an error. 
+ cursor.execute( + """ + if exists (select * from dbo.sysobjects where id = object_id(N'[test_sp]') + and OBJECTPROPERTY(id, N'IsProcedure') = 1) + drop procedure [dbo].[test_sp] + """) + cursor.execute( + """ + create procedure test_sp(@x varchar(20)) + AS + declare @y varchar(20) + set @y = @x + select @y + """) + cursor.execute("exec test_sp ?", None) + rows = cursor.fetchall() + assert rows is not None + assert rows[0][0] is None # 0 years apart + + +# +# rowcount +# + + +def test_rowcount_delete(cursor): + assert cursor.rowcount == -1 + cursor.execute("create table t1(i int)") + count = 4 + for i in range(count): + cursor.execute("insert into t1 values (?)", i) + cursor.execute("delete from t1") + assert cursor.rowcount == count + + +def test_rowcount_nodata(cursor): + """ + This represents a different code path than a delete that deleted something. + + The return value is SQL_NO_DATA and code after it was causing an error. We could use + SQL_NO_DATA to step over the code that errors out and drop down to the same SQLRowCount + code. On the other hand, we could hardcode a zero return value. + """ + cursor.execute("create table t1(i int)") + # This is a different code path internally. + cursor.execute("delete from t1") + assert cursor.rowcount == 0 + + +def test_rowcount_select(cursor): + """ + Ensure Cursor.rowcount is set properly after a select statement. + + pyodbc calls SQLRowCount after each execute and sets Cursor.rowcount, but SQL Server 2005 + returns -1 after a select statement, so we'll test for that behavior. This is valid + behavior according to the DB API specification, but people don't seem to like it. 
+ """ + cursor.execute("create table t1(i int)") + count = 4 + for i in range(count): + cursor.execute("insert into t1 values (?)", i) + cursor.execute("select * from t1") + assert cursor.rowcount == -1 + + rows = cursor.fetchall() + assert len(rows) == count + assert cursor.rowcount == -1 + + +def test_rowcount_reset(cursor): + "Ensure rowcount is reset after DDL" + cursor.execute("create table t1(i int)") + count = 4 + for i in range(count): + cursor.execute("insert into t1 values (?)", i) + assert cursor.rowcount == 1 + + cursor.execute("create table t2(i int)") + ddl_rowcount = (0 if IS_FREEDTS else -1) + assert cursor.rowcount == ddl_rowcount + + +def test_retcursor_delete(cursor): + cursor.execute("create table t1(i int)") + cursor.execute("insert into t1 values (1)") + v = cursor.execute("delete from t1") + assert v == cursor + + +def test_retcursor_nodata(cursor): + """ + This represents a different code path than a delete that deleted something. + + The return value is SQL_NO_DATA and code after it was causing an error. We could use + SQL_NO_DATA to step over the code that errors out and drop down to the same SQLRowCount + code. + """ + cursor.execute("create table t1(i int)") + # This is a different code path internally. + v = cursor.execute("delete from t1") + assert v == cursor + + +def test_retcursor_select(cursor): + cursor.execute("create table t1(i int)") + cursor.execute("insert into t1 values (1)") + v = cursor.execute("select * from t1") + assert v == cursor + + +def table_with_spaces(cursor): + "Ensure we can select using [x z] syntax" + + try: + cursor.execute("create table [test one](int n)") + cursor.execute("insert into [test one] values(1)") + cursor.execute("select * from [test one]") + v = cursor.fetchone()[0] + assert v == 1 + finally: + cursor.rollback() + + +def test_lower_case(): + "Ensure pyodbc.lowercase forces returned column names to lowercase." 
+ try: + pyodbc.lowercase = True + cnxn = connect() + cursor = cnxn.cursor() + + cursor.execute("create table t1(Abc int, dEf int)") + cursor.execute("select * from t1") + + names = [t[0] for t in cursor.description] + names.sort() + + assert names == ["abc", "def"] + finally: + # Put it back so other tests don't fail. + pyodbc.lowercase = False + + +def test_row_description(cursor): + """ + Ensure Cursor.description is accessible as Row.cursor_description. + """ + cursor.execute("create table t1(a int, b char(3))") + cursor.execute("insert into t1 values(1, 'abc')") + row = cursor.execute("select * from t1").fetchone() + assert cursor.description == row.cursor_description + + +def test_temp_select(cursor): + # A project was failing to create temporary tables via select into. + cursor.execute("create table t1(s char(7))") + cursor.execute("insert into t1 values(?)", "testing") + v = cursor.execute("select * from t1").fetchone()[0] + assert isinstance(v, str) + assert v == "testing" + + cursor.execute("select s into t2 from t1") + v = cursor.execute("select * from t1").fetchone()[0] + assert isinstance(v, str) + assert v == "testing" + + +def test_executemany(cursor): + cursor.execute("create table t1(a int, b varchar(10))") + + params = [(i, str(i)) for i in range(1, 6)] + + cursor.executemany("insert into t1(a, b) values (?,?)", params) + + count = cursor.execute("select count(*) from t1").fetchone()[0] + assert count == len(params) + + cursor.execute("select a, b from t1 order by a") + rows = cursor.fetchall() + assert count == len(rows) + + for param, row in zip(params, rows): + assert param[0] == row[0] + assert param[1] == row[1] + + +def test_executemany_one(cursor): + "Pass executemany a single sequence" + cursor.execute("create table t1(a int, b varchar(10))") + + params = [(1, "test")] + + cursor.executemany("insert into t1(a, b) values (?,?)", params) + + count = cursor.execute("select count(*) from t1").fetchone()[0] + assert count == len(params) + + 
cursor.execute("select a, b from t1 order by a")
+ rows = cursor.fetchall()
+ assert count == len(rows)
+
+ for param, row in zip(params, rows):
+ assert param[0] == row[0]
+ assert param[1] == row[1]
+
+
+# def test_executemany_dae_0(cursor):
+# """
+# DAE for 0-length value
+# """
+# cursor.execute("create table t1(a nvarchar(max))")

+# cursor.fast_executemany = True
+# cursor.executemany("insert into t1(a) values(?)", [['']])

+# assert cursor.execute("select a from t1").fetchone()[0] == ''

+# cursor.fast_executemany = False


+def test_executemany_failure(cursor):
+ """
+ Ensure that an exception is raised if one query in an executemany fails.
+ """
+ cursor.execute("create table t1(a int, b varchar(10))")
+
+ params = [(1, 'good'),
+ ('error', 'not an int'),
+ (3, 'good')]
+
+ with pytest.raises(pyodbc.Error):
+ cursor.executemany("insert into t1(a, b) values (?, ?)", params)
+
+
+def test_row_slicing(cursor):
+ cursor.execute("create table t1(a int, b int, c int, d int)")
+ cursor.execute("insert into t1 values(1,2,3,4)")
+
+ row = cursor.execute("select * from t1").fetchone()
+
+ result = row[:]
+ assert result is row
+
+ result = row[:-1]
+ assert result == (1, 2, 3)
+
+ result = row[0:4]
+ assert result is row
+
+
+def test_row_repr(cursor):
+ cursor.execute("create table t1(a int, b int, c int, d varchar(50))")
+ cursor.execute("insert into t1 values(1,2,3,'four')")
+
+ row = cursor.execute("select * from t1").fetchone()
+
+ result = str(row)
+ assert result == "(1, 2, 3, 'four')"
+
+ result = str(row[:-1])
+ assert result == "(1, 2, 3)"
+
+ result = str(row[:1])
+ assert result == "(1,)"
+
+
+def test_concatenation(cursor):
+ v2 = '0123456789' * 30
+ v3 = '9876543210' * 30
+
+ cursor.execute("create table t1(c1 int identity(1, 1), c2 varchar(300), c3 varchar(300))")
+ cursor.execute("insert into t1(c2, c3) values (?,?)", v2, v3)
+
+ row = cursor.execute("select c2, c3, c2 + c3 as both from t1").fetchone()
+
+ assert row.both == v2 + v3
+
+
+def 
test_view_select(cursor): + # Reported in forum: Can't select from a view? I think I do this a lot, but another test + # never hurts. + + # Create a table (t1) with 3 rows and a view (t2) into it. + cursor.execute("create table t1(c1 int identity(1, 1), c2 varchar(50))") + for i in range(3): + cursor.execute("insert into t1(c2) values (?)", f"string{i}") + cursor.execute("create view t2 as select * from t1") + + # Select from the view + cursor.execute("select * from t2") + rows = cursor.fetchall() + assert rows is not None + assert len(rows) == 3 + + +def test_autocommit(): + cnxn = connect() + assert cnxn.autocommit is False + cnxn = None + + cnxn = connect(autocommit=True) + assert cnxn.autocommit is True + cnxn.autocommit = False + assert cnxn.autocommit is False + + +def test_sqlserver_callproc(cursor): + try: + cursor.execute("drop procedure pyodbctest") + cursor.commit() + except: + pass + + cursor.execute("create table t1(s varchar(10))") + cursor.execute("insert into t1 values(?)", "testing") + + cursor.execute(""" + create procedure pyodbctest @var1 varchar(32) + as + begin + select s from t1 + return + end + """) + + cursor.execute("exec pyodbctest 'hi'") + + +def test_skip(cursor): + # Insert 1, 2, and 3. Fetch 1, skip 2, fetch 3. + + cursor.execute("create table t1(id int)") + for i in range(1, 5): + cursor.execute("insert into t1 values(?)", i) + cursor.execute("select id from t1 order by id") + assert cursor.fetchone()[0] == 1 + cursor.skip(2) + assert cursor.fetchone()[0] == 4 + + +def test_timeout(): + cnxn = connect() + assert cnxn.timeout == 0 # defaults to zero (off) + + cnxn.timeout = 30 + assert cnxn.timeout == 30 + + cnxn.timeout = 0 + assert cnxn.timeout == 0 + + +def test_sets_execute(cursor): + # Only lists and tuples are allowed. 
+ cursor.execute("create table t1 (word varchar (100))") + + words = {'a', 'b', 'c'} + + with pytest.raises(pyodbc.ProgrammingError): + cursor.execute("insert into t1 (word) values (?)", words) + + with pytest.raises(pyodbc.ProgrammingError): + cursor.executemany("insert into t1 (word) values (?)", words) + + +def test_row_execute(cursor): + "Ensure we can use a Row object as a parameter to execute" + cursor.execute("create table t1(n int, s varchar(10))") + cursor.execute("insert into t1 values (1, 'a')") + row = cursor.execute("select n, s from t1").fetchone() + assert row + + cursor.execute("create table t2(n int, s varchar(10))") + cursor.execute("insert into t2 values (?, ?)", row) + + +def test_row_executemany(cursor): + "Ensure we can use a Row object as a parameter to executemany" + cursor.execute("create table t1(n int, s varchar(10))") + + for i in range(3): + cursor.execute("insert into t1 values (?, ?)", i, chr(ord('a') + i)) + + rows = cursor.execute("select n, s from t1").fetchall() + assert len(rows) != 0 + + cursor.execute("create table t2(n int, s varchar(10))") + cursor.executemany("insert into t2 values (?, ?)", rows) + + +def test_description(cursor): + "Ensure cursor.description is correct" + + cursor.execute("create table t1(n int, s varchar(8), d decimal(5,2))") + cursor.execute("insert into t1 values (1, 'abc', '1.23')") + cursor.execute("select * from t1") + + # (I'm not sure the precision of an int is constant across different versions, bits, so I'm + # hand checking the items I do know. 
+ + # int + t = cursor.description[0] + assert t[0] == 'n' + assert t[1] == int + assert t[5] == 0 # scale + assert t[6] is True # nullable + + # varchar(8) + t = cursor.description[1] + assert t[0] == 's' + assert t[1] == str + assert t[4] == 8 # precision + assert t[5] == 0 # scale + assert t[6] is True # nullable + + # decimal(5, 2) + t = cursor.description[2] + assert t[0] == 'd' + assert t[1] == Decimal + assert t[4] == 5 # precision + assert t[5] == 2 # scale + assert t[6] is True # nullable + + +def test_cursor_messages_with_print(cursor): + """ + Ensure the Cursor.messages attribute is handled correctly with a simple PRINT statement. + """ + assert not cursor.messages + + # SQL Server PRINT statements are never more than 8000 characters + # https://docs.microsoft.com/en-us/sql/t-sql/language-elements/print-transact-sql#remarks + for msg in ('hello world', 'ABCDEFGHIJ' * 800): + cursor.execute(f"PRINT '{msg}'") + messages = cursor.messages + assert isinstance(messages, list) + assert len(messages) == 1 + assert isinstance(messages[0], tuple) + assert len(messages[0]) == 2 + assert isinstance(messages[0][0], str) + assert isinstance(messages[0][1], str) + assert '[01000] (0)' == messages[0][0] + assert messages[0][1].endswith(msg) + + +def test_cursor_messages_with_stored_proc(cursor): + """ + Complex scenario to test the Cursor.messages attribute. 
+ """ + cursor.execute(""" + create or alter procedure test_cursor_messages as + begin + set nocount on; + print 'Message 1a'; + print 'Message 1b'; + select N'Field 1a' AS F UNION ALL SELECT N'Field 1b'; + select N'Field 2a' AS F UNION ALL SELECT N'Field 2b'; + print 'Message 2a'; + print 'Message 2b'; + end + """) + + # The messages will look like: + # + # [Microsoft][ODBC Driver 18 for SQL Server][SQL Server]Message 1a + + # result set 1: messages, rows + cursor.execute("exec test_cursor_messages") + vals = [row[0] for row in cursor.fetchall()] + assert vals == ['Field 1a', 'Field 1b'] + msgs = [ + re.search(r'Message \d[ab]$', m[1]).group(0) + for m in cursor.messages + ] + assert msgs == ['Message 1a', 'Message 1b'] + + # result set 2: rows, no messages + assert cursor.nextset() + vals = [row[0] for row in cursor.fetchall()] + assert vals == ['Field 2a', 'Field 2b'] + assert not cursor.messages + + # result set 3: messages, no rows + assert cursor.nextset() + with pytest.raises(pyodbc.ProgrammingError): + cursor.fetchall() + msgs = [ + re.search(r'Message \d[ab]$', m[1]).group(0) + for m in cursor.messages + ] + assert msgs == ['Message 2a', 'Message 2b'] + + # result set 4: no rows, no messages + assert not cursor.nextset() + with pytest.raises(pyodbc.ProgrammingError): + cursor.fetchall() + assert not cursor.messages + + +def test_none_param(cursor): + "Ensure None can be used for params other than the first" + # Some driver/db versions would fail if NULL was not the first parameter because + # SQLDescribeParam (only used with NULL) could not be used after the first call to + # SQLBindParameter. This means None always worked for the first column, but did not work + # for later columns. + # + # If SQLDescribeParam doesn't work, pyodbc would use VARCHAR which almost always worked. + # However, binary/varbinary won't allow an implicit conversion. 
+ + cursor.execute("create table t1(n int, blob varbinary(max))") + cursor.execute("insert into t1 values (1, newid())") + row = cursor.execute("select * from t1").fetchone() + assert row.n == 1 + assert isinstance(row.blob, bytes) + + sql = "update t1 set n=?, blob=?" + try: + cursor.execute(sql, 2, None) + except pyodbc.DataError: + if IS_FREEDTS: + # cnxn.getinfo(pyodbc.SQL_DESCRIBE_PARAMETER) returns False for FreeTDS, so pyodbc + # can't call SQLDescribeParam to get the correct parameter type. This can lead to + # errors being returned from SQL Server when sp_prepexec is called, e.g., "Implicit + # conversion from data type varchar to varbinary(max) is not allowed." + # + # So at least verify that the user can manually specify the parameter type + cursor.setinputsizes([(), (pyodbc.SQL_VARBINARY, None, None)]) + cursor.execute(sql, 2, None) + else: + raise + row = cursor.execute("select * from t1").fetchone() + assert row.n == 2 + assert row.blob is None + + +def test_output_conversion(): + def convert1(value): + # The value is the raw bytes (as a bytes object) read from the + # database. We'll simply add an X at the beginning at the end. + return 'X' + value.decode('latin1') + 'X' + + def convert2(value): + # Same as above, but add a Y at the beginning at the end. + return 'Y' + value.decode('latin1') + 'Y' + + cnxn = connect() + cursor = cnxn.cursor() + + cursor.execute("create table t1(n int, v varchar(10))") + cursor.execute("insert into t1 values (1, '123.45')") + + cnxn.add_output_converter(pyodbc.SQL_VARCHAR, convert1) + value = cursor.execute("select v from t1").fetchone()[0] + assert value == 'X123.45X' + + # Clear all conversions and try again. There should be no Xs this time. + cnxn.clear_output_converters() + value = cursor.execute("select v from t1").fetchone()[0] + assert value == '123.45' + + # Same but clear using remove_output_converter. 
+ cnxn.add_output_converter(pyodbc.SQL_VARCHAR, convert1) + value = cursor.execute("select v from t1").fetchone()[0] + assert value == 'X123.45X' + + cnxn.remove_output_converter(pyodbc.SQL_VARCHAR) + value = cursor.execute("select v from t1").fetchone()[0] + assert value == '123.45' + + # Clear via add_output_converter, passing None for the converter function. + cnxn.add_output_converter(pyodbc.SQL_VARCHAR, convert1) + value = cursor.execute("select v from t1").fetchone()[0] + assert value == 'X123.45X' + + cnxn.add_output_converter(pyodbc.SQL_VARCHAR, None) + value = cursor.execute("select v from t1").fetchone()[0] + assert value == '123.45' + + # retrieve and temporarily replace converter (get_output_converter) + # + # case_1: converter already registered + cnxn.add_output_converter(pyodbc.SQL_VARCHAR, convert1) + value = cursor.execute("select v from t1").fetchone()[0] + assert value == 'X123.45X' + prev_converter = cnxn.get_output_converter(pyodbc.SQL_VARCHAR) + assert prev_converter is not None + cnxn.add_output_converter(pyodbc.SQL_VARCHAR, convert2) + value = cursor.execute("select v from t1").fetchone()[0] + assert value == 'Y123.45Y' + cnxn.add_output_converter(pyodbc.SQL_VARCHAR, prev_converter) + value = cursor.execute("select v from t1").fetchone()[0] + assert value == 'X123.45X' + # + # case_2: no converter already registered + cnxn.clear_output_converters() + value = cursor.execute("select v from t1").fetchone()[0] + assert value == '123.45' + prev_converter = cnxn.get_output_converter(pyodbc.SQL_VARCHAR) + assert prev_converter is None + cnxn.add_output_converter(pyodbc.SQL_VARCHAR, convert2) + value = cursor.execute("select v from t1").fetchone()[0] + assert value == 'Y123.45Y' + cnxn.add_output_converter(pyodbc.SQL_VARCHAR, prev_converter) + value = cursor.execute("select v from t1").fetchone()[0] + assert value == '123.45' + + +def test_too_large(cursor): + """Ensure error raised if insert fails due to truncation""" + value = 'x' * 1000 + 
cursor.execute("create table t1(s varchar(800))") + + with pytest.raises(pyodbc.Error): + cursor.execute("insert into t1 values (?)", value) + + +@pytest.mark.skipif(sys.platform.startswith('linux'), + reason='SQL Server Linux does not support -151 yet') +def test_geometry_null_insert(cursor): + cnxn = connect() + + def convert(value): + return value + + cnxn.add_output_converter(-151, convert) # -151 is SQL Server's geometry + cursor.execute("create table t1(n int, v geometry)") + cursor.execute("insert into t1 values (?, ?)", 1, None) + value = cursor.execute("select v from t1").fetchone()[0] + assert value is None + cnxn.clear_output_converters() + + +def test_row_equal(cursor): + cursor.execute("create table t1(n int, s varchar(20))") + cursor.execute("insert into t1 values (1, 'test')") + row1 = cursor.execute("select n, s from t1").fetchone() + row2 = cursor.execute("select n, s from t1").fetchone() + assert row1 == row2 + + +def test_row_gtlt(cursor): + cursor.execute("create table t1(n int, s varchar(20))") + cursor.execute("insert into t1 values (1, 'test1')") + cursor.execute("insert into t1 values (1, 'test2')") + rows = cursor.execute("select n, s from t1 order by s").fetchall() + assert rows[0] < rows[1] + assert rows[0] <= rows[1] + assert rows[1] > rows[0] + assert rows[1] >= rows[0] + assert rows[0] != rows[1] + + rows = list(rows) + rows.sort() # uses < + + +def test_context_manager_success(): + "Ensure `with` commits if an exception is not raised" + cnxn = connect() + cursor = cnxn.cursor() + + cursor.execute("create table t1(n int)") + cnxn.commit() + + with cnxn: + cursor.execute("insert into t1 values (1)") + + rows = cursor.execute("select n from t1").fetchall() + assert len(rows) == 1 + assert rows[0][0] == 1 + + +def test_context_manager_failure(cursor): + "Ensure `with` rolls back if an exception is raised" + cnxn = connect() + cursor = cnxn.cursor() + + # We'll insert a row and commit it. 
Then we'll insert another row followed by an + # exception. + + cursor.execute("create table t1(n int)") + cursor.execute("insert into t1 values (1)") + cnxn.commit() + + with pytest.raises(pyodbc.Error): + with cnxn: + cursor.execute("insert into t1 values (2)") + cursor.execute("delete from bogus") + + cursor.execute("select max(n) from t1") + val = cursor.fetchval() + assert val == 1 + + +def test_untyped_none(cursor): + # From issue 129 + value = cursor.execute("select ?", None).fetchone()[0] + assert value is None + + +def test_large_update_nodata(cursor): + cursor.execute('create table t1(a varbinary(max))') + hundredkb = b'x' * 100 * 1024 + cursor.execute('update t1 set a=? where 1=0', (hundredkb,)) + + +def test_func_param(cursor): + try: + cursor.execute("drop function func1") + except: + pass + cursor.execute(""" + create function func1 (@testparam varchar(4)) + returns @rettest table (param varchar(4)) + as + begin + insert @rettest + select @testparam + return + end + """) + cursor.commit() + value = cursor.execute("select * from func1(?)", 'test').fetchone()[0] + assert value == 'test' + + +def test_columns(cursor): + # When using aiohttp, `await cursor.primaryKeys('t1')` was raising the error + # + # Error: TypeError: argument 2 must be str, not None + # + # I'm not sure why, but PyArg_ParseTupleAndKeywords fails if you use "|s" for an + # optional string keyword when calling indirectly. + + cursor.execute("create table t1(a int, b varchar(3), xΏz varchar(4))") + + cursor.columns('t1') + results = {row.column_name: row for row in cursor} + row = results['a'] + assert row.type_name == 'int', row.type_name + row = results['b'] + assert row.type_name == 'varchar' + assert row.column_size == 3 + + # Now do the same, but specifically pass in None to one of the keywords. Old versions + # were parsing arguments incorrectly and would raise an error. (This crops up when + # calling indirectly like columns(*args, **kwargs) which aiodbc does.) 
+ + cursor.columns('t1', schema=None, catalog=None) + results = {row.column_name: row for row in cursor} + row = results['a'] + assert row.type_name == 'int', row.type_name + row = results['b'] + assert row.type_name == 'varchar' + assert row.column_size == 3 + row = results['xΏz'] + assert row.type_name == 'varchar' + assert row.column_size == 4, row.column_size + + for i in range(8, 16): + table_name = 'pyodbc_89abcdef'[:i] + + cursor.execute(f""" + IF OBJECT_ID (N'{table_name}', N'U') IS NOT NULL DROP TABLE {table_name}; + CREATE TABLE {table_name} (id INT PRIMARY KEY); + """) + + col_count = len([col.column_name for col in cursor.columns(table_name)]) + assert col_count == 1 + + cursor.execute(f"drop table {table_name}") + + +def test_cancel(cursor): + # I'm not sure how to reliably cause a hang to cancel, so for now we'll settle with + # making sure SQLCancel is called correctly. + cursor.execute("select 1") + cursor.cancel() + + +def test_emoticons_as_parameter(cursor): + # https://github.com/mkleehammer/pyodbc/issues/423 + # + # When sending a varchar parameter, pyodbc is supposed to set ColumnSize to the number + # of characters. Ensure it works even with 4-byte characters. 
+ # + # http://www.fileformat.info/info/unicode/char/1f31c/index.htm + + v = "x \U0001F31C z" + + cursor.execute("create table t1(s nvarchar(100))") + cursor.execute("insert into t1 values (?)", v) + + result = cursor.execute("select s from t1").fetchone()[0] + + assert result == v + + +def test_emoticons_as_literal(cursor): + # similar to `test_emoticons_as_parameter`, above, except for Unicode literal + # + # http://www.fileformat.info/info/unicode/char/1f31c/index.htm + + # FreeTDS ODBC issue fixed in version 1.1.23 + # https://github.com/FreeTDS/freetds/issues/317 + + v = "x \U0001F31C z" + + cursor.execute("create table t1(s nvarchar(100))") + cursor.execute("insert into t1 values (N'%s')" % v) + + result = cursor.execute("select s from t1").fetchone()[0] + + assert result == v + + +def _test_tvp(cursor, diff_schema): + # https://github.com/mkleehammer/pyodbc/issues/290 + # + # pyodbc supports queries with table valued parameters in sql server + + procname = 'SelectTVP' + typename = 'TestTVP' + + if diff_schema: + schemaname = 'myschema' + procname = schemaname + '.' + procname + typenameonly = typename + typename = schemaname + '.' + typename + + # (Don't use "if exists" since older SQL Servers don't support it.) 
+ try: + cursor.execute("drop procedure " + procname) + except: + pass + try: + cursor.execute("drop type " + typename) + except: + pass + if diff_schema: + try: + cursor.execute("drop schema " + schemaname) + except: + pass + cursor.commit() + + if diff_schema: + cursor.execute("CREATE SCHEMA myschema") + cursor.commit() + + cursor.execute( + f""" + CREATE TYPE {typename} AS TABLE( + c01 VARCHAR(255), + c02 VARCHAR(MAX), + c03 VARBINARY(255), + c04 VARBINARY(MAX), + c05 BIT, + c06 DATE, + c07 TIME, + c08 DATETIME2(5), + c09 BIGINT, + c10 FLOAT, + c11 NUMERIC(38, 24), + c12 UNIQUEIDENTIFIER) + """) + cursor.commit() + cursor.execute( + f""" + CREATE PROCEDURE {procname} @TVP {typename} READONLY + AS SELECT * FROM @TVP; + """) + cursor.commit() + + long_string = '' + long_bytearray = [] + for i in range(255): + long_string += chr((i % 95) + 32) + long_bytearray.append(i % 255) + + very_long_string = '' + very_long_bytearray = [] + for i in range(2000000): + very_long_string += chr((i % 95) + 32) + very_long_bytearray.append(i % 255) + + c01 = ['abc', '', long_string] + + c02 = ['abc', '', very_long_string] + + c03 = [bytearray([0xD1, 0xCE, 0xFA, 0xCE]), + bytearray([0x00, 0x01, 0x02, 0x03, 0x04]), + bytearray(long_bytearray)] + + c04 = [bytearray([0x0F, 0xF1, 0xCE, 0xCA, 0xFE]), + bytearray([0x00, 0x01, 0x02, 0x03, 0x04, 0x05]), + bytearray(very_long_bytearray)] + + c05 = [1, 0, 1] + + c06 = [date(1997, 8, 29), + date(1, 1, 1), + date(9999, 12, 31)] + + c07 = [time(9, 13, 39), + time(0, 0, 0), + time(23, 59, 59)] + + c08 = [datetime(2018, 11, 13, 13, 33, 26, 298420), + datetime(1, 1, 1, 0, 0, 0, 0), + datetime(9999, 12, 31, 23, 59, 59, 999990)] + + c09 = [1234567, -9223372036854775808, 9223372036854775807] + + c10 = [3.14, -1.79E+308, 1.79E+308] + + c11 = [Decimal('31234567890123.141243449787580175325274'), + Decimal('0.000000000000000000000001'), + Decimal('99999999999999.999999999999999999999999')] + + c12 = ['4FE34A93-E574-04CC-200A-353F0D1770B1', + 
+           '33F7504C-2BAC-1B83-01D1-7434A7BA6A17',
+           'FFFFFFFF-FFFF-FFFF-FFFF-FFFFFFFFFFFF']
+
+    param_array = []
+
+    for i in range(3):
+        param_array.append([c01[i], c02[i], c03[i], c04[i], c05[i], c06[i], c07[i], c08[i],
+                            c09[i], c10[i], c11[i], c12[i]])
+
+    success = True
+
+    try:
+        p1 = [param_array]
+        if diff_schema:
+            p1 = [[typenameonly, schemaname] + param_array]
+        result_array = cursor.execute(f"exec {procname} ?", p1).fetchall()
+    except Exception as ex:
+        print("Failed to execute SelectTVP")
+        print("Exception: [" + type(ex).__name__ + "]", ex.args)
+
+        success = False
+    else:
+        for r in range(len(result_array)):
+            for c in range(len(result_array[r])):
+                if result_array[r][c] != param_array[r][c]:
+                    print("Mismatch at row", r + 1, ", column ", (c + 1), "; expected:",
+                          param_array[r][c], "received:", result_array[r][c])
+                    success = False
+
+    try:
+        p1 = [[]]
+        if diff_schema:
+            p1 = [[typenameonly, schemaname] + []]
+        result_array = cursor.execute(f"exec {procname} ?", p1).fetchall()
+        assert result_array == []
+    except Exception as ex:
+        print("Failed to execute SelectTVP")
+        print("Exception: [" + type(ex).__name__ + "]", ex.args)
+        success = False
+
+    assert success
+
+
+# REVIEW: I need to research this.
+# @pytest.mark.skipif(IS_FREEDTS, reason='FreeTDS does not support TVP')
+@pytest.mark.skip(reason='TVP test hangs')
+def test_tvp(cursor):
+    _test_tvp(cursor, False)
+
+
+# REVIEW: I need to research this.
+# @pytest.mark.skipif(IS_FREEDTS, reason='FreeTDS does not support TVP')
+@pytest.mark.skip(reason='TVP test hangs')
+def test_tvp_diffschema(cursor):
+    _test_tvp(cursor, True)
+
+
+def get_sqlserver_version(cursor):
+
+    """
+    Returns the major version: 8-->2000, 9-->2005, 10-->2008
+    """
+    cursor.execute("exec master..xp_msver 'ProductVersion'")
+    row = cursor.fetchone()
+    return int(row.Character_Value.split('.', 1)[0])
+
+
+@lru_cache()
+def _generate_str(length, encoding=None):
+    """
+    Returns either a string or bytes, depending on whether encoding is provided,
+    that is `length` elements long.
+
+    If length is None, None is returned. This simplifies the tests by letting us put None into
+    an array of other lengths and pass them here, moving the special case check into one place.
+    """
+    if length is None:
+        return None
+
+    # Put non-ASCII characters at the front so we don't end up chopping one in half in a
+    # multi-byte encoding like UTF-8.
+
+    v = 'á'
+
+    remaining = max(0, length - len(v))
+    if remaining:
+        seed = '0123456789-abcdefghijklmnopqrstuvwxyz-'
+
+        if remaining <= len(seed):
+            v += seed
+        else:
+            c = (remaining + len(seed) - 1) // len(seed)
+            v += seed * c
+
+    if encoding:
+        v = v.encode(encoding)
+
+    # We chop *after* encoding because if we are encoding then we want bytes.
+    v = v[:length]
+
+    return v
diff --git a/tests2/accesstests.py b/tests2/accesstests.py
deleted file mode 100755
index 2cd0fc55..00000000
--- a/tests2/accesstests.py
+++ /dev/null
@@ -1,671 +0,0 @@
-#!/usr/bin/python
-
-usage="""\
-usage: %prog [options] filename
-
-Unit tests for Microsoft Access
-
-These run using the version from the 'build' directory, not the version
-installed into the Python directories. You must run python setup.py build
-before running the tests.
- -To run, pass the file EXTENSION of an Access database on the command line: - - accesstests accdb - -An empty Access 2000 database (empty.mdb) or an empty Access 2007 database -(empty.accdb), are automatically created for the tests. - -To run a single test, use the -t option: - - accesstests -t unicode_null accdb - -If you want to report an error, it would be helpful to include the driver information -by using the verbose flag and redirecting the output to a file: - - accesstests -v accdb >& results.txt - -You can pass the verbose flag twice for more verbose output: - - accesstests -vv accdb -""" - -# Access SQL data types: http://msdn2.microsoft.com/en-us/library/bb208866.aspx - -import sys, os, re -import unittest -from decimal import Decimal -from datetime import datetime, date, time -from os.path import abspath, dirname, join -import shutil -from testutils import * - -CNXNSTRING = None - -_TESTSTR = '0123456789-abcdefghijklmnopqrstuvwxyz-' - -def _generate_test_string(length): - """ - Returns a string of composed of `seed` to make a string `length` characters long. - - To enhance performance, there are 3 ways data is read, based on the length of the value, so most data types are - tested with 3 lengths. This function helps us generate the test data. - - We use a recognizable data set instead of a single character to make it less likely that "overlap" errors will - be hidden and to help us manually identify where a break occurs. 
- """ - if length <= len(_TESTSTR): - return _TESTSTR[:length] - - c = (length + len(_TESTSTR)-1) / len(_TESTSTR) - v = _TESTSTR * c - return v[:length] - - -class AccessTestCase(unittest.TestCase): - - SMALL_FENCEPOST_SIZES = [ 0, 1, 254, 255 ] # text fields <= 255 - LARGE_FENCEPOST_SIZES = [ 256, 270, 304, 508, 510, 511, 512, 1023, 1024, 2047, 2048, 4000, 4095, 4096, 4097, 10 * 1024, 20 * 1024 ] - - ANSI_FENCEPOSTS = [ _generate_test_string(size) for size in SMALL_FENCEPOST_SIZES ] - UNICODE_FENCEPOSTS = [ unicode(s) for s in ANSI_FENCEPOSTS ] - IMAGE_FENCEPOSTS = ANSI_FENCEPOSTS + [ _generate_test_string(size) for size in LARGE_FENCEPOST_SIZES ] - - def __init__(self, method_name): - unittest.TestCase.__init__(self, method_name) - - def setUp(self): - self.cnxn = pyodbc.connect(CNXNSTRING) - self.cursor = self.cnxn.cursor() - - # https://docs.microsoft.com/en-us/sql/odbc/microsoft/desktop-database-driver-performance-issues?view=sql-server-2017 - # - # As of the 4.0 drivers, you have to send as Unicode? - self.cnxn.setencoding(str, encoding='utf-16le') - - for i in range(3): - try: - self.cursor.execute("drop table t%d" % i) - self.cnxn.commit() - except: - pass - - self.cnxn.rollback() - - def tearDown(self): - try: - self.cursor.close() - self.cnxn.close() - except: - # If we've already closed the cursor or connection, exceptions are thrown. 
- pass - - def test_multiple_bindings(self): - "More than one bind and select on a cursor" - self.cursor.execute("create table t1(n int)") - self.cursor.execute("insert into t1 values (?)", 1) - self.cursor.execute("insert into t1 values (?)", 2) - self.cursor.execute("insert into t1 values (?)", 3) - for i in range(3): - self.cursor.execute("select n from t1 where n < ?", 10) - self.cursor.execute("select n from t1 where n < 3") - - - def test_different_bindings(self): - self.cursor.execute("create table t1(n int)") - self.cursor.execute("create table t2(d datetime)") - self.cursor.execute("insert into t1 values (?)", 1) - self.cursor.execute("insert into t2 values (?)", datetime.now()) - - def test_drivers(self): - p = pyodbc.drivers() - self.assertTrue(isinstance(p, list)) - - def test_datasources(self): - p = pyodbc.dataSources() - self.assertTrue(isinstance(p, dict)) - - def test_getinfo_string(self): - value = self.cnxn.getinfo(pyodbc.SQL_CATALOG_NAME_SEPARATOR) - self.assertTrue(isinstance(value, str)) - - def test_getinfo_bool(self): - value = self.cnxn.getinfo(pyodbc.SQL_ACCESSIBLE_TABLES) - self.assertTrue(isinstance(value, bool)) - - def test_getinfo_int(self): - value = self.cnxn.getinfo(pyodbc.SQL_DEFAULT_TXN_ISOLATION) - self.assertTrue(isinstance(value, (int, long))) - - def test_getinfo_smallint(self): - value = self.cnxn.getinfo(pyodbc.SQL_CONCAT_NULL_BEHAVIOR) - self.assertTrue(isinstance(value, int)) - - def _test_strtype(self, sqltype, value, resulttype=None, colsize=None): - """ - The implementation for string, Unicode, and binary tests. 
- """ - assert colsize is None or (value is None or colsize >= len(value)), 'colsize=%s value=%s' % (colsize, (value is None) and 'none' or len(value)) - - if colsize: - sql = "create table t1(n1 int not null, s1 %s(%s), s2 %s(%s))" % (sqltype, colsize, sqltype, colsize) - else: - sql = "create table t1(n1 int not null, s1 %s, s2 %s)" % (sqltype, sqltype) - - if resulttype is None: - # Access only uses Unicode, but strings might have been passed in to see if they can be written. When we - # read them back, they'll be unicode, so compare our results to a Unicode version of `value`. - if type(value) is str: - resulttype = unicode - else: - resulttype = type(value) - - self.cursor.execute(sql) - self.cursor.execute("insert into t1 values(1, ?, ?)", (value, value)) - v = self.cursor.execute("select s1, s2 from t1").fetchone()[0] - - if type(value) is not resulttype: - # To allow buffer --> db --> bytearray tests, always convert the input to the expected result type before - # comparing. - value = resulttype(value) - - self.assertEqual(type(v), resulttype) - - if value is not None: - self.assertEqual(len(v), len(value)) - - self.assertEqual(v, value) - - # - # unicode - # - - def test_unicode_null(self): - self._test_strtype('varchar', None, colsize=255) - - # Generate a test for each fencepost size: test_varchar_0, etc. - def _maketest(value): - def t(self): - self._test_strtype('varchar', value, colsize=len(value)) - t.__doc__ = 'unicode %s' % len(value) - return t - for value in UNICODE_FENCEPOSTS: - locals()['test_unicode_%s' % len(value)] = _maketest(value) - - # - # ansi -> varchar - # - - # Access only stores Unicode text but it should accept ASCII text. - - # Generate a test for each fencepost size: test_varchar_0, etc. 
- def _maketest(value): - def t(self): - self._test_strtype('varchar', value, colsize=len(value)) - t.__doc__ = 'ansi %s' % len(value) - return t - for value in ANSI_FENCEPOSTS: - locals()['test_ansivarchar_%s' % len(value)] = _maketest(value) - - # - # binary - # - - # Generate a test for each fencepost size: test_varchar_0, etc. - def _maketest(value): - def t(self): - self._test_strtype('varbinary', buffer(value), colsize=len(value), resulttype=pyodbc.BINARY) - t.__doc__ = 'binary %s' % len(value) - return t - for value in ANSI_FENCEPOSTS: - locals()['test_binary_%s' % len(value)] = _maketest(value) - - - # - # image - # - - def test_null_image(self): - self._test_strtype('image', None) - - # Generate a test for each fencepost size: test_varchar_0, etc. - def _maketest(value): - def t(self): - self._test_strtype('image', buffer(value), resulttype=pyodbc.BINARY) - t.__doc__ = 'image %s' % len(value) - return t - for value in IMAGE_FENCEPOSTS: - locals()['test_image_%s' % len(value)] = _maketest(value) - - # - # memo - # - - def test_null_memo(self): - self._test_strtype('memo', None) - - # Generate a test for each fencepost size: test_varchar_0, etc. - def _maketest(value): - def t(self): - self._test_strtype('memo', unicode(value)) - t.__doc__ = 'Unicode to memo %s' % len(value) - return t - for value in IMAGE_FENCEPOSTS: - locals()['test_memo_%s' % len(value)] = _maketest(value) - - # ansi -> memo - def _maketest(value): - def t(self): - self._test_strtype('memo', value) - t.__doc__ = 'ANSI to memo %s' % len(value) - return t - for value in IMAGE_FENCEPOSTS: - locals()['test_ansimemo_%s' % len(value)] = _maketest(value) - - def test_subquery_params(self): - """Ensure parameter markers work in a subquery""" - self.cursor.execute("create table t1(id integer, s varchar(20))") - self.cursor.execute("insert into t1 values (?,?)", 1, 'test') - row = self.cursor.execute(""" - select x.id - from ( - select id - from t1 - where s = ? - and id between ? and ? 
- ) x - """, 'test', 1, 10).fetchone() - self.assertNotEqual(row, None) - self.assertEqual(row[0], 1) - - def _exec(self): - self.cursor.execute(self.sql) - - def test_close_cnxn(self): - """Make sure using a Cursor after closing its connection doesn't crash.""" - - self.cursor.execute("create table t1(id integer, s varchar(20))") - self.cursor.execute("insert into t1 values (?,?)", 1, 'test') - self.cursor.execute("select * from t1") - - self.cnxn.close() - - # Now that the connection is closed, we expect an exception. (If the code attempts to use - # the HSTMT, we'll get an access violation instead.) - self.sql = "select * from t1" - self.assertRaises(pyodbc.ProgrammingError, self._exec) - - - def test_unicode_query(self): - self.cursor.execute(u"select 1") - - def test_negative_row_index(self): - self.cursor.execute("create table t1(s varchar(20))") - self.cursor.execute("insert into t1 values(?)", "1") - row = self.cursor.execute("select * from t1").fetchone() - self.assertEqual(row[0], "1") - self.assertEqual(row[-1], "1") - - def test_version(self): - self.assertEqual(3, len(pyodbc.version.split('.'))) # 1.3.1 etc. 
- - # - # date, time, datetime - # - - def test_datetime(self): - value = datetime(2007, 1, 15, 3, 4, 5) - - self.cursor.execute("create table t1(dt datetime)") - self.cursor.execute("insert into t1 values (?)", value) - - result = self.cursor.execute("select dt from t1").fetchone()[0] - self.assertEqual(value, result) - - # - # ints and floats - # - - def test_int(self): - value = 1234 - self.cursor.execute("create table t1(n int)") - self.cursor.execute("insert into t1 values (?)", value) - result = self.cursor.execute("select n from t1").fetchone()[0] - self.assertEqual(result, value) - - def test_negative_int(self): - value = -1 - self.cursor.execute("create table t1(n int)") - self.cursor.execute("insert into t1 values (?)", value) - result = self.cursor.execute("select n from t1").fetchone()[0] - self.assertEqual(result, value) - - def test_smallint(self): - value = 32767 - self.cursor.execute("create table t1(n smallint)") - self.cursor.execute("insert into t1 values (?)", value) - result = self.cursor.execute("select n from t1").fetchone()[0] - self.assertEqual(result, value) - - def test_real(self): - value = 1234.5 - self.cursor.execute("create table t1(n real)") - self.cursor.execute("insert into t1 values (?)", value) - result = self.cursor.execute("select n from t1").fetchone()[0] - self.assertEqual(result, value) - - def test_negative_real(self): - value = -200.5 - self.cursor.execute("create table t1(n real)") - self.cursor.execute("insert into t1 values (?)", value) - result = self.cursor.execute("select n from t1").fetchone()[0] - self.assertEqual(value, result) - - def test_float(self): - value = 1234.567 - self.cursor.execute("create table t1(n float)") - self.cursor.execute("insert into t1 values (?)", value) - result = self.cursor.execute("select n from t1").fetchone()[0] - self.assertEqual(result, value) - - def test_negative_float(self): - value = -200.5 - self.cursor.execute("create table t1(n float)") - self.cursor.execute("insert into t1 
values (?)", value) - result = self.cursor.execute("select n from t1").fetchone()[0] - self.assertEqual(value, result) - - def test_tinyint(self): - self.cursor.execute("create table t1(n tinyint)") - value = 10 - self.cursor.execute("insert into t1 values (?)", value) - result = self.cursor.execute("select n from t1").fetchone()[0] - self.assertEqual(type(result), type(value)) - self.assertEqual(value, result) - - # - # decimal & money - # - - def test_decimal(self): - value = Decimal('12345.6789') - self.cursor.execute("create table t1(n numeric(10,4))") - self.cursor.execute("insert into t1 values(?)", value) - v = self.cursor.execute("select n from t1").fetchone()[0] - self.assertEqual(type(v), Decimal) - self.assertEqual(v, value) - - def test_money(self): - self.cursor.execute("create table t1(n money)") - value = Decimal('1234.45') - self.cursor.execute("insert into t1 values (?)", value) - result = self.cursor.execute("select n from t1").fetchone()[0] - self.assertEqual(type(result), type(value)) - self.assertEqual(value, result) - - def test_negative_decimal_scale(self): - value = Decimal('-10.0010') - self.cursor.execute("create table t1(d numeric(19,4))") - self.cursor.execute("insert into t1 values(?)", value) - v = self.cursor.execute("select * from t1").fetchone()[0] - self.assertEqual(type(v), Decimal) - self.assertEqual(v, value) - - # - # bit - # - - def test_bit(self): - self.cursor.execute("create table t1(b bit)") - - value = True - self.cursor.execute("insert into t1 values (?)", value) - result = self.cursor.execute("select b from t1").fetchone()[0] - self.assertEqual(type(result), bool) - self.assertEqual(value, result) - - def test_bit_null(self): - self.cursor.execute("create table t1(b bit)") - - value = None - self.cursor.execute("insert into t1 values (?)", value) - result = self.cursor.execute("select b from t1").fetchone()[0] - self.assertEqual(type(result), bool) - self.assertEqual(False, result) - - def test_guid(self): - value = 
u"de2ac9c6-8676-4b0b-b8a6-217a8580cbee" - self.cursor.execute("create table t1(g1 uniqueidentifier)") - self.cursor.execute("insert into t1 values (?)", value) - v = self.cursor.execute("select * from t1").fetchone()[0] - self.assertEqual(type(v), type(value)) - self.assertEqual(len(v), len(value)) - - - # - # rowcount - # - - def test_rowcount_delete(self): - self.assertEqual(self.cursor.rowcount, -1) - self.cursor.execute("create table t1(i int)") - count = 4 - for i in range(count): - self.cursor.execute("insert into t1 values (?)", i) - self.cursor.execute("delete from t1") - self.assertEqual(self.cursor.rowcount, count) - - def test_rowcount_nodata(self): - """ - This represents a different code path than a delete that deleted something. - - The return value is SQL_NO_DATA and code after it was causing an error. We could use SQL_NO_DATA to step over - the code that errors out and drop down to the same SQLRowCount code. On the other hand, we could hardcode a - zero return value. - """ - self.cursor.execute("create table t1(i int)") - # This is a different code path internally. - self.cursor.execute("delete from t1") - self.assertEqual(self.cursor.rowcount, 0) - - def test_rowcount_select(self): - """ - Ensure Cursor.rowcount is set properly after a select statement. - - pyodbc calls SQLRowCount after each execute and sets Cursor.rowcount, but SQL Server 2005 returns -1 after a - select statement, so we'll test for that behavior. This is valid behavior according to the DB API - specification, but people don't seem to like it. 
- """ - self.cursor.execute("create table t1(i int)") - count = 4 - for i in range(count): - self.cursor.execute("insert into t1 values (?)", i) - self.cursor.execute("select * from t1") - self.assertEqual(self.cursor.rowcount, -1) - - rows = self.cursor.fetchall() - self.assertEqual(len(rows), count) - self.assertEqual(self.cursor.rowcount, -1) - - def test_rowcount_reset(self): - "Ensure rowcount is reset to -1" - - self.cursor.execute("create table t1(i int)") - count = 4 - for i in range(count): - self.cursor.execute("insert into t1 values (?)", i) - self.assertEqual(self.cursor.rowcount, 1) - - self.cursor.execute("create table t2(i int)") - self.assertEqual(self.cursor.rowcount, -1) - - # - # Misc - # - - def test_lower_case(self): - "Ensure pyodbc.lowercase forces returned column names to lowercase." - - # Has to be set before creating the cursor, so we must recreate self.cursor. - - pyodbc.lowercase = True - self.cursor = self.cnxn.cursor() - - self.cursor.execute("create table t1(Abc int, dEf int)") - self.cursor.execute("select * from t1") - - names = [ t[0] for t in self.cursor.description ] - names.sort() - - self.assertEqual(names, [ "abc", "def" ]) - - # Put it back so other tests don't fail. - pyodbc.lowercase = False - - def test_row_description(self): - """ - Ensure Cursor.description is accessible as Row.cursor_description. 
- """ - self.cursor = self.cnxn.cursor() - self.cursor.execute("create table t1(a int, b char(3))") - self.cnxn.commit() - self.cursor.execute("insert into t1 values(1, 'abc')") - - row = self.cursor.execute("select * from t1").fetchone() - self.assertEqual(self.cursor.description, row.cursor_description) - - - def test_executemany(self): - self.cursor.execute("create table t1(a int, b varchar(10))") - - params = [ (i, str(i)) for i in range(1, 6) ] - - self.cursor.executemany("insert into t1(a, b) values (?,?)", params) - - count = self.cursor.execute("select count(*) from t1").fetchone()[0] - self.assertEqual(count, len(params)) - - self.cursor.execute("select a, b from t1 order by a") - rows = self.cursor.fetchall() - self.assertEqual(count, len(rows)) - - for param, row in zip(params, rows): - self.assertEqual(param[0], row[0]) - self.assertEqual(param[1], row[1]) - - - def test_executemany_failure(self): - """ - Ensure that an exception is raised if one query in an executemany fails. 
- """ - self.cursor.execute("create table t1(a int, b varchar(10))") - - params = [ (1, 'good'), - ('error', 'not an int'), - (3, 'good') ] - - self.assertRaises(pyodbc.Error, self.cursor.executemany, "insert into t1(a, b) value (?, ?)", params) - - - def test_row_slicing(self): - self.cursor.execute("create table t1(a int, b int, c int, d int)"); - self.cursor.execute("insert into t1 values(1,2,3,4)") - - row = self.cursor.execute("select * from t1").fetchone() - - result = row[:] - self.assertTrue(result is row) - - result = row[:-1] - self.assertEqual(result, (1,2,3)) - - result = row[0:4] - self.assertTrue(result is row) - - - def test_row_repr(self): - self.cursor.execute("create table t1(a int, b int, c int, d int)"); - self.cursor.execute("insert into t1 values(1,2,3,4)") - - row = self.cursor.execute("select * from t1").fetchone() - - result = str(row) - self.assertEqual(result, "(1, 2, 3, 4)") - - result = str(row[:-1]) - self.assertEqual(result, "(1, 2, 3)") - - result = str(row[:1]) - self.assertEqual(result, "(1,)") - - - def test_concatenation(self): - v2 = u'0123456789' * 25 - v3 = u'9876543210' * 25 - value = v2 + 'x' + v3 - - self.cursor.execute("create table t1(c2 varchar(250), c3 varchar(250))") - self.cursor.execute("insert into t1(c2, c3) values (?,?)", v2, v3) - - row = self.cursor.execute("select c2 + 'x' + c3 from t1").fetchone() - - self.assertEqual(row[0], value) - - - def test_autocommit(self): - self.assertEqual(self.cnxn.autocommit, False) - - othercnxn = pyodbc.connect(CNXNSTRING, autocommit=True) - self.assertEqual(othercnxn.autocommit, True) - - othercnxn.autocommit = False - self.assertEqual(othercnxn.autocommit, False) - - -def main(): - from optparse import OptionParser - parser = OptionParser(usage=usage) - parser.add_option("-v", "--verbose", default=0, action="count", help="Increment test verbosity (can be used multiple times)") - parser.add_option("-d", "--debug", action="store_true", default=False, help="Print debugging 
items") - parser.add_option("-t", "--test", help="Run only the named test") - - (options, args) = parser.parse_args() - - if len(args) != 1: - parser.error('dbfile argument required') - - if args[0].endswith('.accdb'): - driver = 'Microsoft Access Driver (*.mdb, *.accdb)' - drvext = 'accdb' - else: - driver = 'Microsoft Access Driver (*.mdb)' - drvext = 'mdb' - - here = dirname(abspath(__file__)) - src = join(here, 'empty.' + drvext) - dest = join(here, 'test.' + drvext) - shutil.copy(src, dest) - - global CNXNSTRING - CNXNSTRING = 'DRIVER={%s};DBQ=%s;ExtendedAnsiSQL=1' % (driver, dest) - print(CNXNSTRING) - - if options.verbose: - cnxn = pyodbc.connect(CNXNSTRING) - print_library_info(cnxn) - cnxn.close() - - suite = load_tests(AccessTestCase, options.test) - - testRunner = unittest.TextTestRunner(verbosity=options.verbose) - result = testRunner.run(suite) - - return result - - -if __name__ == '__main__': - - # Add the build directory to the path so we're testing the latest build, not the installed version. - add_to_path() - import pyodbc - sys.exit(0 if main().wasSuccessful() else 1) diff --git a/tests2/dbapi20.py b/tests2/dbapi20.py deleted file mode 100755 index 94567db8..00000000 --- a/tests2/dbapi20.py +++ /dev/null @@ -1,850 +0,0 @@ -#!/usr/bin/env python -''' Python DB API 2.0 driver compliance unit test suite. - - This software is Public Domain and may be used without restrictions. - - "Now we have booze and barflies entering the discussion, plus rumours of - DBAs on drugs... and I won't tell you what flashes through my mind each - time I read the subject line with 'Anal Compliance' in it. All around - this is turning out to be a thoroughly unwholesome unit test." 
- - -- Ian Bicking -''' - -__rcs_id__ = '$Id: dbapi20.py,v 1.10 2003/10/09 03:14:14 zenzen Exp $' -__version__ = '$Revision: 1.10 $'[11:-2] -__author__ = 'Stuart Bishop ' - -import unittest -import time - -# $Log: dbapi20.py,v $ -# Revision 1.10 2003/10/09 03:14:14 zenzen -# Add test for DB API 2.0 optional extension, where database exceptions -# are exposed as attributes on the Connection object. -# -# Revision 1.9 2003/08/13 01:16:36 zenzen -# Minor tweak from Stefan Fleiter -# -# Revision 1.8 2003/04/10 00:13:25 zenzen -# Changes, as per suggestions by M.-A. Lemburg -# - Add a table prefix, to ensure namespace collisions can always be avoided -# -# Revision 1.7 2003/02/26 23:33:37 zenzen -# Break out DDL into helper functions, as per request by David Rushby -# -# Revision 1.6 2003/02/21 03:04:33 zenzen -# Stuff from Henrik Ekelund: -# added test_None -# added test_nextset & hooks -# -# Revision 1.5 2003/02/17 22:08:43 zenzen -# Implement suggestions and code from Henrik Eklund - test that cursor.arraysize -# defaults to 1 & generic cursor.callproc test added -# -# Revision 1.4 2003/02/15 00:16:33 zenzen -# Changes, as per suggestions and bug reports by M.-A. Lemburg, -# Matthew T. 
Kromer, Federico Di Gregorio and Daniel Dittmar -# - Class renamed -# - Now a subclass of TestCase, to avoid requiring the driver stub -# to use multiple inheritance -# - Reversed the polarity of buggy test in test_description -# - Test exception hierarchy correctly -# - self.populate is now self._populate(), so if a driver stub -# overrides self.ddl1 this change propagates -# - VARCHAR columns now have a width, which will hopefully make the -# DDL even more portible (this will be reversed if it causes more problems) -# - cursor.rowcount being checked after various execute and fetchXXX methods -# - Check for fetchall and fetchmany returning empty lists after results -# are exhausted (already checking for empty lists if select retrieved -# nothing -# - Fix bugs in test_setoutputsize_basic and test_setinputsizes -# - -class DatabaseAPI20Test(unittest.TestCase): - ''' Test a database self.driver for DB API 2.0 compatibility. - This implementation tests Gadfly, but the TestCase - is structured so that other self.drivers can subclass this - test case to ensure compiliance with the DB-API. It is - expected that this TestCase may be expanded in the future - if ambiguities or edge conditions are discovered. - - The 'Optional Extensions' are not yet being tested. - - self.drivers should subclass this test, overriding setUp, tearDown, - self.driver, connect_args and connect_kw_args. Class specification - should be as follows: - - import dbapi20 - class mytest(dbapi20.DatabaseAPI20Test): - [...] - - Don't 'import DatabaseAPI20Test from dbapi20', or you will - confuse the unit tester - just 'import dbapi20'. - ''' - - # The self.driver module. 
This should be the module where the 'connect' - # method is to be found - driver = None - connect_args = () # List of arguments to pass to connect - connect_kw_args = {} # Keyword arguments for connect - table_prefix = 'dbapi20test_' # If you need to specify a prefix for tables - - ddl1 = 'create table %sbooze (name varchar(20))' % table_prefix - ddl2 = 'create table %sbarflys (name varchar(20))' % table_prefix - xddl1 = 'drop table %sbooze' % table_prefix - xddl2 = 'drop table %sbarflys' % table_prefix - - lowerfunc = 'lower' # Name of stored procedure to convert string->lowercase - - # Some drivers may need to override these helpers, for example adding - # a 'commit' after the execute. - def executeDDL1(self,cursor): - cursor.execute(self.ddl1) - - def executeDDL2(self,cursor): - cursor.execute(self.ddl2) - - def setUp(self): - ''' self.drivers should override this method to perform required setup - if any is necessary, such as creating the database. - ''' - pass - - def tearDown(self): - ''' self.drivers should override this method to perform required cleanup - if any is necessary, such as deleting the test database. - The default drops the tables that may be created. - ''' - con = self._connect() - try: - cur = con.cursor() - for i, ddl in enumerate((self.xddl1,self.xddl2)): - try: - cur.execute(ddl) - con.commit() - except self.driver.Error: - # Assume table didn't exist. Other tests will check if - # execute is busted. 
- pass - finally: - con.close() - - def _connect(self): - try: - return self.driver.connect( - *self.connect_args,**self.connect_kw_args - ) - except AttributeError: - self.fail("No connect method found in self.driver module") - - def test_connect(self): - con = self._connect() - con.close() - - def test_apilevel(self): - try: - # Must exist - apilevel = self.driver.apilevel - # Must equal 2.0 - self.assertEqual(apilevel,'2.0') - except AttributeError: - self.fail("Driver doesn't define apilevel") - - def test_threadsafety(self): - try: - # Must exist - threadsafety = self.driver.threadsafety - # Must be a valid value - self.assertTrue(threadsafety in (0,1,2,3)) - except AttributeError: - self.fail("Driver doesn't define threadsafety") - - def test_paramstyle(self): - try: - # Must exist - paramstyle = self.driver.paramstyle - # Must be a valid value - self.assertTrue(paramstyle in ( - 'qmark','numeric','named','format','pyformat' - )) - except AttributeError: - self.fail("Driver doesn't define paramstyle") - - def test_Exceptions(self): - # Make sure required exceptions exist, and are in the - # defined hierarchy. 
- self.assertTrue(issubclass(self.driver.Warning,StandardError)) - self.assertTrue(issubclass(self.driver.Error,StandardError)) - self.assertTrue( - issubclass(self.driver.InterfaceError,self.driver.Error) - ) - self.assertTrue( - issubclass(self.driver.DatabaseError,self.driver.Error) - ) - self.assertTrue( - issubclass(self.driver.OperationalError,self.driver.Error) - ) - self.assertTrue( - issubclass(self.driver.IntegrityError,self.driver.Error) - ) - self.assertTrue( - issubclass(self.driver.InternalError,self.driver.Error) - ) - self.assertTrue( - issubclass(self.driver.ProgrammingError,self.driver.Error) - ) - self.assertTrue( - issubclass(self.driver.NotSupportedError,self.driver.Error) - ) - - def test_ExceptionsAsConnectionAttributes(self): - # OPTIONAL EXTENSION - # Test for the optional DB API 2.0 extension, where the exceptions - # are exposed as attributes on the Connection object - # I figure this optional extension will be implemented by any - # driver author who is using this test suite, so it is enabled - # by default. 
- con = self._connect() - drv = self.driver - self.assertTrue(con.Warning is drv.Warning) - self.assertTrue(con.Error is drv.Error) - self.assertTrue(con.InterfaceError is drv.InterfaceError) - self.assertTrue(con.DatabaseError is drv.DatabaseError) - self.assertTrue(con.OperationalError is drv.OperationalError) - self.assertTrue(con.IntegrityError is drv.IntegrityError) - self.assertTrue(con.InternalError is drv.InternalError) - self.assertTrue(con.ProgrammingError is drv.ProgrammingError) - self.assertTrue(con.NotSupportedError is drv.NotSupportedError) - - - def test_commit(self): - con = self._connect() - try: - # Commit must work, even if it doesn't do anything - con.commit() - finally: - con.close() - - def test_rollback(self): - con = self._connect() - # If rollback is defined, it should either work or throw - # the documented exception - if hasattr(con,'rollback'): - try: - con.rollback() - except self.driver.NotSupportedError: - pass - - def test_cursor(self): - con = self._connect() - try: - cur = con.cursor() - finally: - con.close() - - def test_cursor_isolation(self): - con = self._connect() - try: - # Make sure cursors created from the same connection have - # the documented transaction isolation level - cur1 = con.cursor() - cur2 = con.cursor() - self.executeDDL1(cur1) - cur1.execute("insert into %sbooze values ('Victoria Bitter')" % ( - self.table_prefix - )) - cur2.execute("select name from %sbooze" % self.table_prefix) - booze = cur2.fetchall() - self.assertEqual(len(booze),1) - self.assertEqual(len(booze[0]),1) - self.assertEqual(booze[0][0],'Victoria Bitter') - finally: - con.close() - - def test_description(self): - con = self._connect() - try: - cur = con.cursor() - self.executeDDL1(cur) - self.assertEqual(cur.description,None, - 'cursor.description should be none after executing a ' - 'statement that can return no rows (such as DDL)' - ) - cur.execute('select name from %sbooze' % self.table_prefix) - self.assertEqual(len(cur.description),1, - 
'cursor.description describes too many columns' - ) - self.assertEqual(len(cur.description[0]),7, - 'cursor.description[x] tuples must have 7 elements' - ) - self.assertEqual(cur.description[0][0].lower(),'name', - 'cursor.description[x][0] must return column name' - ) - self.assertEqual(cur.description[0][1],self.driver.STRING, - 'cursor.description[x][1] must return column type. Got %r' - % cur.description[0][1] - ) - - # Make sure self.description gets reset - self.executeDDL2(cur) - self.assertEqual(cur.description,None, - 'cursor.description not being set to None when executing ' - 'no-result statements (eg. DDL)' - ) - finally: - con.close() - - def test_rowcount(self): - con = self._connect() - try: - cur = con.cursor() - self.executeDDL1(cur) - self.assertEqual(cur.rowcount,-1, - 'cursor.rowcount should be -1 after executing no-result ' - 'statements' - ) - cur.execute("insert into %sbooze values ('Victoria Bitter')" % ( - self.table_prefix - )) - self.assertTrue(cur.rowcount in (-1,1), - 'cursor.rowcount should == number or rows inserted, or ' - 'set to -1 after executing an insert statement' - ) - cur.execute("select name from %sbooze" % self.table_prefix) - self.assertTrue(cur.rowcount in (-1,1), - 'cursor.rowcount should == number of rows returned, or ' - 'set to -1 after executing a select statement' - ) - self.executeDDL2(cur) - self.assertEqual(cur.rowcount,-1, - 'cursor.rowcount not being reset to -1 after executing ' - 'no-result statements' - ) - finally: - con.close() - - lower_func = 'lower' - def test_callproc(self): - con = self._connect() - try: - cur = con.cursor() - if self.lower_func and hasattr(cur,'callproc'): - r = cur.callproc(self.lower_func,('FOO',)) - self.assertEqual(len(r),1) - self.assertEqual(r[0],'FOO') - r = cur.fetchall() - self.assertEqual(len(r),1,'callproc produced no result set') - self.assertEqual(len(r[0]),1, - 'callproc produced invalid result set' - ) - self.assertEqual(r[0][0],'foo', - 'callproc produced invalid 
results' - ) - finally: - con.close() - - def test_close(self): - con = self._connect() - try: - cur = con.cursor() - finally: - con.close() - - # cursor.execute should raise an Error if called after connection - # closed - self.assertRaises(self.driver.Error,self.executeDDL1,cur) - - # connection.commit should raise an Error if called after connection' - # closed.' - self.assertRaises(self.driver.Error,con.commit) - - # connection.close should raise an Error if called more than once - self.assertRaises(self.driver.Error,con.close) - - def test_execute(self): - con = self._connect() - try: - cur = con.cursor() - self._paraminsert(cur) - finally: - con.close() - - def _paraminsert(self,cur): - self.executeDDL1(cur) - cur.execute("insert into %sbooze values ('Victoria Bitter')" % ( - self.table_prefix - )) - self.assertTrue(cur.rowcount in (-1,1)) - - if self.driver.paramstyle == 'qmark': - cur.execute( - 'insert into %sbooze values (?)' % self.table_prefix, - ("Cooper's",) - ) - elif self.driver.paramstyle == 'numeric': - cur.execute( - 'insert into %sbooze values (:1)' % self.table_prefix, - ("Cooper's",) - ) - elif self.driver.paramstyle == 'named': - cur.execute( - 'insert into %sbooze values (:beer)' % self.table_prefix, - {'beer':"Cooper's"} - ) - elif self.driver.paramstyle == 'format': - cur.execute( - 'insert into %sbooze values (%%s)' % self.table_prefix, - ("Cooper's",) - ) - elif self.driver.paramstyle == 'pyformat': - cur.execute( - 'insert into %sbooze values (%%(beer)s)' % self.table_prefix, - {'beer':"Cooper's"} - ) - else: - self.fail('Invalid paramstyle') - self.assertTrue(cur.rowcount in (-1,1)) - - cur.execute('select name from %sbooze' % self.table_prefix) - res = cur.fetchall() - self.assertEqual(len(res),2,'cursor.fetchall returned too few rows') - beers = [res[0][0],res[1][0]] - beers.sort() - self.assertEqual(beers[0],"Cooper's", - 'cursor.fetchall retrieved incorrect data, or data inserted ' - 'incorrectly' - ) - 
self.assertEqual(beers[1],"Victoria Bitter", - 'cursor.fetchall retrieved incorrect data, or data inserted ' - 'incorrectly' - ) - - def test_executemany(self): - con = self._connect() - try: - cur = con.cursor() - self.executeDDL1(cur) - largs = [ ("Cooper's",) , ("Boag's",) ] - margs = [ {'beer': "Cooper's"}, {'beer': "Boag's"} ] - if self.driver.paramstyle == 'qmark': - cur.executemany( - 'insert into %sbooze values (?)' % self.table_prefix, - largs - ) - elif self.driver.paramstyle == 'numeric': - cur.executemany( - 'insert into %sbooze values (:1)' % self.table_prefix, - largs - ) - elif self.driver.paramstyle == 'named': - cur.executemany( - 'insert into %sbooze values (:beer)' % self.table_prefix, - margs - ) - elif self.driver.paramstyle == 'format': - cur.executemany( - 'insert into %sbooze values (%%s)' % self.table_prefix, - largs - ) - elif self.driver.paramstyle == 'pyformat': - cur.executemany( - 'insert into %sbooze values (%%(beer)s)' % ( - self.table_prefix - ), - margs - ) - else: - self.fail('Unknown paramstyle') - self.assertTrue(cur.rowcount in (-1,2), - 'insert using cursor.executemany set cursor.rowcount to ' - 'incorrect value %r' % cur.rowcount - ) - cur.execute('select name from %sbooze' % self.table_prefix) - res = cur.fetchall() - self.assertEqual(len(res),2, - 'cursor.fetchall retrieved incorrect number of rows' - ) - beers = [res[0][0],res[1][0]] - beers.sort() - self.assertEqual(beers[0],"Boag's",'incorrect data retrieved') - self.assertEqual(beers[1],"Cooper's",'incorrect data retrieved') - finally: - con.close() - - def test_fetchone(self): - con = self._connect() - try: - cur = con.cursor() - - # cursor.fetchone should raise an Error if called before - # executing a select-type query - self.assertRaises(self.driver.Error,cur.fetchone) - - # cursor.fetchone should raise an Error if called after - # executing a query that cannot return rows - self.executeDDL1(cur) - self.assertRaises(self.driver.Error,cur.fetchone) - - 
cur.execute('select name from %sbooze' % self.table_prefix) - self.assertEqual(cur.fetchone(),None, - 'cursor.fetchone should return None if a query retrieves ' - 'no rows' - ) - self.assertTrue(cur.rowcount in (-1,0)) - - # cursor.fetchone should raise an Error if called after - # executing a query that cannot return rows - cur.execute("insert into %sbooze values ('Victoria Bitter')" % ( - self.table_prefix - )) - self.assertRaises(self.driver.Error,cur.fetchone) - - cur.execute('select name from %sbooze' % self.table_prefix) - r = cur.fetchone() - self.assertEqual(len(r),1, - 'cursor.fetchone should have retrieved a single row' - ) - self.assertEqual(r[0],'Victoria Bitter', - 'cursor.fetchone retrieved incorrect data' - ) - self.assertEqual(cur.fetchone(),None, - 'cursor.fetchone should return None if no more rows available' - ) - self.assertTrue(cur.rowcount in (-1,1)) - finally: - con.close() - - samples = [ - 'Carlton Cold', - 'Carlton Draft', - 'Mountain Goat', - 'Redback', - 'Victoria Bitter', - 'XXXX' - ] - - def _populate(self): - ''' Return a list of sql commands to setup the DB for the fetch - tests. - ''' - populate = [ - "insert into %sbooze values ('%s')" % (self.table_prefix,s) - for s in self.samples - ] - return populate - - def test_fetchmany(self): - con = self._connect() - try: - cur = con.cursor() - - # cursor.fetchmany should raise an Error if called without - #issuing a query - self.assertRaises(self.driver.Error,cur.fetchmany,4) - - self.executeDDL1(cur) - for sql in self._populate(): - cur.execute(sql) - - cur.execute('select name from %sbooze' % self.table_prefix) - r = cur.fetchmany() - self.assertEqual(len(r),1, - 'cursor.fetchmany retrieved incorrect number of rows, ' - 'default of arraysize is one.' 
- ) - cur.arraysize=10 - r = cur.fetchmany(3) # Should get 3 rows - self.assertEqual(len(r),3, - 'cursor.fetchmany retrieved incorrect number of rows' - ) - r = cur.fetchmany(4) # Should get 2 more - self.assertEqual(len(r),2, - 'cursor.fetchmany retrieved incorrect number of rows' - ) - r = cur.fetchmany(4) # Should be an empty sequence - self.assertEqual(len(r),0, - 'cursor.fetchmany should return an empty sequence after ' - 'results are exhausted' - ) - self.assertTrue(cur.rowcount in (-1,6)) - - # Same as above, using cursor.arraysize - cur.arraysize=4 - cur.execute('select name from %sbooze' % self.table_prefix) - r = cur.fetchmany() # Should get 4 rows - self.assertEqual(len(r),4, - 'cursor.arraysize not being honoured by fetchmany' - ) - r = cur.fetchmany() # Should get 2 more - self.assertEqual(len(r),2) - r = cur.fetchmany() # Should be an empty sequence - self.assertEqual(len(r),0) - self.assertTrue(cur.rowcount in (-1,6)) - - cur.arraysize=6 - cur.execute('select name from %sbooze' % self.table_prefix) - rows = cur.fetchmany() # Should get all rows - self.assertTrue(cur.rowcount in (-1,6)) - self.assertEqual(len(rows),6) - self.assertEqual(len(rows),6) - rows = [r[0] for r in rows] - rows.sort() - - # Make sure we get the right data back out - for i in range(0,6): - self.assertEqual(rows[i],self.samples[i], - 'incorrect data retrieved by cursor.fetchmany' - ) - - rows = cur.fetchmany() # Should return an empty list - self.assertEqual(len(rows),0, - 'cursor.fetchmany should return an empty sequence if ' - 'called after the whole result set has been fetched' - ) - self.assertTrue(cur.rowcount in (-1,6)) - - self.executeDDL2(cur) - cur.execute('select name from %sbarflys' % self.table_prefix) - r = cur.fetchmany() # Should get empty sequence - self.assertEqual(len(r),0, - 'cursor.fetchmany should return an empty sequence if ' - 'query retrieved no rows' - ) - self.assertTrue(cur.rowcount in (-1,0)) - - finally: - con.close() - - def test_fetchall(self): - 
con = self._connect() - try: - cur = con.cursor() - # cursor.fetchall should raise an Error if called - # without executing a query that may return rows (such - # as a select) - self.assertRaises(self.driver.Error, cur.fetchall) - - self.executeDDL1(cur) - for sql in self._populate(): - cur.execute(sql) - - # cursor.fetchall should raise an Error if called - # after executing a a statement that cannot return rows - self.assertRaises(self.driver.Error,cur.fetchall) - - cur.execute('select name from %sbooze' % self.table_prefix) - rows = cur.fetchall() - self.assertTrue(cur.rowcount in (-1,len(self.samples))) - self.assertEqual(len(rows),len(self.samples), - 'cursor.fetchall did not retrieve all rows' - ) - rows = [r[0] for r in rows] - rows.sort() - for i in range(0,len(self.samples)): - self.assertEqual(rows[i],self.samples[i], - 'cursor.fetchall retrieved incorrect rows' - ) - rows = cur.fetchall() - self.assertEqual( - len(rows),0, - 'cursor.fetchall should return an empty list if called ' - 'after the whole result set has been fetched' - ) - self.assertTrue(cur.rowcount in (-1,len(self.samples))) - - self.executeDDL2(cur) - cur.execute('select name from %sbarflys' % self.table_prefix) - rows = cur.fetchall() - self.assertTrue(cur.rowcount in (-1,0)) - self.assertEqual(len(rows),0, - 'cursor.fetchall should return an empty list if ' - 'a select query returns no rows' - ) - - finally: - con.close() - - def test_mixedfetch(self): - con = self._connect() - try: - cur = con.cursor() - self.executeDDL1(cur) - for sql in self._populate(): - cur.execute(sql) - - cur.execute('select name from %sbooze' % self.table_prefix) - rows1 = cur.fetchone() - rows23 = cur.fetchmany(2) - rows4 = cur.fetchone() - rows56 = cur.fetchall() - self.assertTrue(cur.rowcount in (-1,6)) - self.assertEqual(len(rows23),2, - 'fetchmany returned incorrect number of rows' - ) - self.assertEqual(len(rows56),2, - 'fetchall returned incorrect number of rows' - ) - - rows = [rows1[0]] - 
rows.extend([rows23[0][0],rows23[1][0]]) - rows.append(rows4[0]) - rows.extend([rows56[0][0],rows56[1][0]]) - rows.sort() - for i in range(0,len(self.samples)): - self.assertEqual(rows[i],self.samples[i], - 'incorrect data retrieved or inserted' - ) - finally: - con.close() - - def help_nextset_setUp(self,cur): - ''' Should create a procedure called deleteme - that returns two result sets, first the - number of rows in booze then "name from booze" - ''' - raise NotImplementedError,'Helper not implemented' - #sql=""" - # create procedure deleteme as - # begin - # select count(*) from booze - # select name from booze - # end - #""" - #cur.execute(sql) - - def help_nextset_tearDown(self,cur): - 'If cleaning up is needed after nextSetTest' - raise NotImplementedError,'Helper not implemented' - #cur.execute("drop procedure deleteme") - - def test_nextset(self): - con = self._connect() - try: - cur = con.cursor() - if not hasattr(cur,'nextset'): - return - - try: - self.executeDDL1(cur) - sql=self._populate() - for sql in self._populate(): - cur.execute(sql) - - self.help_nextset_setUp(cur) - - cur.callproc('deleteme') - numberofrows=cur.fetchone() - assert numberofrows[0]== len(self.samples) - assert cur.nextset() - names=cur.fetchall() - assert len(names) == len(self.samples) - s=cur.nextset() - assert s == None,'No more return sets, should return None' - finally: - self.help_nextset_tearDown(cur) - - finally: - con.close() - - def test_nextset(self): - raise NotImplementedError,'Drivers need to override this test' - - def test_arraysize(self): - # Not much here - rest of the tests for this are in test_fetchmany - con = self._connect() - try: - cur = con.cursor() - self.assertTrue(hasattr(cur,'arraysize'), - 'cursor.arraysize must be defined' - ) - finally: - con.close() - - def test_setinputsizes(self): - con = self._connect() - try: - cur = con.cursor() - cur.setinputsizes( (25,) ) - self._paraminsert(cur) # Make sure cursor still works - finally: - con.close() - - 
def test_setoutputsize_basic(self): - # Basic test is to make sure setoutputsize doesn't blow up - con = self._connect() - try: - cur = con.cursor() - cur.setoutputsize(1000) - cur.setoutputsize(2000,0) - self._paraminsert(cur) # Make sure the cursor still works - finally: - con.close() - - def test_setoutputsize(self): - # Real test for setoutputsize is driver dependent - raise NotImplementedError,'Driver need to override this test' - - def test_None(self): - con = self._connect() - try: - cur = con.cursor() - self.executeDDL1(cur) - cur.execute('insert into %sbooze values (NULL)' % self.table_prefix) - cur.execute('select name from %sbooze' % self.table_prefix) - r = cur.fetchall() - self.assertEqual(len(r),1) - self.assertEqual(len(r[0]),1) - self.assertEqual(r[0][0],None,'NULL value not returned as None') - finally: - con.close() - - def test_Date(self): - d1 = self.driver.Date(2002,12,25) - d2 = self.driver.DateFromTicks(time.mktime((2002,12,25,0,0,0,0,0,0))) - # Can we assume this? API doesn't specify, but it seems implied - # self.assertEqual(str(d1),str(d2)) - - def test_Time(self): - t1 = self.driver.Time(13,45,30) - t2 = self.driver.TimeFromTicks(time.mktime((2001,1,1,13,45,30,0,0,0))) - # Can we assume this? API doesn't specify, but it seems implied - # self.assertEqual(str(t1),str(t2)) - - def test_Timestamp(self): - t1 = self.driver.Timestamp(2002,12,25,13,45,30) - t2 = self.driver.TimestampFromTicks( - time.mktime((2002,12,25,13,45,30,0,0,0)) - ) - # Can we assume this? API doesn't specify, but it seems implied - # self.assertEqual(str(t1),str(t2)) - - def test_Binary(self): - b = self.driver.Binary('Something') - b = self.driver.Binary('') - - def test_STRING(self): - self.assertTrue(hasattr(self.driver,'STRING'), - 'module.STRING must be defined' - ) - - def test_BINARY(self): - self.assertTrue(hasattr(self.driver,'BINARY'), - 'module.BINARY must be defined.' 
- ) - - def test_NUMBER(self): - self.assertTrue(hasattr(self.driver,'NUMBER'), - 'module.NUMBER must be defined.' - ) - - def test_DATETIME(self): - self.assertTrue(hasattr(self.driver,'DATETIME'), - 'module.DATETIME must be defined.' - ) - - def test_ROWID(self): - self.assertTrue(hasattr(self.driver,'ROWID'), - 'module.ROWID must be defined.' - ) - diff --git a/tests2/dbapitests.py b/tests2/dbapitests.py deleted file mode 100755 index a2fd8c2e..00000000 --- a/tests2/dbapitests.py +++ /dev/null @@ -1,45 +0,0 @@ -import sys -import unittest -from testutils import * -import dbapi20 - -def main(): - add_to_path() - import pyodbc - - from optparse import OptionParser - parser = OptionParser(usage="usage: %prog [options] connection_string") - parser.add_option("-v", "--verbose", action="count", help="Increment test verbosity (can be used multiple times)") - parser.add_option("-d", "--debug", action="store_true", default=False, help="Print debugging items") - - (options, args) = parser.parse_args() - if len(args) > 1: - parser.error('Only one argument is allowed. 
Do you need quotes around the connection string?') - - if not args: - connection_string = load_setup_connection_string('dbapitests') - - if not connection_string: - parser.print_help() - raise SystemExit() - else: - connection_string = args[0] - - class test_pyodbc(dbapi20.DatabaseAPI20Test): - driver = pyodbc - connect_args = [ connection_string ] - connect_kw_args = {} - - def test_nextset(self): pass - def test_setoutputsize(self): pass - def test_ExceptionsAsConnectionAttributes(self): pass - - suite = unittest.makeSuite(test_pyodbc, 'test') - testRunner = unittest.TextTestRunner(verbosity=(options.verbose > 1) and 9 or 0) - result = testRunner.run(suite) - - return result - - -if __name__ == '__main__': - sys.exit(0 if main().wasSuccessful() else 1) diff --git a/tests2/exceltests.py b/tests2/exceltests.py deleted file mode 100755 index 10062b0c..00000000 --- a/tests2/exceltests.py +++ /dev/null @@ -1,143 +0,0 @@ -#!/usr/bin/python - -# Tests for reading from Excel files. -# -# I have not been able to successfully create or modify Excel files. - -import sys, os, re -import unittest -from os.path import abspath -from testutils import * - -CNXNSTRING = None - -class ExcelTestCase(unittest.TestCase): - - def __init__(self, method_name): - unittest.TestCase.__init__(self, method_name) - - def setUp(self): - self.cnxn = pyodbc.connect(CNXNSTRING, autocommit=True) - self.cursor = self.cnxn.cursor() - - for i in range(3): - try: - self.cursor.execute("drop table t%d" % i) - self.cnxn.commit() - except: - pass - - self.cnxn.rollback() - - def tearDown(self): - try: - self.cursor.close() - self.cnxn.close() - except: - # If we've already closed the cursor or connection, exceptions are thrown. 
- pass - - def test_getinfo_string(self): - value = self.cnxn.getinfo(pyodbc.SQL_CATALOG_NAME_SEPARATOR) - self.assertTrue(isinstance(value, str)) - - def test_getinfo_bool(self): - value = self.cnxn.getinfo(pyodbc.SQL_ACCESSIBLE_TABLES) - self.assertTrue(isinstance(value, bool)) - - def test_getinfo_int(self): - value = self.cnxn.getinfo(pyodbc.SQL_DEFAULT_TXN_ISOLATION) - self.assertTrue(isinstance(value, (int, long))) - - def test_getinfo_smallint(self): - value = self.cnxn.getinfo(pyodbc.SQL_CONCAT_NULL_BEHAVIOR) - self.assertTrue(isinstance(value, int)) - - - def test_read_sheet(self): - # The first method of reading data is to access worksheets by name in this format [name$]. - # - # Our second sheet is named Sheet2 and has two columns. The first has values 10, 20, 30, etc. - - rows = self.cursor.execute("select * from [Sheet2$]").fetchall() - self.assertEqual(len(rows), 5) - - for index, row in enumerate(rows): - self.assertEqual(row.s2num, float(index + 1) * 10) - - def test_read_range(self): - # The second method of reading data is to assign a name to a range of cells and access that as a table. - # - # Our first worksheet has a section named Table1. The first column has values 1, 2, 3, etc. 
- - rows = self.cursor.execute("select * from Table1").fetchall() - self.assertEqual(len(rows), 10) - - for index, row in enumerate(rows): - self.assertEqual(row.num, float(index + 1)) - self.assertEqual(row.val, chr(ord('a') + index)) - - def test_tables(self): - # This is useful for figuring out what is available - tables = [ row.table_name for row in self.cursor.tables() ] - assert 'Sheet2$' in tables, 'tables: %s' % ' '.join(tables) - - - # def test_append(self): - # rows = self.cursor.execute("select s2num, s2val from [Sheet2$]").fetchall() - # - # print rows - # - # nextnum = max([ row.s2num for row in rows ]) + 10 - # - # self.cursor.execute("insert into [Sheet2$](s2num, s2val) values (?, 'z')", nextnum) - # - # row = self.cursor.execute("select s2num, s2val from [Sheet2$] where s2num=?", nextnum).fetchone() - # self.assertTrue(row) - # - # print 'added:', nextnum, len(rows), 'rows' - # - # self.assertEqual(row.s2num, nextnum) - # self.assertEqual(row.s2val, 'z') - # - # self.cnxn.commit() - - -def main(): - from optparse import OptionParser - parser = OptionParser() #usage=usage) - parser.add_option("-v", "--verbose", action="count", help="Increment test verbosity (can be used multiple times)") - parser.add_option("-d", "--debug", action="store_true", default=False, help="Print debugging items") - parser.add_option("-t", "--test", help="Run only the named test") - - (options, args) = parser.parse_args() - - if args: - parser.error('no arguments expected') - - global CNXNSTRING - - path = dirname(abspath(__file__)) - filename = join(path, 'test.xls') - assert os.path.exists(filename) - CNXNSTRING = 'Driver={Microsoft Excel Driver (*.xls)};DBQ=%s;READONLY=FALSE' % filename - - if options.verbose: - cnxn = pyodbc.connect(CNXNSTRING, autocommit=True) - print_library_info(cnxn) - cnxn.close() - - suite = load_tests(ExcelTestCase, options.test) - - testRunner = unittest.TextTestRunner(verbosity=options.verbose) - result = testRunner.run(suite) - - return result - 
- -if __name__ == '__main__': - - # Add the build directory to the path so we're testing the latest build, not the installed version. - add_to_path() - import pyodbc - sys.exit(0 if main().wasSuccessful() else 1) diff --git a/tests2/informixtests.py b/tests2/informixtests.py deleted file mode 100755 index 36525e90..00000000 --- a/tests2/informixtests.py +++ /dev/null @@ -1,1275 +0,0 @@ -#!/usr/bin/python -# -*- coding: latin-1 -*- - -usage = """\ -usage: %prog [options] connection_string - -Unit tests for Informix DB. To use, pass a connection string as the parameter. -The tests will create and drop tables t1 and t2 as necessary. - -These run using the version from the 'build' directory, not the version -installed into the Python directories. You must run python setup.py build -before running the tests. - -You can also put the connection string into a tmp/setup.cfg file like so: - - [informixtests] - connection-string=DRIVER={IBM INFORMIX ODBC DRIVER (64-bit)};SERVER=localhost;UID=uid;PWD=pwd;DATABASE=db -""" - -import sys, os, re -import unittest -from decimal import Decimal -from datetime import datetime, date, time -from os.path import join, getsize, dirname, abspath -from testutils import * - -_TESTSTR = '0123456789-abcdefghijklmnopqrstuvwxyz-' - -def _generate_test_string(length): - """ - Returns a string of `length` characters, constructed by repeating _TESTSTR as necessary. - - To enhance performance, there are 3 ways data is read, based on the length of the value, so most data types are - tested with 3 lengths. This function helps us generate the test data. - - We use a recognizable data set instead of a single character to make it less likely that "overlap" errors will - be hidden and to help us manually identify where a break occurs. 
- """ - if length <= len(_TESTSTR): - return _TESTSTR[:length] - - c = (length + len(_TESTSTR)-1) / len(_TESTSTR) - v = _TESTSTR * c - return v[:length] - -class InformixTestCase(unittest.TestCase): - - SMALL_FENCEPOST_SIZES = [ 0, 1, 255, 256, 510, 511, 512, 1023, 1024, 2047, 2048, 4000 ] - LARGE_FENCEPOST_SIZES = [ 4095, 4096, 4097, 10 * 1024, 20 * 1024 ] - - ANSI_FENCEPOSTS = [ _generate_test_string(size) for size in SMALL_FENCEPOST_SIZES ] - UNICODE_FENCEPOSTS = [ unicode(s) for s in ANSI_FENCEPOSTS ] - IMAGE_FENCEPOSTS = ANSI_FENCEPOSTS + [ _generate_test_string(size) for size in LARGE_FENCEPOST_SIZES ] - - def __init__(self, method_name, connection_string): - unittest.TestCase.__init__(self, method_name) - self.connection_string = connection_string - - def setUp(self): - self.cnxn = pyodbc.connect(self.connection_string) - self.cursor = self.cnxn.cursor() - - for i in range(3): - try: - self.cursor.execute("drop table t%d" % i) - self.cnxn.commit() - except: - pass - - for i in range(3): - try: - self.cursor.execute("drop procedure proc%d" % i) - self.cnxn.commit() - except: - pass - - try: - self.cursor.execute('drop function func1') - self.cnxn.commit() - except: - pass - - self.cnxn.rollback() - - def tearDown(self): - try: - self.cursor.close() - self.cnxn.close() - except: - # If we've already closed the cursor or connection, exceptions are thrown. 
- pass - - def test_multiple_bindings(self): - "More than one bind and select on a cursor" - self.cursor.execute("create table t1(n int)") - self.cursor.execute("insert into t1 values (?)", 1) - self.cursor.execute("insert into t1 values (?)", 2) - self.cursor.execute("insert into t1 values (?)", 3) - for i in range(3): - self.cursor.execute("select n from t1 where n < ?", 10) - self.cursor.execute("select n from t1 where n < 3") - - - def test_different_bindings(self): - self.cursor.execute("create table t1(n int)") - self.cursor.execute("create table t2(d datetime)") - self.cursor.execute("insert into t1 values (?)", 1) - self.cursor.execute("insert into t2 values (?)", datetime.now()) - - def test_drivers(self): - p = pyodbc.drivers() - self.assertTrue(isinstance(p, list)) - - def test_datasources(self): - p = pyodbc.dataSources() - self.assertTrue(isinstance(p, dict)) - - def test_getinfo_string(self): - value = self.cnxn.getinfo(pyodbc.SQL_CATALOG_NAME_SEPARATOR) - self.assertTrue(isinstance(value, str)) - - def test_getinfo_bool(self): - value = self.cnxn.getinfo(pyodbc.SQL_ACCESSIBLE_TABLES) - self.assertTrue(isinstance(value, bool)) - - def test_getinfo_int(self): - value = self.cnxn.getinfo(pyodbc.SQL_DEFAULT_TXN_ISOLATION) - self.assertTrue(isinstance(value, (int, long))) - - def test_getinfo_smallint(self): - value = self.cnxn.getinfo(pyodbc.SQL_CONCAT_NULL_BEHAVIOR) - self.assertTrue(isinstance(value, int)) - - def test_noscan(self): - self.assertEqual(self.cursor.noscan, False) - self.cursor.noscan = True - self.assertEqual(self.cursor.noscan, True) - - def test_guid(self): - self.cursor.execute("create table t1(g1 uniqueidentifier)") - self.cursor.execute("insert into t1 values (newid())") - v = self.cursor.execute("select * from t1").fetchone()[0] - self.assertEqual(type(v), str) - self.assertEqual(len(v), 36) - - def test_nextset(self): - self.cursor.execute("create table t1(i int)") - for i in range(4): - self.cursor.execute("insert into t1(i) 
values(?)", i) - - self.cursor.execute("select i from t1 where i < 2 order by i; select i from t1 where i >= 2 order by i") - - for i, row in enumerate(self.cursor): - self.assertEqual(i, row.i) - - self.assertEqual(self.cursor.nextset(), True) - - for i, row in enumerate(self.cursor): - self.assertEqual(i + 2, row.i) - - def test_fixed_unicode(self): - value = u"t\xebsting" - self.cursor.execute("create table t1(s nchar(7))") - self.cursor.execute("insert into t1 values(?)", u"t\xebsting") - v = self.cursor.execute("select * from t1").fetchone()[0] - self.assertEqual(type(v), unicode) - self.assertEqual(len(v), len(value)) # If we alloc'd wrong, the test below might work because of an embedded NULL - self.assertEqual(v, value) - - - def _test_strtype(self, sqltype, value, colsize=None): - """ - The implementation for string, Unicode, and binary tests. - """ - assert colsize is None or (value is None or colsize >= len(value)) - - if colsize: - sql = "create table t1(s %s(%s))" % (sqltype, colsize) - else: - sql = "create table t1(s %s)" % sqltype - - self.cursor.execute(sql) - self.cursor.execute("insert into t1 values(?)", value) - v = self.cursor.execute("select * from t1").fetchone()[0] - self.assertEqual(type(v), type(value)) - - if value is not None: - self.assertEqual(len(v), len(value)) - - self.assertEqual(v, value) - - # Reported by Andy Hochhaus in the pyodbc group: In 2.1.7 and earlier, a hardcoded length of 255 was used to - # determine whether a parameter was bound as a SQL_VARCHAR or SQL_LONGVARCHAR. Apparently SQL Server chokes if - # we bind as a SQL_LONGVARCHAR and the target column size is 8000 or less, which is considers just SQL_VARCHAR. - # This means binding a 256 character value would cause problems if compared with a VARCHAR column under - # 8001. We now use SQLGetTypeInfo to determine the time to switch. 
- # - # [42000] [Microsoft][SQL Server Native Client 10.0][SQL Server]The data types varchar and text are incompatible in the equal to operator. - - self.cursor.execute("select * from t1 where s=?", value) - - - def _test_strliketype(self, sqltype, value, colsize=None): - """ - The implementation for text, image, ntext, and binary. - - These types do not support comparison operators. - """ - assert colsize is None or (value is None or colsize >= len(value)) - - if colsize: - sql = "create table t1(s %s(%s))" % (sqltype, colsize) - else: - sql = "create table t1(s %s)" % sqltype - - self.cursor.execute(sql) - self.cursor.execute("insert into t1 values(?)", value) - v = self.cursor.execute("select * from t1").fetchone()[0] - self.assertEqual(type(v), type(value)) - - if value is not None: - self.assertEqual(len(v), len(value)) - - self.assertEqual(v, value) - - - # - # varchar - # - - def test_varchar_null(self): - self._test_strtype('varchar', None, 100) - - # Generate a test for each fencepost size: test_varchar_0, etc. - def _maketest(value): - def t(self): - self._test_strtype('varchar', value, len(value)) - return t - for value in ANSI_FENCEPOSTS: - locals()['test_varchar_%s' % len(value)] = _maketest(value) - - def test_varchar_many(self): - self.cursor.execute("create table t1(c1 varchar(300), c2 varchar(300), c3 varchar(300))") - - v1 = 'ABCDEFGHIJ' * 30 - v2 = '0123456789' * 30 - v3 = '9876543210' * 30 - - self.cursor.execute("insert into t1(c1, c2, c3) values (?,?,?)", v1, v2, v3); - row = self.cursor.execute("select c1, c2, c3, len(c1) as l1, len(c2) as l2, len(c3) as l3 from t1").fetchone() - - self.assertEqual(v1, row.c1) - self.assertEqual(v2, row.c2) - self.assertEqual(v3, row.c3) - - def test_varchar_upperlatin(self): - self._test_strtype('varchar', '') - - # - # unicode - # - - def test_unicode_null(self): - self._test_strtype('nvarchar', None, 100) - - # Generate a test for each fencepost size: test_unicode_0, etc. 
- def _maketest(value): - def t(self): - self._test_strtype('nvarchar', value, len(value)) - return t - for value in UNICODE_FENCEPOSTS: - locals()['test_unicode_%s' % len(value)] = _maketest(value) - - def test_unicode_upperlatin(self): - self._test_strtype('varchar', '') - - # - # binary - # - - def test_null_binary(self): - self._test_strtype('varbinary', None, 100) - - def test_large_null_binary(self): - # Bug 1575064 - self._test_strtype('varbinary', None, 4000) - - # Generate a test for each fencepost size: test_unicode_0, etc. - def _maketest(value): - def t(self): - self._test_strtype('varbinary', buffer(value), len(value)) - return t - for value in ANSI_FENCEPOSTS: - locals()['test_binary_%s' % len(value)] = _maketest(value) - - # - # image - # - - def test_image_null(self): - self._test_strliketype('image', None) - - # Generate a test for each fencepost size: test_unicode_0, etc. - def _maketest(value): - def t(self): - self._test_strliketype('image', buffer(value)) - return t - for value in IMAGE_FENCEPOSTS: - locals()['test_image_%s' % len(value)] = _maketest(value) - - def test_image_upperlatin(self): - self._test_strliketype('image', buffer('')) - - # - # text - # - - # def test_empty_text(self): - # self._test_strliketype('text', buffer('')) - - def test_null_text(self): - self._test_strliketype('text', None) - - # Generate a test for each fencepost size: test_unicode_0, etc. 
- def _maketest(value): - def t(self): - self._test_strliketype('text', value) - return t - for value in ANSI_FENCEPOSTS: - locals()['test_text_%s' % len(value)] = _maketest(value) - - def test_text_upperlatin(self): - self._test_strliketype('text', '') - - # - # bit - # - - def test_bit(self): - value = True - self.cursor.execute("create table t1(b bit)") - self.cursor.execute("insert into t1 values (?)", value) - v = self.cursor.execute("select b from t1").fetchone()[0] - self.assertEqual(type(v), bool) - self.assertEqual(v, value) - - # - # decimal - # - - def _decimal(self, precision, scale, negative): - # From test provided by planders (thanks!) in Issue 91 - - self.cursor.execute("create table t1(d decimal(%s, %s))" % (precision, scale)) - - # Construct a decimal that uses the maximum precision and scale. - decStr = '9' * (precision - scale) - if scale: - decStr = decStr + "." + '9' * scale - if negative: - decStr = "-" + decStr - value = Decimal(decStr) - - self.cursor.execute("insert into t1 values(?)", value) - - v = self.cursor.execute("select d from t1").fetchone()[0] - self.assertEqual(v, value) - - def _maketest(p, s, n): - def t(self): - self._decimal(p, s, n) - return t - for (p, s, n) in [ (1, 0, False), - (1, 0, True), - (6, 0, False), - (6, 2, False), - (6, 4, True), - (6, 6, True), - (38, 0, False), - (38, 10, False), - (38, 38, False), - (38, 0, True), - (38, 10, True), - (38, 38, True) ]: - locals()['test_decimal_%s_%s_%s' % (p, s, n and 'n' or 'p')] = _maketest(p, s, n) - - - def test_decimal_e(self): - """Ensure exponential notation decimals are properly handled""" - value = Decimal((0, (1, 2, 3), 5)) # prints as 1.23E+7 - self.cursor.execute("create table t1(d decimal(10, 2))") - self.cursor.execute("insert into t1 values (?)", value) - result = self.cursor.execute("select * from t1").fetchone()[0] - self.assertEqual(result, value) - - def test_subquery_params(self): - """Ensure parameter markers work in a subquery""" - 
self.cursor.execute("create table t1(id integer, s varchar(20))") - self.cursor.execute("insert into t1 values (?,?)", 1, 'test') - row = self.cursor.execute(""" - select x.id - from ( - select id - from t1 - where s = ? - and id between ? and ? - ) x - """, 'test', 1, 10).fetchone() - self.assertNotEqual(row, None) - self.assertEqual(row[0], 1) - - def _exec(self): - self.cursor.execute(self.sql) - - def test_close_cnxn(self): - """Make sure using a Cursor after closing its connection doesn't crash.""" - - self.cursor.execute("create table t1(id integer, s varchar(20))") - self.cursor.execute("insert into t1 values (?,?)", 1, 'test') - self.cursor.execute("select * from t1") - - self.cnxn.close() - - # Now that the connection is closed, we expect an exception. (If the code attempts to use - # the HSTMT, we'll get an access violation instead.) - self.sql = "select * from t1" - self.assertRaises(pyodbc.ProgrammingError, self._exec) - - def test_empty_string(self): - self.cursor.execute("create table t1(s varchar(20))") - self.cursor.execute("insert into t1 values(?)", "") - - def test_fixed_str(self): - value = "testing" - self.cursor.execute("create table t1(s char(7))") - self.cursor.execute("insert into t1 values(?)", "testing") - v = self.cursor.execute("select * from t1").fetchone()[0] - self.assertEqual(type(v), str) - self.assertEqual(len(v), len(value)) # If we alloc'd wrong, the test below might work because of an embedded NULL - self.assertEqual(v, value) - - def test_empty_unicode(self): - self.cursor.execute("create table t1(s nvarchar(20))") - self.cursor.execute("insert into t1 values(?)", u"") - - def test_unicode_query(self): - self.cursor.execute(u"select 1") - - def test_negative_row_index(self): - self.cursor.execute("create table t1(s varchar(20))") - self.cursor.execute("insert into t1 values(?)", "1") - row = self.cursor.execute("select * from t1").fetchone() - self.assertEqual(row[0], "1") - self.assertEqual(row[-1], "1") - - def 
test_version(self): - self.assertEqual(3, len(pyodbc.version.split('.'))) # 1.3.1 etc. - - # - # date, time, datetime - # - - def test_datetime(self): - value = datetime(2007, 1, 15, 3, 4, 5) - - self.cursor.execute("create table t1(dt datetime)") - self.cursor.execute("insert into t1 values (?)", value) - - result = self.cursor.execute("select dt from t1").fetchone()[0] - self.assertEqual(type(value), datetime) - self.assertEqual(value, result) - - def test_datetime_fraction(self): - # SQL Server supports milliseconds, but Python's datetime supports nanoseconds, so the most granular datetime - # supported is xxx000. - - value = datetime(2007, 1, 15, 3, 4, 5, 123000) - - self.cursor.execute("create table t1(dt datetime)") - self.cursor.execute("insert into t1 values (?)", value) - - result = self.cursor.execute("select dt from t1").fetchone()[0] - self.assertEqual(type(value), datetime) - self.assertEqual(result, value) - - def test_datetime_fraction_rounded(self): - # SQL Server supports milliseconds, but Python's datetime supports nanoseconds. pyodbc rounds down to what the - # database supports. - - full = datetime(2007, 1, 15, 3, 4, 5, 123456) - rounded = datetime(2007, 1, 15, 3, 4, 5, 123000) - - self.cursor.execute("create table t1(dt datetime)") - self.cursor.execute("insert into t1 values (?)", full) - - result = self.cursor.execute("select dt from t1").fetchone()[0] - self.assertEqual(type(result), datetime) - self.assertEqual(result, rounded) - - def test_date(self): - value = date.today() - - self.cursor.execute("create table t1(d date)") - self.cursor.execute("insert into t1 values (?)", value) - - result = self.cursor.execute("select d from t1").fetchone()[0] - self.assertEqual(type(value), date) - self.assertEqual(value, result) - - def test_time(self): - value = datetime.now().time() - - # We aren't yet writing values using the new extended time type so the value written to the database is only - # down to the second. 
- value = value.replace(microsecond=0) - - self.cursor.execute("create table t1(t time)") - self.cursor.execute("insert into t1 values (?)", value) - - result = self.cursor.execute("select t from t1").fetchone()[0] - self.assertEqual(type(value), time) - self.assertEqual(value, result) - - def test_datetime2(self): - value = datetime(2007, 1, 15, 3, 4, 5) - - self.cursor.execute("create table t1(dt datetime2)") - self.cursor.execute("insert into t1 values (?)", value) - - result = self.cursor.execute("select dt from t1").fetchone()[0] - self.assertEqual(type(value), datetime) - self.assertEqual(value, result) - - # - # ints and floats - # - - def test_int(self): - value = 1234 - self.cursor.execute("create table t1(n int)") - self.cursor.execute("insert into t1 values (?)", value) - result = self.cursor.execute("select n from t1").fetchone()[0] - self.assertEqual(result, value) - - def test_negative_int(self): - value = -1 - self.cursor.execute("create table t1(n int)") - self.cursor.execute("insert into t1 values (?)", value) - result = self.cursor.execute("select n from t1").fetchone()[0] - self.assertEqual(result, value) - - def test_bigint(self): - input = 3000000000 - self.cursor.execute("create table t1(d bigint)") - self.cursor.execute("insert into t1 values (?)", input) - result = self.cursor.execute("select d from t1").fetchone()[0] - self.assertEqual(result, input) - - def test_float(self): - value = 1234.567 - self.cursor.execute("create table t1(n float)") - self.cursor.execute("insert into t1 values (?)", value) - result = self.cursor.execute("select n from t1").fetchone()[0] - self.assertEqual(result, value) - - def test_negative_float(self): - value = -200 - self.cursor.execute("create table t1(n float)") - self.cursor.execute("insert into t1 values (?)", value) - result = self.cursor.execute("select n from t1").fetchone()[0] - self.assertEqual(value, result) - - - # - # stored procedures - # - - # def test_callproc(self): - # "callproc with a simple 
input-only stored procedure" - # pass - - def test_sp_results(self): - self.cursor.execute( - """ - Create procedure proc1 - AS - select top 10 name, id, xtype, refdate - from sysobjects - """) - rows = self.cursor.execute("exec proc1").fetchall() - self.assertEqual(type(rows), list) - self.assertEqual(len(rows), 10) # there has to be at least 10 items in sysobjects - self.assertEqual(type(rows[0].refdate), datetime) - - - def test_sp_results_from_temp(self): - - # Note: I've used "set nocount on" so that we don't get the number of rows deleted from #tmptable. - # If you don't do this, you'd need to call nextset() once to skip it. - - self.cursor.execute( - """ - Create procedure proc1 - AS - set nocount on - select top 10 name, id, xtype, refdate - into #tmptable - from sysobjects - - select * from #tmptable - """) - self.cursor.execute("exec proc1") - self.assertTrue(self.cursor.description is not None) - self.assertTrue(len(self.cursor.description) == 4) - - rows = self.cursor.fetchall() - self.assertEqual(type(rows), list) - self.assertEqual(len(rows), 10) # there has to be at least 10 items in sysobjects - self.assertEqual(type(rows[0].refdate), datetime) - - - def test_sp_results_from_vartbl(self): - self.cursor.execute( - """ - Create procedure proc1 - AS - set nocount on - declare @tmptbl table(name varchar(100), id int, xtype varchar(4), refdate datetime) - - insert into @tmptbl - select top 10 name, id, xtype, refdate - from sysobjects - - select * from @tmptbl - """) - self.cursor.execute("exec proc1") - rows = self.cursor.fetchall() - self.assertEqual(type(rows), list) - self.assertEqual(len(rows), 10) # there has to be at least 10 items in sysobjects - self.assertEqual(type(rows[0].refdate), datetime) - - def test_sp_with_dates(self): - # Reported in the forums that passing two datetimes to a stored procedure doesn't work. 
- self.cursor.execute( - """ - if exists (select * from dbo.sysobjects where id = object_id(N'[test_sp]') and OBJECTPROPERTY(id, N'IsProcedure') = 1) - drop procedure [dbo].[test_sp] - """) - self.cursor.execute( - """ - create procedure test_sp(@d1 datetime, @d2 datetime) - AS - declare @d as int - set @d = datediff(year, @d1, @d2) - select @d - """) - self.cursor.execute("exec test_sp ?, ?", datetime.now(), datetime.now()) - rows = self.cursor.fetchall() - self.assertTrue(rows is not None) - self.assertTrue(rows[0][0] == 0) # 0 years apart - - def test_sp_with_none(self): - # Reported in the forums that passing None caused an error. - self.cursor.execute( - """ - if exists (select * from dbo.sysobjects where id = object_id(N'[test_sp]') and OBJECTPROPERTY(id, N'IsProcedure') = 1) - drop procedure [dbo].[test_sp] - """) - self.cursor.execute( - """ - create procedure test_sp(@x varchar(20)) - AS - declare @y varchar(20) - set @y = @x - select @y - """) - self.cursor.execute("exec test_sp ?", None) - rows = self.cursor.fetchall() - self.assertTrue(rows is not None) - self.assertTrue(rows[0][0] == None) # 0 years apart - - - # - # rowcount - # - - def test_rowcount_delete(self): - self.assertEqual(self.cursor.rowcount, -1) - self.cursor.execute("create table t1(i int)") - count = 4 - for i in range(count): - self.cursor.execute("insert into t1 values (?)", i) - self.cursor.execute("delete from t1") - self.assertEqual(self.cursor.rowcount, count) - - def test_rowcount_nodata(self): - """ - This represents a different code path than a delete that deleted something. - - The return value is SQL_NO_DATA and code after it was causing an error. We could use SQL_NO_DATA to step over - the code that errors out and drop down to the same SQLRowCount code. On the other hand, we could hardcode a - zero return value. - """ - self.cursor.execute("create table t1(i int)") - # This is a different code path internally. 
- self.cursor.execute("delete from t1") - self.assertEqual(self.cursor.rowcount, 0) - - def test_rowcount_select(self): - """ - Ensure Cursor.rowcount is set properly after a select statement. - - pyodbc calls SQLRowCount after each execute and sets Cursor.rowcount, but SQL Server 2005 returns -1 after a - select statement, so we'll test for that behavior. This is valid behavior according to the DB API - specification, but people don't seem to like it. - """ - self.cursor.execute("create table t1(i int)") - count = 4 - for i in range(count): - self.cursor.execute("insert into t1 values (?)", i) - self.cursor.execute("select * from t1") - self.assertEqual(self.cursor.rowcount, -1) - - rows = self.cursor.fetchall() - self.assertEqual(len(rows), count) - self.assertEqual(self.cursor.rowcount, -1) - - def test_rowcount_reset(self): - "Ensure rowcount is reset to -1" - - self.cursor.execute("create table t1(i int)") - count = 4 - for i in range(count): - self.cursor.execute("insert into t1 values (?)", i) - self.assertEqual(self.cursor.rowcount, 1) - - self.cursor.execute("create table t2(i int)") - self.assertEqual(self.cursor.rowcount, -1) - - # - # always return Cursor - # - - # In the 2.0.x branch, Cursor.execute sometimes returned the cursor and sometimes the rowcount. This proved very - # confusing when things went wrong and added very little value even when things went right since users could always - # use: cursor.execute("...").rowcount - - def test_retcursor_delete(self): - self.cursor.execute("create table t1(i int)") - self.cursor.execute("insert into t1 values (1)") - v = self.cursor.execute("delete from t1") - self.assertEqual(v, self.cursor) - - def test_retcursor_nodata(self): - """ - This represents a different code path than a delete that deleted something. - - The return value is SQL_NO_DATA and code after it was causing an error. We could use SQL_NO_DATA to step over - the code that errors out and drop down to the same SQLRowCount code. 
- """ - self.cursor.execute("create table t1(i int)") - # This is a different code path internally. - v = self.cursor.execute("delete from t1") - self.assertEqual(v, self.cursor) - - def test_retcursor_select(self): - self.cursor.execute("create table t1(i int)") - self.cursor.execute("insert into t1 values (1)") - v = self.cursor.execute("select * from t1") - self.assertEqual(v, self.cursor) - - # - # misc - # - - def test_lower_case(self): - "Ensure pyodbc.lowercase forces returned column names to lowercase." - - # Has to be set before creating the cursor, so we must recreate self.cursor. - - pyodbc.lowercase = True - self.cursor = self.cnxn.cursor() - - self.cursor.execute("create table t1(Abc int, dEf int)") - self.cursor.execute("select * from t1") - - names = [ t[0] for t in self.cursor.description ] - names.sort() - - self.assertEqual(names, [ "abc", "def" ]) - - # Put it back so other tests don't fail. - pyodbc.lowercase = False - - def test_row_description(self): - """ - Ensure Cursor.description is accessible as Row.cursor_description. - """ - self.cursor = self.cnxn.cursor() - self.cursor.execute("create table t1(a int, b char(3))") - self.cnxn.commit() - self.cursor.execute("insert into t1 values(1, 'abc')") - - row = self.cursor.execute("select * from t1").fetchone() - - self.assertEqual(self.cursor.description, row.cursor_description) - - - def test_temp_select(self): - # A project was failing to create temporary tables via select into. 
- self.cursor.execute("create table t1(s char(7))") - self.cursor.execute("insert into t1 values(?)", "testing") - v = self.cursor.execute("select * from t1").fetchone()[0] - self.assertEqual(type(v), str) - self.assertEqual(v, "testing") - - self.cursor.execute("select s into t2 from t1") - v = self.cursor.execute("select * from t1").fetchone()[0] - self.assertEqual(type(v), str) - self.assertEqual(v, "testing") - - - def test_money(self): - d = Decimal('123456.78') - self.cursor.execute("create table t1(i int identity(1,1), m money)") - self.cursor.execute("insert into t1(m) values (?)", d) - v = self.cursor.execute("select m from t1").fetchone()[0] - self.assertEqual(v, d) - - - def test_executemany(self): - self.cursor.execute("create table t1(a int, b varchar(10))") - - params = [ (i, str(i)) for i in range(1, 6) ] - - self.cursor.executemany("insert into t1(a, b) values (?,?)", params) - - count = self.cursor.execute("select count(*) from t1").fetchone()[0] - self.assertEqual(count, len(params)) - - self.cursor.execute("select a, b from t1 order by a") - rows = self.cursor.fetchall() - self.assertEqual(count, len(rows)) - - for param, row in zip(params, rows): - self.assertEqual(param[0], row[0]) - self.assertEqual(param[1], row[1]) - - - def test_executemany_one(self): - "Pass executemany a single sequence" - self.cursor.execute("create table t1(a int, b varchar(10))") - - params = [ (1, "test") ] - - self.cursor.executemany("insert into t1(a, b) values (?,?)", params) - - count = self.cursor.execute("select count(*) from t1").fetchone()[0] - self.assertEqual(count, len(params)) - - self.cursor.execute("select a, b from t1 order by a") - rows = self.cursor.fetchall() - self.assertEqual(count, len(rows)) - - for param, row in zip(params, rows): - self.assertEqual(param[0], row[0]) - self.assertEqual(param[1], row[1]) - - - def test_executemany_failure(self): - """ - Ensure that an exception is raised if one query in an executemany fails. 
- """ - self.cursor.execute("create table t1(a int, b varchar(10))") - - params = [ (1, 'good'), - ('error', 'not an int'), - (3, 'good') ] - - self.assertRaises(pyodbc.Error, self.cursor.executemany, "insert into t1(a, b) value (?, ?)", params) - - - def test_row_slicing(self): - self.cursor.execute("create table t1(a int, b int, c int, d int)"); - self.cursor.execute("insert into t1 values(1,2,3,4)") - - row = self.cursor.execute("select * from t1").fetchone() - - result = row[:] - self.assertTrue(result is row) - - result = row[:-1] - self.assertEqual(result, (1,2,3)) - - result = row[0:4] - self.assertTrue(result is row) - - - def test_row_repr(self): - self.cursor.execute("create table t1(a int, b int, c int, d int)"); - self.cursor.execute("insert into t1 values(1,2,3,4)") - - row = self.cursor.execute("select * from t1").fetchone() - - result = str(row) - self.assertEqual(result, "(1, 2, 3, 4)") - - result = str(row[:-1]) - self.assertEqual(result, "(1, 2, 3)") - - result = str(row[:1]) - self.assertEqual(result, "(1,)") - - - def test_concatenation(self): - v2 = '0123456789' * 30 - v3 = '9876543210' * 30 - - self.cursor.execute("create table t1(c1 int identity(1, 1), c2 varchar(300), c3 varchar(300))") - self.cursor.execute("insert into t1(c2, c3) values (?,?)", v2, v3) - - row = self.cursor.execute("select c2, c3, c2 + c3 as both from t1").fetchone() - - self.assertEqual(row.both, v2 + v3) - - def test_view_select(self): - # Reported in forum: Can't select from a view? I think I do this a lot, but another test never hurts. - - # Create a table (t1) with 3 rows and a view (t2) into it. 
- self.cursor.execute("create table t1(c1 int identity(1, 1), c2 varchar(50))") - for i in range(3): - self.cursor.execute("insert into t1(c2) values (?)", "string%s" % i) - self.cursor.execute("create view t2 as select * from t1") - - # Select from the view - self.cursor.execute("select * from t2") - rows = self.cursor.fetchall() - self.assertTrue(rows is not None) - self.assertTrue(len(rows) == 3) - - def test_autocommit(self): - self.assertEqual(self.cnxn.autocommit, False) - - othercnxn = pyodbc.connect(self.connection_string, autocommit=True) - self.assertEqual(othercnxn.autocommit, True) - - othercnxn.autocommit = False - self.assertEqual(othercnxn.autocommit, False) - - def test_unicode_results(self): - "Ensure unicode_results forces Unicode" - othercnxn = pyodbc.connect(self.connection_string, unicode_results=True) - othercursor = othercnxn.cursor() - - # ANSI data in an ANSI column ... - othercursor.execute("create table t1(s varchar(20))") - othercursor.execute("insert into t1 values(?)", 'test') - - # ... should be returned as Unicode - value = othercursor.execute("select s from t1").fetchone()[0] - self.assertEqual(value, u'test') - - - def test_informix_callproc(self): - try: - self.cursor.execute("drop procedure pyodbctest") - self.cnxn.commit() - except: - pass - - self.cursor.execute("create table t1(s varchar(10))") - self.cursor.execute("insert into t1 values(?)", "testing") - - self.cursor.execute(""" - create procedure pyodbctest @var1 varchar(32) - as - begin - select s - from t1 - return - end - """) - self.cnxn.commit() - - # for row in self.cursor.procedureColumns('pyodbctest'): - # print row.procedure_name, row.column_name, row.column_type, row.type_name - - self.cursor.execute("exec pyodbctest 'hi'") - - # print self.cursor.description - # for row in self.cursor: - # print row.s - - def test_skip(self): - # Insert 1, 2, and 3. Fetch 1, skip 2, fetch 3. 
- - self.cursor.execute("create table t1(id int)"); - for i in range(1, 5): - self.cursor.execute("insert into t1 values(?)", i) - self.cursor.execute("select id from t1 order by id") - self.assertEqual(self.cursor.fetchone()[0], 1) - self.cursor.skip(2) - self.assertEqual(self.cursor.fetchone()[0], 4) - - def test_timeout(self): - self.assertEqual(self.cnxn.timeout, 0) # defaults to zero (off) - - self.cnxn.timeout = 30 - self.assertEqual(self.cnxn.timeout, 30) - - self.cnxn.timeout = 0 - self.assertEqual(self.cnxn.timeout, 0) - - def test_sets_execute(self): - # Only lists and tuples are allowed. - def f(): - self.cursor.execute("create table t1 (word varchar (100))") - words = set (['a']) - self.cursor.execute("insert into t1 (word) VALUES (?)", [words]) - - self.assertRaises(pyodbc.ProgrammingError, f) - - def test_sets_executemany(self): - # Only lists and tuples are allowed. - def f(): - self.cursor.execute("create table t1 (word varchar (100))") - words = set (['a']) - self.cursor.executemany("insert into t1 (word) values (?)", [words]) - - self.assertRaises(TypeError, f) - - def test_row_execute(self): - "Ensure we can use a Row object as a parameter to execute" - self.cursor.execute("create table t1(n int, s varchar(10))") - self.cursor.execute("insert into t1 values (1, 'a')") - row = self.cursor.execute("select n, s from t1").fetchone() - self.assertNotEqual(row, None) - - self.cursor.execute("create table t2(n int, s varchar(10))") - self.cursor.execute("insert into t2 values (?, ?)", row) - - def test_row_executemany(self): - "Ensure we can use a Row object as a parameter to executemany" - self.cursor.execute("create table t1(n int, s varchar(10))") - - for i in range(3): - self.cursor.execute("insert into t1 values (?, ?)", i, chr(ord('a')+i)) - - rows = self.cursor.execute("select n, s from t1").fetchall() - self.assertNotEqual(len(rows), 0) - - self.cursor.execute("create table t2(n int, s varchar(10))") - self.cursor.executemany("insert into t2 
values (?, ?)", rows) - - def test_description(self): - "Ensure cursor.description is correct" - - self.cursor.execute("create table t1(n int, s varchar(8), d decimal(5,2))") - self.cursor.execute("insert into t1 values (1, 'abc', '1.23')") - self.cursor.execute("select * from t1") - - # (I'm not sure the precision of an int is constant across different versions, bits, so I'm hand checking the - # items I do know. - - # int - t = self.cursor.description[0] - self.assertEqual(t[0], 'n') - self.assertEqual(t[1], int) - self.assertEqual(t[5], 0) # scale - self.assertEqual(t[6], True) # nullable - - # varchar(8) - t = self.cursor.description[1] - self.assertEqual(t[0], 's') - self.assertEqual(t[1], str) - self.assertEqual(t[4], 8) # precision - self.assertEqual(t[5], 0) # scale - self.assertEqual(t[6], True) # nullable - - # decimal(5, 2) - t = self.cursor.description[2] - self.assertEqual(t[0], 'd') - self.assertEqual(t[1], Decimal) - self.assertEqual(t[4], 5) # precision - self.assertEqual(t[5], 2) # scale - self.assertEqual(t[6], True) # nullable - - - def test_none_param(self): - "Ensure None can be used for params other than the first" - # Some driver/db versions would fail if NULL was not the first parameter because SQLDescribeParam (only used - # with NULL) could not be used after the first call to SQLBindParameter. This means None always worked for the - # first column, but did not work for later columns. - # - # If SQLDescribeParam doesn't work, pyodbc would use VARCHAR which almost always worked. However, - # binary/varbinary won't allow an implicit conversion. 
- - self.cursor.execute("create table t1(n int, blob varbinary(max))") - self.cursor.execute("insert into t1 values (1, newid())") - row = self.cursor.execute("select * from t1").fetchone() - self.assertEqual(row.n, 1) - self.assertEqual(type(row.blob), buffer) - - self.cursor.execute("update t1 set n=?, blob=?", 2, None) - row = self.cursor.execute("select * from t1").fetchone() - self.assertEqual(row.n, 2) - self.assertEqual(row.blob, None) - - - def test_output_conversion(self): - def convert(value): - # `value` will be a string. We'll simply add an X at the beginning at the end. - return 'X' + value + 'X' - self.cnxn.add_output_converter(pyodbc.SQL_VARCHAR, convert) - self.cursor.execute("create table t1(n int, v varchar(10))") - self.cursor.execute("insert into t1 values (1, '123.45')") - value = self.cursor.execute("select v from t1").fetchone()[0] - self.assertEqual(value, 'X123.45X') - - # Now clear the conversions and try again. There should be no Xs this time. - self.cnxn.clear_output_converters() - value = self.cursor.execute("select v from t1").fetchone()[0] - self.assertEqual(value, '123.45') - - - def test_too_large(self): - """Ensure error raised if insert fails due to truncation""" - value = 'x' * 1000 - self.cursor.execute("create table t1(s varchar(800))") - def test(): - self.cursor.execute("insert into t1 values (?)", value) - self.assertRaises(pyodbc.DataError, test) - - def test_geometry_null_insert(self): - def convert(value): - return value - - self.cnxn.add_output_converter(-151, convert) # -151 is SQL Server's geometry - self.cursor.execute("create table t1(n int, v geometry)") - self.cursor.execute("insert into t1 values (?, ?)", 1, None) - value = self.cursor.execute("select v from t1").fetchone()[0] - self.assertEqual(value, None) - self.cnxn.clear_output_converters() - - def test_login_timeout(self): - # This can only test setting since there isn't a way to cause it to block on the server side. 
- cnxns = pyodbc.connect(self.connection_string, timeout=2) - - def test_row_equal(self): - self.cursor.execute("create table t1(n int, s varchar(20))") - self.cursor.execute("insert into t1 values (1, 'test')") - row1 = self.cursor.execute("select n, s from t1").fetchone() - row2 = self.cursor.execute("select n, s from t1").fetchone() - b = (row1 == row2) - self.assertEqual(b, True) - - def test_row_gtlt(self): - self.cursor.execute("create table t1(n int, s varchar(20))") - self.cursor.execute("insert into t1 values (1, 'test1')") - self.cursor.execute("insert into t1 values (1, 'test2')") - rows = self.cursor.execute("select n, s from t1 order by s").fetchall() - self.assertTrue(rows[0] < rows[1]) - self.assertTrue(rows[0] <= rows[1]) - self.assertTrue(rows[1] > rows[0]) - self.assertTrue(rows[1] >= rows[0]) - self.assertTrue(rows[0] != rows[1]) - - rows = list(rows) - rows.sort() # uses < - - def test_context_manager(self): - with pyodbc.connect(self.connection_string) as cnxn: - cnxn.getinfo(pyodbc.SQL_DEFAULT_TXN_ISOLATION) - - # The connection should be closed now. - def test(): - cnxn.getinfo(pyodbc.SQL_DEFAULT_TXN_ISOLATION) - self.assertRaises(pyodbc.ProgrammingError, test) - - def test_untyped_none(self): - # From issue 129 - value = self.cursor.execute("select ?", None).fetchone()[0] - self.assertEqual(value, None) - - def test_large_update_nodata(self): - self.cursor.execute('create table t1(a varbinary(max))') - hundredkb = buffer('x'*100*1024) - self.cursor.execute('update t1 set a=? 
where 1=0', (hundredkb,)) - - def test_func_param(self): - self.cursor.execute(''' - create function func1 (@testparam varchar(4)) - returns @rettest table (param varchar(4)) - as - begin - insert @rettest - select @testparam - return - end - ''') - self.cnxn.commit() - value = self.cursor.execute("select * from func1(?)", 'test').fetchone()[0] - self.assertEqual(value, 'test') - - def test_no_fetch(self): - # Issue 89 with FreeTDS: Multiple selects (or catalog functions that issue selects) without fetches seem to - # confuse the driver. - self.cursor.execute('select 1') - self.cursor.execute('select 1') - self.cursor.execute('select 1') - - def test_drivers(self): - drivers = pyodbc.drivers() - self.assertEqual(list, type(drivers)) - self.assertTrue(len(drivers) > 1) - - m = re.search('DRIVER={?([^}]+?)}?;', self.connection_string, re.IGNORECASE) - current = m.group(1) - self.assertTrue(current in drivers) - - def test_prepare_cleanup(self): - # When statement is prepared, it is kept in case the next execute uses the same statement. This must be - # removed when a non-execute statement is used that returns results, such as SQLTables. - - self.cursor.execute("select top 1 name from sysobjects where name = ?", "bogus") - self.cursor.fetchone() - - self.cursor.tables("bogus") - - self.cursor.execute("select top 1 name from sysobjects where name = ?", "bogus") - self.cursor.fetchone() - - -def main(): - from optparse import OptionParser - parser = OptionParser(usage=usage) - parser.add_option("-v", "--verbose", action="count", help="Increment test verbosity (can be used multiple times)") - parser.add_option("-d", "--debug", action="store_true", default=False, help="Print debugging items") - parser.add_option("-t", "--test", help="Run only the named test") - - (options, args) = parser.parse_args() - - if len(args) > 1: - parser.error('Only one argument is allowed. 
Do you need quotes around the connection string?') - - if not args: - connection_string = load_setup_connection_string('informixtests') - - if not connection_string: - parser.print_help() - raise SystemExit() - else: - connection_string = args[0] - - if options.verbose: - cnxn = pyodbc.connect(connection_string) - print_library_info(cnxn) - cnxn.close() - - suite = load_tests(InformixTestCase, options.test, connection_string) - - testRunner = unittest.TextTestRunner(verbosity=options.verbose) - result = testRunner.run(suite) - - return result - - -if __name__ == '__main__': - - # Add the build directory to the path so we're testing the latest build, not the installed version. - - add_to_path() - - import pyodbc - sys.exit(0 if main().wasSuccessful() else 1) diff --git a/tests2/mysqltests.py b/tests2/mysqltests.py deleted file mode 100755 index d756c00c..00000000 --- a/tests2/mysqltests.py +++ /dev/null @@ -1,762 +0,0 @@ -#!/usr/bin/python -# -*- coding: latin-1 -*- - -usage = """\ -usage: %prog [options] connection_string - -Unit tests for MySQL. To use, pass a connection string as the parameter. -The tests will create and drop tables t1 and t2 as necessary. - -These tests use the pyodbc library from the build directory, not the version installed in your -Python directories. You must run `python setup.py build` before running these tests. - -You can also put the connection string into a tmp/setup.cfg file like so: - - [mysqltests] - connection-string=DRIVER=MySQL ODBC 8.0 ANSI Driver;charset=utf8mb4;SERVER=localhost;DATABASE=pyodbc;UID=root;PWD=rootpw - -Note: Use the "ANSI" (not the "Unicode") driver and include charset=utf8mb4 in the connection string so the high-Unicode tests won't fail. 
-""" - -import sys, os, re -import unittest -from decimal import Decimal -from datetime import datetime, date, time -from os.path import join, getsize, dirname, abspath, basename -from testutils import * - -_TESTSTR = '0123456789-abcdefghijklmnopqrstuvwxyz-' - -def _generate_test_string(length): - """ - Returns a string of composed of `seed` to make a string `length` characters long. - - To enhance performance, there are 3 ways data is read, based on the length of the value, so most data types are - tested with 3 lengths. This function helps us generate the test data. - - We use a recognizable data set instead of a single character to make it less likely that "overlap" errors will - be hidden and to help us manually identify where a break occurs. - """ - if length <= len(_TESTSTR): - return _TESTSTR[:length] - - c = (length + len(_TESTSTR)-1) / len(_TESTSTR) - v = _TESTSTR * c - return v[:length] - -class MySqlTestCase(unittest.TestCase): - - SMALL_FENCEPOST_SIZES = [ 0, 1, 255, 256, 510, 511, 512, 1023, 1024, 2047, 2048, 4000 ] - LARGE_FENCEPOST_SIZES = [ 4095, 4096, 4097, 10 * 1024, 20 * 1024 ] - - ANSI_FENCEPOSTS = [ _generate_test_string(size) for size in SMALL_FENCEPOST_SIZES ] - UNICODE_FENCEPOSTS = [ unicode(s) for s in ANSI_FENCEPOSTS ] - BLOB_FENCEPOSTS = ANSI_FENCEPOSTS + [ _generate_test_string(size) for size in LARGE_FENCEPOST_SIZES ] - - def __init__(self, method_name, connection_string): - unittest.TestCase.__init__(self, method_name) - self.connection_string = connection_string - - def setUp(self): - self.cnxn = pyodbc.connect(self.connection_string) - self.cursor = self.cnxn.cursor() - - self.cnxn.setdecoding(pyodbc.SQL_CHAR, encoding='utf-8') - self.cnxn.setdecoding(pyodbc.SQL_WCHAR, encoding='utf-8') - self.cnxn.setencoding(str, encoding='utf-8') - self.cnxn.setencoding(unicode, encoding='utf-8', ctype=pyodbc.SQL_CHAR) - - # As of libmyodbc5w 5.3 SQLGetTypeInfo returns absurdly small sizes - # leading to slow writes. 
Override them: - self.cnxn.maxwrite = 1024 * 1024 * 1024 - - for i in range(3): - try: - self.cursor.execute("drop table t%d" % i) - self.cnxn.commit() - except: - pass - - for i in range(3): - try: - self.cursor.execute("drop procedure proc%d" % i) - self.cnxn.commit() - except: - pass - - self.cnxn.rollback() - - def tearDown(self): - try: - self.cursor.close() - self.cnxn.close() - except: - # If we've already closed the cursor or connection, exceptions are thrown. - pass - - def test_multiple_bindings(self): - "More than one bind and select on a cursor" - self.cursor.execute("create table t1(n int)") - self.cursor.execute("insert into t1 values (?)", 1) - self.cursor.execute("insert into t1 values (?)", 2) - self.cursor.execute("insert into t1 values (?)", 3) - for i in range(3): - self.cursor.execute("select n from t1 where n < ?", 10) - self.cursor.execute("select n from t1 where n < 3") - - - def test_different_bindings(self): - self.cursor.execute("create table t1(n int)") - self.cursor.execute("create table t2(d datetime)") - self.cursor.execute("insert into t1 values (?)", 1) - self.cursor.execute("insert into t2 values (?)", datetime.now()) - - def test_drivers(self): - p = pyodbc.drivers() - self.assertTrue(isinstance(p, list)) - - def test_datasources(self): - p = pyodbc.dataSources() - self.assertTrue(isinstance(p, dict)) - - def test_getinfo_string(self): - value = self.cnxn.getinfo(pyodbc.SQL_CATALOG_NAME_SEPARATOR) - self.assertTrue(isinstance(value, str)) - - def test_getinfo_bool(self): - value = self.cnxn.getinfo(pyodbc.SQL_ACCESSIBLE_TABLES) - self.assertTrue(isinstance(value, bool)) - - def test_getinfo_int(self): - value = self.cnxn.getinfo(pyodbc.SQL_DEFAULT_TXN_ISOLATION) - self.assertTrue(isinstance(value, (int, long))) - - def test_getinfo_smallint(self): - value = self.cnxn.getinfo(pyodbc.SQL_CONCAT_NULL_BEHAVIOR) - self.assertTrue(isinstance(value, int)) - - def _test_strtype(self, sqltype, value, colsize=None): - """ - The 
implementation for string, Unicode, and binary tests. - """ - assert colsize is None or (value is None or colsize >= len(value)) - - if colsize: - sql = "create table t1(s %s(%s))" % (sqltype, colsize) - else: - sql = "create table t1(s %s)" % sqltype - - try: - self.cursor.execute(sql) - except: - print '>>>>', sql - self.cursor.execute("insert into t1 values(?)", value) - v = self.cursor.execute("select * from t1").fetchone()[0] - - # Removing this check for now until I get the charset working properly. - # If we use latin1, results are 'str' instead of 'unicode', which would be - # correct. Setting charset to ucs-2 causes a crash in SQLGetTypeInfo(SQL_DATETIME). - # self.assertEqual(type(v), type(value)) - - if value is not None: - self.assertEqual(len(v), len(value)) - - self.assertEqual(v, value) - - def test_raw_encoding(self): - # Read something that is valid ANSI and make sure it comes through. - # The database is actually going to send us UTF-8 so don't use extended - # characters. - # - # REVIEW: Is there a good way to write UTF-8 into the database and read - # it out? - self.cnxn.setencoding(str, encoding='raw') - - expected = "testing" - self.cursor.execute("create table t1(s varchar(20))") - self.cursor.execute("insert into t1 values (?)", expected) - result = self.cursor.execute("select * from t1").fetchone()[0] - self.assertEqual(result, expected) - - def test_raw_decoding(self): - # Read something that is valid ANSI and make sure it comes through. - # The database is actually going to send us UTF-8 so don't use extended - # characters. - # - # REVIEW: Is there a good way to write UTF-8 into the database and read - # it out? - self.cnxn.setdecoding(pyodbc.SQL_CHAR, encoding='raw') - self._test_strtype('varchar', _TESTSTR, 100) - - # - # varchar - # - - def test_varchar_null(self): - self._test_strtype('varchar', None, 100) - - # Generate a test for each fencepost size: test_varchar_0, etc. 
- def _maketest(value): - def t(self): - self._test_strtype('varchar', value, max(1, len(value))) - return t - for value in ANSI_FENCEPOSTS: - locals()['test_varchar_%s' % len(value)] = _maketest(value) - - # Generate a test using Unicode. - for value in UNICODE_FENCEPOSTS: - locals()['test_wvarchar_%s' % len(value)] = _maketest(value) - - def test_varchar_many(self): - self.cursor.execute("create table t1(c1 varchar(300), c2 varchar(300), c3 varchar(300))") - - v1 = 'ABCDEFGHIJ' * 30 - v2 = '0123456789' * 30 - v3 = '9876543210' * 30 - - self.cursor.execute("insert into t1(c1, c2, c3) values (?,?,?)", v1, v2, v3); - row = self.cursor.execute("select c1, c2, c3 from t1").fetchone() - - self.assertEqual(v1, row.c1) - self.assertEqual(v2, row.c2) - self.assertEqual(v3, row.c3) - - def test_varchar_upperlatin(self): - self._test_strtype('varchar', u'', colsize=3) - - # - # binary - # - - def test_null_binary(self): - self._test_strtype('varbinary', None, 100) - - def test_large_null_binary(self): - # Bug 1575064 - self._test_strtype('varbinary', None, 4000) - - # Generate a test for each fencepost size: test_binary_0, etc. - def _maketest(value): - def t(self): - self._test_strtype('varbinary', bytearray(value), max(1, len(value))) - return t - for value in ANSI_FENCEPOSTS: - locals()['test_binary_%s' % len(value)] = _maketest(value) - - # - # blob - # - - def test_blob_null(self): - self._test_strtype('blob', None) - - # Generate a test for each fencepost size: test_blob_0, etc. - def _maketest(value): - def t(self): - self._test_strtype('blob', bytearray(value)) - return t - for value in BLOB_FENCEPOSTS: - locals()['test_blob_%s' % len(value)] = _maketest(value) - - def test_blob_upperlatin(self): - self._test_strtype('blob', bytearray('')) - - # - # text - # - - def test_null_text(self): - self._test_strtype('text', None) - - # Generate a test for each fencepost size: test_text_0, etc. 
- def _maketest(value): - def t(self): - self._test_strtype('text', value) - return t - for value in ANSI_FENCEPOSTS: - locals()['test_text_%s' % len(value)] = _maketest(value) - - def test_text_upperlatin(self): - self._test_strtype('text', u'') - - # - # unicode - # - - def test_unicode_query(self): - self.cursor.execute(u"select 1") - - # - # bit - # - - # The MySQL driver maps BIT colums to the ODBC bit data type, but they aren't behaving quite like a Boolean value - # (which is what the ODBC bit data type really represents). The MySQL BOOL data type is just an alias for a small - # integer, so pyodbc can't recognize it and map it back to True/False. - # - # You can use both BIT and BOOL and they will act as you expect if you treat them as integers. You can write 0 and - # 1 to them and they will work. - - # def test_bit(self): - # value = True - # self.cursor.execute("create table t1(b bit)") - # self.cursor.execute("insert into t1 values (?)", value) - # v = self.cursor.execute("select b from t1").fetchone()[0] - # self.assertEqual(type(v), bool) - # self.assertEqual(v, value) - # - # def test_bit_string_true(self): - # self.cursor.execute("create table t1(b bit)") - # self.cursor.execute("insert into t1 values (?)", "xyzzy") - # v = self.cursor.execute("select b from t1").fetchone()[0] - # self.assertEqual(type(v), bool) - # self.assertEqual(v, True) - # - # def test_bit_string_false(self): - # self.cursor.execute("create table t1(b bit)") - # self.cursor.execute("insert into t1 values (?)", "") - # v = self.cursor.execute("select b from t1").fetchone()[0] - # self.assertEqual(type(v), bool) - # self.assertEqual(v, False) - - # - # decimal - # - - def test_small_decimal(self): - # value = Decimal('1234567890987654321') - value = Decimal('100010') # (I use this because the ODBC docs tell us how the bytes should look in the C struct) - self.cursor.execute("create table t1(d numeric(19))") - self.cursor.execute("insert into t1 values(?)", value) - v = 
self.cursor.execute("select * from t1").fetchone()[0] - self.assertEqual(type(v), Decimal) - self.assertEqual(v, value) - - - def test_small_decimal_scale(self): - # The same as small_decimal, except with a different scale. This value exactly matches the ODBC documentation - # example in the C Data Types appendix. - value = '1000.10' - value = Decimal(value) - self.cursor.execute("create table t1(d numeric(20,6))") - self.cursor.execute("insert into t1 values(?)", value) - v = self.cursor.execute("select * from t1").fetchone()[0] - self.assertEqual(type(v), Decimal) - self.assertEqual(v, value) - - - def test_negative_decimal_scale(self): - value = Decimal('-10.0010') - self.cursor.execute("create table t1(d numeric(19,4))") - self.cursor.execute("insert into t1 values(?)", value) - v = self.cursor.execute("select * from t1").fetchone()[0] - self.assertEqual(type(v), Decimal) - self.assertEqual(v, value) - - def test_subquery_params(self): - """Ensure parameter markers work in a subquery""" - self.cursor.execute("create table t1(id integer, s varchar(20))") - self.cursor.execute("insert into t1 values (?,?)", 1, 'test') - row = self.cursor.execute(""" - select x.id - from ( - select id - from t1 - where s = ? - and id between ? and ? - ) x - """, 'test', 1, 10).fetchone() - self.assertNotEqual(row, None) - self.assertEqual(row[0], 1) - - def _exec(self): - self.cursor.execute(self.sql) - - def test_close_cnxn(self): - """Make sure using a Cursor after closing its connection doesn't crash.""" - - self.cursor.execute("create table t1(id integer, s varchar(20))") - self.cursor.execute("insert into t1 values (?,?)", 1, 'test') - self.cursor.execute("select * from t1") - - self.cnxn.close() - - # Now that the connection is closed, we expect an exception. (If the code attempts to use - # the HSTMT, we'll get an access violation instead.) 
- self.sql = "select * from t1" - self.assertRaises(pyodbc.ProgrammingError, self._exec) - - def test_empty_string(self): - self.cursor.execute("create table t1(s varchar(20))") - self.cursor.execute("insert into t1 values(?)", "") - - def test_fixed_str(self): - value = u"testing" - self.cursor.execute("create table t1(s char(7))") - self.cursor.execute("insert into t1 values(?)", "testing") - v = self.cursor.execute("select * from t1").fetchone()[0] - self.assertEqual(v, value) - - def test_negative_row_index(self): - self.cursor.execute("create table t1(s varchar(20))") - self.cursor.execute("insert into t1 values(?)", "1") - row = self.cursor.execute("select * from t1").fetchone() - self.assertEqual(row[0], "1") - self.assertEqual(row[-1], "1") - - def test_version(self): - self.assertEqual(3, len(pyodbc.version.split('.'))) # 1.3.1 etc. - - # - # date, time, datetime - # - - def test_datetime(self): - value = datetime(2007, 1, 15, 3, 4, 5) - - self.cursor.execute("create table t1(dt datetime)") - self.cursor.execute("insert into t1 values (?)", value) - - result = self.cursor.execute("select dt from t1").fetchone()[0] - self.assertEqual(value, result) - - def test_date(self): - value = date(2001, 1, 1) - - self.cursor.execute("create table t1(dt date)") - self.cursor.execute("insert into t1 values (?)", value) - - result = self.cursor.execute("select dt from t1").fetchone()[0] - self.assertEqual(type(result), type(value)) - self.assertEqual(result, value) - - # - # ints and floats - # - - def test_int(self): - value = 1234 - self.cursor.execute("create table t1(n int)") - self.cursor.execute("insert into t1 values (?)", value) - result = self.cursor.execute("select n from t1").fetchone()[0] - self.assertEqual(result, value) - - def test_negative_int(self): - value = -1 - self.cursor.execute("create table t1(n int)") - self.cursor.execute("insert into t1 values (?)", value) - result = self.cursor.execute("select n from t1").fetchone()[0] - 
self.assertEqual(result, value) - - def test_bigint(self): - - # This fails on 64-bit Fedora with 5.1. - # Should return 0x0123456789 - # Does return 0x0000000000 - # - # Top 4 bytes are returned as 0x00 00 00 00. If the input is high enough, they are returned as 0xFF FF FF FF. - input = 0x123456789 - self.cursor.execute("create table t1(d bigint)") - self.cursor.execute("insert into t1 values (?)", input) - result = self.cursor.execute("select d from t1").fetchone()[0] - self.assertEqual(result, input) - - def test_float(self): - value = 1234.5 - self.cursor.execute("create table t1(n float)") - self.cursor.execute("insert into t1 values (?)", value) - result = self.cursor.execute("select n from t1").fetchone()[0] - self.assertEqual(result, value) - - def test_negative_float(self): - value = -200 - self.cursor.execute("create table t1(n float)") - self.cursor.execute("insert into t1 values (?)", value) - result = self.cursor.execute("select n from t1").fetchone()[0] - self.assertEqual(value, result) - - - def test_date(self): - value = date.today() - - self.cursor.execute("create table t1(d date)") - self.cursor.execute("insert into t1 values (?)", value) - - result = self.cursor.execute("select d from t1").fetchone()[0] - self.assertEqual(value, result) - - - def test_time(self): - value = datetime.now().time() - - # We aren't yet writing values using the new extended time type so the value written to the database is only - # down to the second. 
- value = value.replace(microsecond=0) - - self.cursor.execute("create table t1(t time)") - self.cursor.execute("insert into t1 values (?)", value) - - result = self.cursor.execute("select t from t1").fetchone()[0] - self.assertEqual(value, result) - - # - # misc - # - - def test_rowcount_delete(self): - self.assertEqual(self.cursor.rowcount, -1) - self.cursor.execute("create table t1(i int)") - count = 4 - for i in range(count): - self.cursor.execute("insert into t1 values (?)", i) - self.cursor.execute("delete from t1") - self.assertEqual(self.cursor.rowcount, count) - - def test_rowcount_nodata(self): - """ - This represents a different code path than a delete that deleted something. - - The return value is SQL_NO_DATA and code after it was causing an error. We could use SQL_NO_DATA to step over - the code that errors out and drop down to the same SQLRowCount code. On the other hand, we could hardcode a - zero return value. - """ - self.cursor.execute("create table t1(i int)") - # This is a different code path internally. - self.cursor.execute("delete from t1") - self.assertEqual(self.cursor.rowcount, 0) - - def test_rowcount_select(self): - """ - Ensure Cursor.rowcount is set properly after a select statement. - - pyodbc calls SQLRowCount after each execute and sets Cursor.rowcount. Databases can return the actual rowcount - or they can return -1 if it would help performance. MySQL seems to always return the correct rowcount. 
- """ - self.cursor.execute("create table t1(i int)") - count = 4 - for i in range(count): - self.cursor.execute("insert into t1 values (?)", i) - self.cursor.execute("select * from t1") - self.assertEqual(self.cursor.rowcount, count) - - rows = self.cursor.fetchall() - self.assertEqual(len(rows), count) - self.assertEqual(self.cursor.rowcount, count) - - def test_rowcount_reset(self): - "Ensure rowcount is reset to -1" - - # The Python DB API says that rowcount should be set to -1 and most ODBC drivers let us know there are no - # records. MySQL always returns 0, however. Without parsing the SQL (which we are not going to do), I'm not - # sure how we can tell the difference and set the value to -1. For now, I'll have this test check for 0. - - self.cursor.execute("create table t1(i int)") - count = 4 - for i in range(count): - self.cursor.execute("insert into t1 values (?)", i) - self.assertEqual(self.cursor.rowcount, 1) - - self.cursor.execute("create table t2(i int)") - self.assertEqual(self.cursor.rowcount, 0) - - def test_lower_case(self): - "Ensure pyodbc.lowercase forces returned column names to lowercase." - - # Has to be set before creating the cursor, so we must recreate self.cursor. - - pyodbc.lowercase = True - self.cursor = self.cnxn.cursor() - - self.cursor.execute("create table t1(Abc int, dEf int)") - self.cursor.execute("select * from t1") - - names = [ t[0] for t in self.cursor.description ] - names.sort() - - self.assertEqual(names, [ "abc", "def" ]) - - # Put it back so other tests don't fail. - pyodbc.lowercase = False - - def test_row_description(self): - """ - Ensure Cursor.description is accessible as Row.cursor_description. 
- """ - self.cursor = self.cnxn.cursor() - self.cursor.execute("create table t1(a int, b char(3))") - self.cnxn.commit() - self.cursor.execute("insert into t1 values(1, 'abc')") - - row = self.cursor.execute("select * from t1").fetchone() - self.assertEqual(self.cursor.description, row.cursor_description) - - - def test_executemany(self): - self.cursor.execute("create table t1(a int, b varchar(10))") - - params = [ (i, str(i)) for i in range(1, 6) ] - - self.cursor.executemany("insert into t1(a, b) values (?,?)", params) - - count = self.cursor.execute("select count(*) from t1").fetchone()[0] - self.assertEqual(count, len(params)) - - self.cursor.execute("select a, b from t1 order by a") - rows = self.cursor.fetchall() - self.assertEqual(count, len(rows)) - - for param, row in zip(params, rows): - self.assertEqual(param[0], row[0]) - self.assertEqual(param[1], row[1]) - - - def test_executemany_one(self): - "Pass executemany a single sequence" - self.cursor.execute("create table t1(a int, b varchar(10))") - - params = [ (1, "test") ] - - self.cursor.executemany("insert into t1(a, b) values (?,?)", params) - - count = self.cursor.execute("select count(*) from t1").fetchone()[0] - self.assertEqual(count, len(params)) - - self.cursor.execute("select a, b from t1 order by a") - rows = self.cursor.fetchall() - self.assertEqual(count, len(rows)) - - for param, row in zip(params, rows): - self.assertEqual(param[0], row[0]) - self.assertEqual(param[1], row[1]) - - - # REVIEW: The following fails. Research. - - # def test_executemany_failure(self): - # """ - # Ensure that an exception is raised if one query in an executemany fails. 
- # """ - # self.cursor.execute("create table t1(a int, b varchar(10))") - # - # params = [ (1, 'good'), - # ('error', 'not an int'), - # (3, 'good') ] - # - # self.assertRaises(pyodbc.Error, self.cursor.executemany, "insert into t1(a, b) value (?, ?)", params) - - - def test_row_slicing(self): - self.cursor.execute("create table t1(a int, b int, c int, d int)"); - self.cursor.execute("insert into t1 values(1,2,3,4)") - - row = self.cursor.execute("select * from t1").fetchone() - - result = row[:] - self.assertTrue(result is row) - - result = row[:-1] - self.assertEqual(result, (1,2,3)) - - result = row[0:4] - self.assertTrue(result is row) - - - def test_row_repr(self): - self.cursor.execute("create table t1(a int, b int, c int, d int)"); - self.cursor.execute("insert into t1 values(1,2,3,4)") - - row = self.cursor.execute("select * from t1").fetchone() - - result = str(row) - self.assertEqual(result, "(1, 2, 3, 4)") - - result = str(row[:-1]) - self.assertEqual(result, "(1, 2, 3)") - - result = str(row[:1]) - self.assertEqual(result, "(1,)") - - - def test_autocommit(self): - self.assertEqual(self.cnxn.autocommit, False) - - othercnxn = pyodbc.connect(self.connection_string, autocommit=True) - self.assertEqual(othercnxn.autocommit, True) - - othercnxn.autocommit = False - self.assertEqual(othercnxn.autocommit, False) - - def test_emoticons_as_parameter(self): - # https://github.com/mkleehammer/pyodbc/issues/423 - # - # When sending a varchar parameter, pyodbc is supposed to set ColumnSize to the number - # of characters. Ensure it works even with 4-byte characters. 
- # - # http://www.fileformat.info/info/unicode/char/1f31c/index.htm - - v = u"x \U0001F31C z" - - self.cursor.execute("CREATE TABLE t1(s varchar(100)) DEFAULT CHARSET=utf8mb4") - self.cursor.execute("insert into t1 values (?)", v) - - result = self.cursor.execute("select s from t1").fetchone()[0] - - self.assertEqual(result, v) - - def test_emoticons_as_literal(self): - # https://github.com/mkleehammer/pyodbc/issues/630 - - v = u"x \U0001F31C z" - - self.cursor.execute("CREATE TABLE t1(s varchar(100)) DEFAULT CHARSET=utf8mb4") - self.cursor.execute("insert into t1 values ('%s')" % v) - - result = self.cursor.execute("select s from t1").fetchone()[0] - - self.assertEqual(result, v) - - -def main(): - from optparse import OptionParser - parser = OptionParser(usage=usage) - parser.add_option("-v", "--verbose", action="count", help="Increment test verbosity (can be used multiple times)") - parser.add_option("-d", "--debug", action="store_true", default=False, help="Print debugging items") - parser.add_option("-t", "--test", help="Run only the named test") - - (options, args) = parser.parse_args() - - if len(args) > 1: - parser.error('Only one argument is allowed. Do you need quotes around the connection string?') - - if not args: - filename = basename(sys.argv[0]) - assert filename.endswith('.py') - connection_string = load_setup_connection_string(filename[:-3]) - - if not connection_string: - parser.print_help() - raise SystemExit() - else: - connection_string = args[0] - - if options.verbose: - cnxn = pyodbc.connect(connection_string) - print_library_info(cnxn) - cnxn.close() - - suite = load_tests(MySqlTestCase, options.test, connection_string) - - testRunner = unittest.TextTestRunner(verbosity=options.verbose) - result = testRunner.run(suite) - - return result - - -if __name__ == '__main__': - - # Add the build directory to the path so we're testing the latest build, not the installed version. 
- - add_to_path() - - import pyodbc - sys.exit(0 if main().wasSuccessful() else 1) diff --git a/tests2/pgtests.py b/tests2/pgtests.py deleted file mode 100755 index 471bd46e..00000000 --- a/tests2/pgtests.py +++ /dev/null @@ -1,615 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -usage = """\ -usage: %prog [options] connection_string - -Unit tests for PostgreSQL. To use, pass a connection string as the parameter. -The tests will create and drop tables t1 and t2 as necessary. - -These run using the version from the 'build' directory, not the version -installed into the Python directories. You must run python setup.py build -before running the tests. - -You can also put the connection string into a tmp/setup.cfg file like so: - - [pgtests] - connection-string=DSN=PostgreSQL35W - -Note: Be sure to use the "Unicode" (not the "ANSI") version of the PostgreSQL ODBC driver. -""" - -import sys, os, re -import unittest -from decimal import Decimal -from testutils import * - -_TESTSTR = '0123456789-abcdefghijklmnopqrstuvwxyz-' - -def _generate_test_string(length): - """ - Returns a string of composed of `seed` to make a string `length` characters long. - - To enhance performance, there are 3 ways data is read, based on the length of the value, so most data types are - tested with 3 lengths. This function helps us generate the test data. - - We use a recognizable data set instead of a single character to make it less likely that "overlap" errors will - be hidden and to help us manually identify where a break occurs. - """ - if length <= len(_TESTSTR): - return _TESTSTR[:length] - - c = (length + len(_TESTSTR)-1) / len(_TESTSTR) - v = _TESTSTR * c - return v[:length] - -class PGTestCase(unittest.TestCase): - - # These are from the C++ code. Keep them up to date. - - # If we are reading a binary, string, or unicode value and do not know how large it is, we'll try reading 2K into a - # buffer on the stack. We then copy into a new Python object. 
- SMALL_READ = 100 - - # A read guaranteed not to fit in the MAX_STACK_STACK stack buffer, but small enough to be used for varchar (4K max). - LARGE_READ = 4000 - - SMALL_STRING = _generate_test_string(SMALL_READ) - LARGE_STRING = _generate_test_string(LARGE_READ) - - def __init__(self, connection_string, ansi, unicode_results, method_name): - unittest.TestCase.__init__(self, method_name) - self.connection_string = connection_string - self.ansi = ansi - self.unicode = unicode_results - - def setUp(self): - self.cnxn = pyodbc.connect(self.connection_string, ansi=self.ansi) - self.cursor = self.cnxn.cursor() - - # I've set my test database to use UTF-8 which seems most popular. - self.cnxn.setdecoding(pyodbc.SQL_WCHAR, encoding='utf-8') - self.cnxn.setencoding(str, encoding='utf-8') - self.cnxn.setencoding(unicode, encoding='utf-8') - - # As of psql 9.5.04 SQLGetTypeInfo returns absurdly small sizes leading - # to slow writes. Override them: - self.cnxn.maxwrite = 1024 * 1024 * 1024 - - for i in range(3): - try: - self.cursor.execute("drop table t%d" % i) - self.cnxn.commit() - except: - pass - - self.cnxn.rollback() - - - def tearDown(self): - try: - self.cursor.close() - self.cnxn.close() - except: - # If we've already closed the cursor or connection, exceptions are thrown. 
- pass - - def test_drivers(self): - p = pyodbc.drivers() - self.assertTrue(isinstance(p, list)) - - def test_datasources(self): - p = pyodbc.dataSources() - self.assertTrue(isinstance(p, dict)) - - def test_getinfo_string(self): - value = self.cnxn.getinfo(pyodbc.SQL_CATALOG_NAME_SEPARATOR) - self.assertTrue(isinstance(value, str)) - - def test_getinfo_bool(self): - value = self.cnxn.getinfo(pyodbc.SQL_ACCESSIBLE_TABLES) - self.assertTrue(isinstance(value, bool)) - - def test_getinfo_int(self): - value = self.cnxn.getinfo(pyodbc.SQL_DEFAULT_TXN_ISOLATION) - self.assertTrue(isinstance(value, (int, long))) - - def test_getinfo_smallint(self): - value = self.cnxn.getinfo(pyodbc.SQL_CONCAT_NULL_BEHAVIOR) - self.assertTrue(isinstance(value, int)) - - - def test_negative_float(self): - value = -200 - self.cursor.execute("create table t1(n float)") - self.cursor.execute("insert into t1 values (?)", value) - result = self.cursor.execute("select n from t1").fetchone()[0] - self.assertEqual(value, result) - - - def _test_strtype(self, sqltype, value, colsize=None, resulttype=None): - """ - The implementation for string, Unicode, and binary tests. - """ - assert colsize is None or (value is None or colsize >= len(value)) - - if colsize: - sql = "create table t1(s %s(%s))" % (sqltype, colsize) - else: - sql = "create table t1(s %s)" % sqltype - - self.cursor.execute(sql) - self.cursor.execute("insert into t1 values(?)", value) - - self.cursor.execute("select * from t1") - row = self.cursor.fetchone() - result = row[0] - - if resulttype and type(value) is not resulttype: - value = resulttype(value) - - self.assertEqual(result, value) - - - def test_maxwrite(self): - # If we write more than `maxwrite` bytes, pyodbc will switch from - # binding the data all at once to providing it at execute time with - # SQLPutData. The default maxwrite is 1GB so this is rarely needed in - # PostgreSQL but I need to test the functionality somewhere. 
- self.cnxn.maxwrite = 300 - self._test_strtype('varchar', unicode(_generate_test_string(400), 'utf-8')) - - # - # varchar - # - - def test_empty_varchar(self): - self._test_strtype('varchar', u'', self.SMALL_READ) - - def test_null_varchar(self): - self._test_strtype('varchar', None, self.SMALL_READ) - - def test_large_null_varchar(self): - # There should not be a difference, but why not find out? - self._test_strtype('varchar', None, self.LARGE_READ) - - def test_small_varchar(self): - self._test_strtype('varchar', unicode(self.SMALL_STRING), self.SMALL_READ) - - def test_large_varchar(self): - self._test_strtype('varchar', unicode(self.LARGE_STRING), self.LARGE_READ) - - def test_varchar_many(self): - self.cursor.execute("create table t1(c1 varchar(300), c2 varchar(300), c3 varchar(300))") - - v1 = 'ABCDEFGHIJ' * 30 - v2 = '0123456789' * 30 - v3 = '9876543210' * 30 - - self.cursor.execute("insert into t1(c1, c2, c3) values (?,?,?)", v1, v2, v3); - row = self.cursor.execute("select c1, c2, c3 from t1").fetchone() - - self.assertEqual(v1, row.c1) - self.assertEqual(v2, row.c2) - self.assertEqual(v3, row.c3) - - def test_varchar_bytes(self): - # Write non-unicode data to a varchar field. - self._test_strtype('varchar', self.SMALL_STRING, self.SMALL_READ) - - - def test_small_decimal(self): - # value = Decimal('1234567890987654321') - value = Decimal('100010') # (I use this because the ODBC docs tell us how the bytes should look in the C struct) - self.cursor.execute("create table t1(d numeric(19))") - self.cursor.execute("insert into t1 values(?)", value) - v = self.cursor.execute("select * from t1").fetchone()[0] - self.assertEqual(type(v), Decimal) - self.assertEqual(v, value) - - - def test_small_decimal_scale(self): - # The same as small_decimal, except with a different scale. This value exactly matches the ODBC documentation - # example in the C Data Types appendix. 
- value = '1000.10' - value = Decimal(value) - self.cursor.execute("create table t1(d numeric(20,6))") - self.cursor.execute("insert into t1 values(?)", value) - v = self.cursor.execute("select * from t1").fetchone()[0] - self.assertEqual(type(v), Decimal) - self.assertEqual(v, value) - - - def test_negative_decimal_scale(self): - value = Decimal('-10.0010') - self.cursor.execute("create table t1(d numeric(19,4))") - self.cursor.execute("insert into t1 values(?)", value) - v = self.cursor.execute("select * from t1").fetchone()[0] - self.assertEqual(type(v), Decimal) - self.assertEqual(v, value) - - - def _exec(self): - self.cursor.execute(self.sql) - - def test_close_cnxn(self): - """Make sure using a Cursor after closing its connection doesn't crash.""" - - self.cursor.execute("create table t1(id integer, s varchar(20))") - self.cursor.execute("insert into t1 values (?,?)", 1, 'test') - self.cursor.execute("select * from t1") - - self.cnxn.close() - - # Now that the connection is closed, we expect an exception. (If the code attempts to use - # the HSTMT, we'll get an access violation instead.) - self.sql = "select * from t1" - self.assertRaises(pyodbc.ProgrammingError, self._exec) - - def test_empty_string(self): - self.cursor.execute("create table t1(s varchar(20))") - self.cursor.execute("insert into t1 values(?)", "") - - def test_fixed_str(self): - value = "testing" - self.cursor.execute("create table t1(s char(7))") - self.cursor.execute("insert into t1 values(?)", "testing") - v = self.cursor.execute("select * from t1").fetchone()[0] - self.assertEqual(v, value) - - def test_raw_encoding(self): - # Read something that is valid ANSI and make sure it comes through. - # The database is actually going to send us UTF-8 so don't use extended - # characters. - # - # REVIEW: Is there a good way to write UTF-8 into the database and read - # it out? 
- self.cnxn.setencoding(str, encoding='raw') - - expected = "testing" - self.cursor.execute("create table t1(s varchar(20))") - self.cursor.execute("insert into t1 values (?)", expected) - result = self.cursor.execute("select * from t1").fetchone()[0] - self.assertEqual(result, expected) - - def test_raw_decoding(self): - # Read something that is valid ANSI and make sure it comes through. - # The database is actually going to send us UTF-8 so don't use extended - # characters. - # - # REVIEW: Is there a good way to write UTF-8 into the database and read - # it out? - self.cnxn.setdecoding(pyodbc.SQL_CHAR, encoding='raw') - self._test_strtype('varchar', self.SMALL_STRING) - - def test_setdecoding(self): - # Force the result to be a string instead of unicode object. I'm not - # sure how to change the encoding for a single column. (Though I'm - # glad you can't - the communications encoding should not depend on - # per-column encoding like MySQL uses.) - self.cnxn.setdecoding(pyodbc.SQL_CHAR, encoding='utf8', to=str) - self.cnxn.setdecoding(pyodbc.SQL_WCHAR, encoding='utf8', to=str) - self._test_strtype('varchar', 'test', self.SMALL_READ) - - def test_unicode_latin(self): - value = u"x-\u00C2-y" # A hat : Â - self.cursor.execute("create table t1(s varchar(20))") - self.cursor.execute("insert into t1 values(?)", value) - result = self.cursor.execute("select * from t1").fetchone()[0] - self.assertEqual(result, value) - - - def test_negative_row_index(self): - self.cursor.execute("create table t1(s varchar(20))") - self.cursor.execute("insert into t1 values(?)", "1") - row = self.cursor.execute("select * from t1").fetchone() - self.assertEqual(row[0], "1") - self.assertEqual(row[-1], "1") - - def test_version(self): - self.assertEqual(3, len(pyodbc.version.split('.'))) # 1.3.1 etc. 
- - def test_rowcount_delete(self): - self.assertEqual(self.cursor.rowcount, -1) - self.cursor.execute("create table t1(i int)") - count = 4 - for i in range(count): - self.cursor.execute("insert into t1 values (?)", i) - self.cursor.execute("delete from t1") - self.assertEqual(self.cursor.rowcount, count) - - def test_rowcount_nodata(self): - """ - This represents a different code path than a delete that deleted something. - - The return value is SQL_NO_DATA and code after it was causing an error. We could use SQL_NO_DATA to step over - the code that errors out and drop down to the same SQLRowCount code. On the other hand, we could hardcode a - zero return value. - """ - self.cursor.execute("create table t1(i int)") - # This is a different code path internally. - self.cursor.execute("delete from t1") - self.assertEqual(self.cursor.rowcount, 0) - - def test_rowcount_select(self): - self.cursor.execute("create table t1(i int)") - count = 4 - for i in range(count): - self.cursor.execute("insert into t1 values (?)", i) - self.cursor.execute("select * from t1") - self.assertEqual(self.cursor.rowcount, 4) - - # PostgreSQL driver fails here? - # def test_rowcount_reset(self): - # "Ensure rowcount is reset to -1" - # - # self.cursor.execute("create table t1(i int)") - # count = 4 - # for i in range(count): - # self.cursor.execute("insert into t1 values (?)", i) - # self.assertEqual(self.cursor.rowcount, 1) - # - # self.cursor.execute("create table t2(i int)") - # self.assertEqual(self.cursor.rowcount, -1) - - def test_lower_case(self): - "Ensure pyodbc.lowercase forces returned column names to lowercase." - - # Has to be set before creating the cursor, so we must recreate self.cursor. 
- - pyodbc.lowercase = True - self.cursor = self.cnxn.cursor() - - self.cursor.execute("create table t1(Abc int, dEf int)") - self.cursor.execute("select * from t1") - - names = [ t[0] for t in self.cursor.description ] - names.sort() - - self.assertEqual(names, [ "abc", "def" ]) - - # Put it back so other tests don't fail. - pyodbc.lowercase = False - - def test_row_description(self): - """ - Ensure Cursor.description is accessible as Row.cursor_description. - """ - self.cursor = self.cnxn.cursor() - self.cursor.execute("create table t1(a int, b char(3))") - self.cnxn.commit() - self.cursor.execute("insert into t1 values(1, 'abc')") - - row = self.cursor.execute("select * from t1").fetchone() - self.assertEqual(self.cursor.description, row.cursor_description) - - - def test_executemany(self): - self.cursor.execute("create table t1(a int, b varchar(10))") - - params = [ (i, str(i)) for i in range(1, 6) ] - - self.cursor.executemany("insert into t1(a, b) values (?,?)", params) - - # REVIEW: Without the cast, we get the following error: - # [07006] [unixODBC]Received an unsupported type from Postgres.;\nERROR: table "t2" does not exist (14) - - count = self.cursor.execute("select cast(count(*) as int) from t1").fetchone()[0] - self.assertEqual(count, len(params)) - - self.cursor.execute("select a, b from t1 order by a") - rows = self.cursor.fetchall() - self.assertEqual(count, len(rows)) - - for param, row in zip(params, rows): - self.assertEqual(param[0], row[0]) - self.assertEqual(param[1], row[1]) - - - def test_executemany_failure(self): - """ - Ensure that an exception is raised if one query in an executemany fails. 
- """ - self.cursor.execute("create table t1(a int, b varchar(10))") - - params = [ (1, 'good'), - ('error', 'not an int'), - (3, 'good') ] - - self.assertRaises(pyodbc.Error, self.cursor.executemany, "insert into t1(a, b) value (?, ?)", params) - - - def test_executemany_generator(self): - self.cursor.execute("create table t1(a int)") - - self.cursor.executemany("insert into t1(a) values (?)", ((i,) for i in range(4))) - - row = self.cursor.execute("select min(a) mina, max(a) maxa from t1").fetchone() - - self.assertEqual(row.mina, 0) - self.assertEqual(row.maxa, 3) - - - def test_executemany_iterator(self): - self.cursor.execute("create table t1(a int)") - - values = [ (i,) for i in range(4) ] - - self.cursor.executemany("insert into t1(a) values (?)", iter(values)) - - row = self.cursor.execute("select min(a) mina, max(a) maxa from t1").fetchone() - - self.assertEqual(row.mina, 0) - self.assertEqual(row.maxa, 3) - - - def test_row_slicing(self): - self.cursor.execute("create table t1(a int, b int, c int, d int)"); - self.cursor.execute("insert into t1 values(1,2,3,4)") - - row = self.cursor.execute("select * from t1").fetchone() - - result = row[:] - self.assertTrue(result is row) - - result = row[:-1] - self.assertEqual(result, (1,2,3)) - - result = row[0:4] - self.assertTrue(result is row) - - - def test_row_repr(self): - self.cursor.execute("create table t1(a int, b int, c int, d int)"); - self.cursor.execute("insert into t1 values(1,2,3,4)") - - row = self.cursor.execute("select * from t1").fetchone() - - result = str(row) - self.assertEqual(result, "(1, 2, 3, 4)") - - result = str(row[:-1]) - self.assertEqual(result, "(1, 2, 3)") - - result = str(row[:1]) - self.assertEqual(result, "(1,)") - - - def test_pickling(self): - row = self.cursor.execute("select 1 a, 'two' b").fetchone() - - import pickle - s = pickle.dumps(row) - - other = pickle.loads(s) - - self.assertEqual(row, other) - - - def test_int_limits(self): - values = [ (-sys.maxint - 1), -1, 0, 1, 
3230392212, sys.maxint ] - - self.cursor.execute("create table t1(a bigint)") - - for value in values: - self.cursor.execute("delete from t1") - self.cursor.execute("insert into t1 values(?)", value) - v = self.cursor.execute("select a from t1").fetchone()[0] - self.assertEqual(v, value) - - def test_emoticons_as_parameter(self): - # https://github.com/mkleehammer/pyodbc/issues/423 - # - # When sending a varchar parameter, pyodbc is supposed to set ColumnSize to the number - # of characters. Ensure it works even with 4-byte characters. - # - # http://www.fileformat.info/info/unicode/char/1f31c/index.htm - - v = "x \U0001F31C z" - - self.cursor.execute("CREATE TABLE t1(s varchar(100))") - self.cursor.execute("insert into t1 values (?)", v) - - result = self.cursor.execute("select s from t1").fetchone()[0] - - self.assertEqual(result, v) - - def test_emoticons_as_literal(self): - # https://github.com/mkleehammer/pyodbc/issues/630 - - v = "x \U0001F31C z" - - self.cursor.execute("CREATE TABLE t1(s varchar(100))") - self.cursor.execute("insert into t1 values ('%s')" % v) - - result = self.cursor.execute("select s from t1").fetchone()[0] - - self.assertEqual(result, v) - - def test_cursor_messages(self): - """ - Test the Cursor.messages attribute. 
- """ - # self.cursor is used in setUp, hence is not brand new at this point - brand_new_cursor = self.cnxn.cursor() - self.assertIsNone(brand_new_cursor.messages) - - # using INFO message level because they are always sent to the client regardless of - # client_min_messages: https://www.postgresql.org/docs/11/runtime-config-client.html - for msg in ('hello world', 'ABCDEFGHIJ' * 400): - self.cursor.execute(""" - CREATE OR REPLACE PROCEDURE test_cursor_messages() - LANGUAGE plpgsql - AS $$ - BEGIN - RAISE INFO '{}' USING ERRCODE = '01000'; - END; - $$; - """.format(msg)) - self.cursor.execute("CALL test_cursor_messages();") - messages = self.cursor.messages - self.assertTrue(type(messages) is list) - self.assertTrue(len(messages) > 0) - self.assertTrue(all(type(m) is tuple for m in messages)) - self.assertTrue(all(len(m) == 2 for m in messages)) - self.assertTrue(all(type(m[0]) is unicode for m in messages)) - self.assertTrue(all(type(m[1]) is unicode for m in messages)) - self.assertTrue(all(m[0] == '[01000] (-1)' for m in messages)) - self.assertTrue(''.join(m[1] for m in messages).endswith(msg)) - - -def main(): - from optparse import OptionParser - parser = OptionParser(usage="usage: %prog [options] connection_string") - parser.add_option("-v", "--verbose", action="count", help="Increment test verbosity (can be used multiple times)") - parser.add_option("-d", "--debug", action="store_true", default=False, help="Print debugging items") - parser.add_option("-t", "--test", help="Run only the named test") - parser.add_option('-a', '--ansi', help='ANSI only', default=False, action='store_true') - parser.add_option('-u', '--unicode', help='Expect results in Unicode', default=False, action='store_true') - - (options, args) = parser.parse_args() - - if len(args) > 1: - parser.error('Only one argument is allowed. 
Do you need quotes around the connection string?') - - if not args: - connection_string = load_setup_connection_string('pgtests') - - if not connection_string: - parser.print_help() - raise SystemExit() - else: - connection_string = args[0] - - if options.verbose: - cnxn = pyodbc.connect(connection_string, ansi=options.ansi) - print_library_info(cnxn) - cnxn.close() - - if options.test: - # Run a single test - if not options.test.startswith('test_'): - options.test = 'test_%s' % (options.test) - - s = unittest.TestSuite([ PGTestCase(connection_string, options.ansi, options.unicode, options.test) ]) - else: - # Run all tests in the class - - methods = [ m for m in dir(PGTestCase) if m.startswith('test_') ] - methods.sort() - s = unittest.TestSuite([ PGTestCase(connection_string, options.ansi, options.unicode, m) for m in methods ]) - - testRunner = unittest.TextTestRunner(verbosity=options.verbose) - result = testRunner.run(s) - - return result - - -if __name__ == '__main__': - - # Add the build directory to the path so we're testing the latest build, not the installed version. - - add_to_path() - - import pyodbc - sys.exit(0 if main().wasSuccessful() else 1) diff --git a/tests2/sqldwtests.py b/tests2/sqldwtests.py deleted file mode 100644 index 95b50300..00000000 --- a/tests2/sqldwtests.py +++ /dev/null @@ -1,1499 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -from __future__ import print_function - -usage = """\ -usage: %prog [options] connection_string - -Unit tests for Azure SQL DW. To use, pass a connection string as the parameter. -The tests will create and drop tables t1 and t2 as necessary. - -These run using the version from the 'build' directory, not the version -installed into the Python directories. You must run python setup.py build -before running the tests. 
- -You can also put the connection string into a tmp/setup.cfg file like so: - - [sqldwtests] - connection-string=DRIVER={SQL Server};SERVER=localhost;UID=uid;PWD=pwd;DATABASE=db - -The connection string above will use the 2000/2005 driver, even if SQL Server 2008 -is installed: - - 2000: DRIVER={SQL Server} - 2005: DRIVER={SQL Server} - 2008: DRIVER={SQL Server Native Client 10.0} - -If using FreeTDS ODBC, be sure to use version 1.00.97 or newer. -""" - -import sys, os, re, uuid -import unittest -from decimal import Decimal -from datetime import datetime, date, time -from os.path import join, getsize, dirname, abspath -from warnings import warn -from testutils import * - -_TESTSTR = '0123456789-abcdefghijklmnopqrstuvwxyz-' - -def _generate_test_string(length): - """ - Returns a string of `length` characters, constructed by repeating _TESTSTR as necessary. - - To enhance performance, there are 3 ways data is read, based on the length of the value, so most data types are - tested with 3 lengths. This function helps us generate the test data. - - We use a recognizable data set instead of a single character to make it less likely that "overlap" errors will - be hidden and to help us manually identify where a break occurs. 
- """ - if length <= len(_TESTSTR): - return _TESTSTR[:length] - - c = (length + len(_TESTSTR)-1) / len(_TESTSTR) - v = _TESTSTR * c - return v[:length] - -class SqlServerTestCase(unittest.TestCase): - - SMALL_FENCEPOST_SIZES = [ 0, 1, 255, 256, 510, 511, 512, 1023, 1024, 2047, 2048, 4000 ] - LARGE_FENCEPOST_SIZES = [ 4095, 4096, 4097, 10 * 1024, 20 * 1024 ] - MAX_FENCEPOST_SIZES = [ 5 * 1024 * 1024 ] #, 50 * 1024 * 1024 ] - - ANSI_SMALL_FENCEPOSTS = [ _generate_test_string(size) for size in SMALL_FENCEPOST_SIZES ] - UNICODE_SMALL_FENCEPOSTS = [ unicode(s) for s in ANSI_SMALL_FENCEPOSTS ] - ANSI_LARGE_FENCEPOSTS = ANSI_SMALL_FENCEPOSTS + [ _generate_test_string(size) for size in LARGE_FENCEPOST_SIZES ] - UNICODE_LARGE_FENCEPOSTS = UNICODE_SMALL_FENCEPOSTS + [ unicode(s) for s in [_generate_test_string(size) for size in LARGE_FENCEPOST_SIZES ]] - - ANSI_MAX_FENCEPOSTS = ANSI_LARGE_FENCEPOSTS + [ _generate_test_string(size) for size in MAX_FENCEPOST_SIZES ] - UNICODE_MAX_FENCEPOSTS = UNICODE_LARGE_FENCEPOSTS + [ unicode(s) for s in [_generate_test_string(size) for size in MAX_FENCEPOST_SIZES ]] - - - def __init__(self, method_name, connection_string): - unittest.TestCase.__init__(self, method_name) - self.connection_string = connection_string - - def driver_type_is(self, type_name): - recognized_types = { - 'msodbcsql': '(Microsoft) ODBC Driver xx for SQL Server', - 'freetds': 'FreeTDS ODBC', - } - if not type_name in recognized_types.keys(): - raise KeyError('"{0}" is not a recognized driver type: {1}'.format(type_name, list(recognized_types.keys()))) - driver_name = self.cnxn.getinfo(pyodbc.SQL_DRIVER_NAME).lower() - if type_name == 'msodbcsql': - return ('msodbcsql' in driver_name) or ('sqlncli' in driver_name) or ('sqlsrv32.dll' == driver_name) - elif type_name == 'freetds': - return ('tdsodbc' in driver_name) - - def get_sqlserver_version(self): - """ - Returns the major version: 8-->2000, 9-->2005, 10-->2008 - """ - self.cursor.execute("SELECT 
CAST(SERVERPROPERTY('ProductVersion') AS VARCHAR(255))") - row = self.cursor.fetchone() - return int(row[0].split('.', 1)[0]) - - def setUp(self): - self.cnxn = pyodbc.connect(self.connection_string) - self.cursor = self.cnxn.cursor() - - for i in range(3): - try: - self.cursor.execute("drop table t%d" % i) - except: - pass - - for i in range(3): - try: - self.cursor.execute("drop procedure proc%d" % i) - except: - pass - - try: - self.cursor.execute('drop function func1') - except: - pass - - - def tearDown(self): - try: - self.cursor.close() - self.cnxn.close() - except: - # If we've already closed the cursor or connection, exceptions are thrown. - pass - - def test_binary_type(self): - if sys.hexversion >= 0x02060000: - self.assertTrue(pyodbc.BINARY is bytearray) - else: - self.assertTrue(pyodbc.BINARY is buffer) - - def test_multiple_bindings(self): - "More than one bind and select on a cursor" - self.cursor.execute("create table t1(n int)") - self.cursor.execute("insert into t1 values (?)", 1) - self.cursor.execute("insert into t1 values (?)", 2) - self.cursor.execute("insert into t1 values (?)", 3) - for i in range(3): - self.cursor.execute("select n from t1 where n < ?", 10) - self.cursor.execute("select n from t1 where n < 3") - - - def test_different_bindings(self): - self.cursor.execute("create table t1(n int)") - self.cursor.execute("create table t2(d datetime)") - self.cursor.execute("insert into t1 values (?)", 1) - self.cursor.execute("insert into t2 values (?)", datetime.now()) - - def test_drivers(self): - p = pyodbc.drivers() - self.assertTrue(isinstance(p, list)) - - def test_datasources(self): - p = pyodbc.dataSources() - self.assertTrue(isinstance(p, dict)) - - def test_getinfo_string(self): - value = self.cnxn.getinfo(pyodbc.SQL_CATALOG_NAME_SEPARATOR) - self.assertTrue(isinstance(value, str)) - - def test_getinfo_bool(self): - value = self.cnxn.getinfo(pyodbc.SQL_ACCESSIBLE_TABLES) - self.assertTrue(isinstance(value, bool)) - - def 
test_getinfo_int(self): - value = self.cnxn.getinfo(pyodbc.SQL_DEFAULT_TXN_ISOLATION) - self.assertTrue(isinstance(value, (int, long))) - - def test_getinfo_smallint(self): - value = self.cnxn.getinfo(pyodbc.SQL_CONCAT_NULL_BEHAVIOR) - self.assertTrue(isinstance(value, int)) - - def test_noscan(self): - self.assertEqual(self.cursor.noscan, False) - self.cursor.noscan = True - self.assertEqual(self.cursor.noscan, True) - - def test_nextset(self): - self.cursor.execute("create table t1(i int)") - for i in range(4): - self.cursor.execute("insert into t1(i) values(?)", i) - - self.cursor.execute("select i from t1 where i < 2 order by i; select i from t1 where i >= 2 order by i") - - for i, row in enumerate(self.cursor): - self.assertEqual(i, row.i) - - self.assertEqual(self.cursor.nextset(), True) - - for i, row in enumerate(self.cursor): - self.assertEqual(i + 2, row.i) - - def test_nextset_with_raiserror(self): - self.cursor.execute("select i = 1; RAISERROR('c', 16, 1);") - row = next(self.cursor) - self.assertEqual(1, row.i) - if self.driver_type_is('freetds'): - warn('FREETDS_KNOWN_ISSUE - test_nextset_with_raiserror: test cancelled.') - # AssertionError: ProgrammingError not raised by nextset - # https://github.com/FreeTDS/freetds/issues/230 - return # for now - self.assertRaises(pyodbc.ProgrammingError, self.cursor.nextset) - - def test_fixed_unicode(self): - value = u"t\xebsting" - self.cursor.execute("create table t1(s nchar(7))") - self.cursor.execute("insert into t1 values(?)", u"t\xebsting") - v = self.cursor.execute("select * from t1").fetchone()[0] - self.assertEqual(type(v), unicode) - self.assertEqual(len(v), len(value)) # If we alloc'd wrong, the test below might work because of an embedded NULL - self.assertEqual(v, value) - - - def _test_strtype(self, sqltype, value, resulttype=None, colsize=None): - """ - The implementation for string, Unicode, and binary tests. 
- """ - assert colsize in (None, 'max') or isinstance(colsize, int), colsize - assert colsize in (None, 'max') or (value is None or colsize >= len(value)) - - if colsize: - sql = "create table t1(s %s(%s)) with (heap)" % (sqltype, colsize) - else: - sql = "create table t1(s %s) with (heap)" % sqltype - self.cursor.execute(sql) - - if resulttype is None: - resulttype = type(value) - - sql = "insert into t1 values(?)" - try: - if colsize == 'max': - if sqltype == 'varbinary': - sqlbind = pyodbc.SQL_VARBINARY - elif sqltype == 'varchar': - sqlbind = pyodbc.SQL_VARCHAR - else: - sqlbind = pyodbc.SQL_WVARCHAR - self.cursor.setinputsizes([(sqlbind, 0, 0)]) - elif (sqltype == 'nvarchar' or sqltype == 'varchar') and colsize != 'max' and colsize > 2000: - self.cursor.setinputsizes([(pyodbc.SQL_WVARCHAR, 0, 0)]) - else: - self.cursor.setinputsizes(None) - self.cursor.execute(sql, value) - except pyodbc.DataError: - if self.driver_type_is('freetds'): - # FREETDS_KNOWN_ISSUE - # - # cnxn.getinfo(pyodbc.SQL_DESCRIBE_PARAMETER) returns False for FreeTDS, so - # pyodbc can't call SQLDescribeParam to get the correct parameter type. - # This can lead to errors being returned from SQL Server when sp_prepexec is called, - # e.g., "Implicit conversion from data type varchar to varbinary is not allowed." - # for test_binary_null - # - # So at least verify that the user can manually specify the parameter type - if sqltype == 'varbinary': - sql_param_type = pyodbc.SQL_VARBINARY - # (add elif blocks for other cases as required) - self.cursor.setinputsizes([(sql_param_type, colsize, 0)]) - self.cursor.execute(sql, value) - else: - raise - v = self.cursor.execute("select * from t1").fetchone()[0] - - # To allow buffer --> db --> bytearray tests, always convert the input to the expected result type before - # comparing. 
- if type(value) is not resulttype: - value = resulttype(value) - - self.assertEqual(v, value) - - - def _test_strliketype(self, sqltype, value, resulttype=None, colsize=None): - """ - The implementation for text, image, ntext, and binary. - - These types do not support comparison operators. - """ - assert colsize is None or isinstance(colsize, int), colsize - assert colsize is None or (value is None or colsize >= len(value)) - - if colsize: - sql = "create table t1(s %s(%s))" % (sqltype, colsize) - else: - sql = "create table t1(s %s)" % sqltype - - if resulttype is None: - resulttype = type(value) - - self.cursor.execute(sql) - self.cursor.execute("insert into t1 values(?)", value) - result = self.cursor.execute("select * from t1").fetchone()[0] - self.assertEqual(type(result), resulttype) - - # To allow buffer --> db --> bytearray tests, always convert the input to the expected result type before - # comparing. - if type(value) is not resulttype: - value = resulttype(value) - - self.assertEqual(result, value) - - - # - # varchar - # - - def test_varchar_null(self): - self._test_strtype('varchar', None, colsize=100) - - # Generate a test for each fencepost size: test_varchar_0, etc. 
- def _maketest(value): - def t(self): - self._test_strtype('varchar', value, colsize=len(value)) - return t - for value in UNICODE_SMALL_FENCEPOSTS: - locals()['test_varchar_%s' % len(value)] = _maketest(value) - - # Also test varchar(max) - def _maketest(value): - def t(self): - self._test_strtype('varchar', value, colsize='max') - return t - for value in UNICODE_MAX_FENCEPOSTS: - locals()['test_varcharmax_%s' % len(value)] = _maketest(value) - - def test_varchar_many(self): - self.cursor.execute("create table t1(c1 varchar(300), c2 varchar(300), c3 varchar(300))") - - v1 = 'ABCDEFGHIJ' * 30 - v2 = '0123456789' * 30 - v3 = '9876543210' * 30 - - self.cursor.execute("insert into t1(c1, c2, c3) values (?,?,?)", v1, v2, v3); - row = self.cursor.execute("select c1, c2, c3, len(c1) as l1, len(c2) as l2, len(c3) as l3 from t1").fetchone() - - self.assertEqual(v1, row.c1) - self.assertEqual(v2, row.c2) - self.assertEqual(v3, row.c3) - - def test_varchar_upperlatin(self): - self._test_strtype('varchar', u'\u00e5', colsize=1) - - # - # nvarchar - # - - def test_nvarchar_null(self): - self._test_strtype('nvarchar', None, colsize=100) - - # Generate a test for each fencepost size: test_unicode_0, etc. 
- def _maketest(value): - def t(self): - self._test_strtype('nvarchar', value, colsize=len(value)) - return t - for value in UNICODE_SMALL_FENCEPOSTS: - locals()['test_nvarchar_%s' % len(value)] = _maketest(value) - - # Also test nvarchar(max) - def _maketest(value): - def t(self): - self._test_strtype('nvarchar', value, colsize='max') - return t - for value in UNICODE_MAX_FENCEPOSTS: - locals()['test_nvarcharmax_%s' % len(value)] = _maketest(value) - - def test_unicode_upperlatin(self): - self._test_strtype('nvarchar', u'\u00e5', colsize=1) - - def test_unicode_longmax(self): - # Issue 188: Segfault when fetching NVARCHAR(MAX) data over 511 bytes - - ver = self.get_sqlserver_version() - if ver < 9: # 2005+ - return # so pass / ignore - self.cursor.execute("select cast(replicate(N'x', 512) as nvarchar(max))") - - def test_fast_executemany_to_local_temp_table(self): - if self.driver_type_is('freetds'): - warn('FREETDS_KNOWN_ISSUE - test_fast_executemany_to_local_temp_table: test cancelled.') - return - v = u'Ώπα' - self.cursor.execute("CREATE TABLE #issue295 (id INT, txt NVARCHAR(50))") - sql = "INSERT INTO #issue295 (txt) VALUES (?)" - params = [(v,)] - self.cursor.setinputsizes([(pyodbc.SQL_WVARCHAR, 50, 0)]) - self.cursor.fast_executemany = True - self.cursor.executemany(sql, params) - self.assertEqual(self.cursor.execute("SELECT txt FROM #issue295").fetchval(), v) - - # - # binary - # - - def test_binaryNull_object(self): - self.cursor.execute("create table t1(n varbinary(10))") - self.cursor.execute("insert into t1 values (?)", pyodbc.BinaryNull); - - # buffer - - def _maketest(value): - def t(self): - self._test_strtype('varbinary', buffer(value), resulttype=pyodbc.BINARY, colsize=len(value)) - return t - for value in ANSI_SMALL_FENCEPOSTS: - locals()['test_binary_buffer_%s' % len(value)] = _maketest(value) - - # bytearray - - if sys.hexversion >= 0x02060000: - def _maketest(value): - def t(self): - self._test_strtype('varbinary', bytearray(value), 
colsize=len(value)) - return t - for value in ANSI_SMALL_FENCEPOSTS: - locals()['test_binary_bytearray_%s' % len(value)] = _maketest(value) - - # varbinary(max) - def _maketest(value): - def t(self): - self._test_strtype('varbinary', buffer(value), resulttype=pyodbc.BINARY, colsize='max') - return t - for value in ANSI_MAX_FENCEPOSTS: - locals()['test_binarymax_buffer_%s' % len(value)] = _maketest(value) - - # bytearray - - if sys.hexversion >= 0x02060000: - def _maketest(value): - def t(self): - self._test_strtype('varbinary', bytearray(value), colsize='max') - return t - for value in ANSI_MAX_FENCEPOSTS: - locals()['test_binarymax_bytearray_%s' % len(value)] = _maketest(value) - - # - # image - # - - # - # text - # - - # def test_empty_text(self): - # self._test_strliketype('text', bytearray('')) - - # - # xml - # - - # def test_empty_xml(self): - # self._test_strliketype('xml', bytearray('')) - - # - # bit - # - - def test_bit(self): - value = True - self.cursor.execute("create table t1(b bit)") - self.cursor.execute("insert into t1 values (?)", value) - v = self.cursor.execute("select b from t1").fetchone()[0] - self.assertEqual(type(v), bool) - self.assertEqual(v, value) - - # - # decimal - # - - def _decimal(self, precision, scale, negative): - # From test provided by planders (thanks!) in Issue 91 - - self.cursor.execute("create table t1(d decimal(%s, %s))" % (precision, scale)) - - # Construct a decimal that uses the maximum precision and scale. - decStr = '9' * (precision - scale) - if scale: - decStr = decStr + "." 
+ '9' * scale - if negative: - decStr = "-" + decStr - value = Decimal(decStr) - - self.cursor.execute("insert into t1 values(?)", value) - - v = self.cursor.execute("select d from t1").fetchone()[0] - self.assertEqual(v, value) - - def _maketest(p, s, n): - def t(self): - self._decimal(p, s, n) - return t - for (p, s, n) in [ (1, 0, False), - (1, 0, True), - (6, 0, False), - (6, 2, False), - (6, 4, True), - (6, 6, True), - (38, 0, False), - (38, 10, False), - (38, 38, False), - (38, 0, True), - (38, 10, True), - (38, 38, True) ]: - locals()['test_decimal_%s_%s_%s' % (p, s, n and 'n' or 'p')] = _maketest(p, s, n) - - - def test_decimal_e(self): - """Ensure exponential notation decimals are properly handled""" - value = Decimal((0, (1, 2, 3), 5)) # prints as 1.23E+7 - self.cursor.execute("create table t1(d decimal(10, 2))") - self.cursor.execute("insert into t1 values (?)", value) - result = self.cursor.execute("select * from t1").fetchone()[0] - self.assertEqual(result, value) - - def test_subquery_params(self): - """Ensure parameter markers work in a subquery""" - self.cursor.execute("create table t1(id integer, s varchar(20))") - self.cursor.execute("insert into t1 values (?,?)", 1, 'test') - row = self.cursor.execute(""" - select x.id - from ( - select id - from t1 - where s = ? - and id between ? and ? - ) x - """, 'test', 1, 10).fetchone() - self.assertNotEqual(row, None) - self.assertEqual(row[0], 1) - - def _exec(self): - self.cursor.execute(self.sql) - - def test_close_cnxn(self): - """Make sure using a Cursor after closing its connection doesn't crash.""" - - self.cursor.execute("create table t1(id integer, s varchar(20))") - self.cursor.execute("insert into t1 values (?,?)", 1, 'test') - self.cursor.execute("select * from t1") - - self.cnxn.close() - - # Now that the connection is closed, we expect an exception. (If the code attempts to use - # the HSTMT, we'll get an access violation instead.) 
- self.sql = "select * from t1" - self.assertRaises(pyodbc.ProgrammingError, self._exec) - - def test_empty_string(self): - self.cursor.execute("create table t1(s varchar(20))") - self.cursor.execute("insert into t1 values(?)", "") - - def test_empty_string_encoding(self): - self.cnxn.setdecoding(pyodbc.SQL_CHAR, encoding='shift_jis') - value = "" - self.cursor.execute("create table t1(s varchar(20))") - self.cursor.execute("insert into t1 values(?)", value) - v = self.cursor.execute("select * from t1").fetchone()[0] - self.assertEqual(v, value) - - def test_fixed_char(self): - value = "testing" - self.cursor.execute("create table t1(s char(7))") - self.cursor.execute("insert into t1 values(?)", "testing") - v = self.cursor.execute("select * from t1").fetchone()[0] - self.assertEqual(v, value) - - def test_empty_unicode(self): - self.cursor.execute("create table t1(s nvarchar(20))") - self.cursor.execute("insert into t1 values(?)", u"") - - def test_empty_unicode_encoding(self): - self.cnxn.setdecoding(pyodbc.SQL_CHAR, encoding='shift_jis') - value = "" - self.cursor.execute("create table t1(s nvarchar(20))") - self.cursor.execute("insert into t1 values(?)", value) - v = self.cursor.execute("select * from t1").fetchone()[0] - self.assertEqual(v, value) - - def test_unicode_query(self): - self.cursor.execute(u"select 1") - - # From issue #206 - def _maketest(value): - def t(self): - self._test_strtype('nvarchar', value, colsize=len(value)) - return t - locals()['test_chinese_param'] = _maketest(u'我的') - - def test_chinese(self): - v = u'我的' - self.cursor.execute(u"SELECT N'我的' AS [Name]") - row = self.cursor.fetchone() - self.assertEqual(row[0], v) - - self.cursor.execute(u"SELECT N'我的' AS [Name]") - rows = self.cursor.fetchall() - self.assertEqual(rows[0][0], v) - - def test_negative_row_index(self): - self.cursor.execute("create table t1(s varchar(20))") - self.cursor.execute("insert into t1 values(?)", "1") - row = self.cursor.execute("select * from 
t1").fetchone() - self.assertEqual(row[0], "1") - self.assertEqual(row[-1], "1") - - def test_version(self): - self.assertEqual(3, len(pyodbc.version.split('.'))) # 1.3.1 etc. - - # - # date, time, datetime - # - - def test_datetime(self): - value = datetime(2007, 1, 15, 3, 4, 5) - - self.cursor.execute("create table t1(dt datetime)") - self.cursor.execute("insert into t1 values (?)", value) - - result = self.cursor.execute("select dt from t1").fetchone()[0] - self.assertEqual(type(result), datetime) - self.assertEqual(value, result) - - def test_datetime_fraction(self): - # SQL Server supports milliseconds, but Python's datetime supports nanoseconds, so the most granular datetime - # supported is xxx000. - - value = datetime(2007, 1, 15, 3, 4, 5, 123000) - - self.cursor.execute("create table t1(dt datetime)") - self.cursor.execute("insert into t1 values (?)", value) - - result = self.cursor.execute("select dt from t1").fetchone()[0] - self.assertEqual(type(result), datetime) - self.assertEqual(value, result) - - def test_datetime_fraction_rounded(self): - # SQL Server supports milliseconds, but Python's datetime supports nanoseconds. pyodbc rounds down to what the - # database supports. 
- - full = datetime(2007, 1, 15, 3, 4, 5, 123456) - rounded = datetime(2007, 1, 15, 3, 4, 5, 123000) - - self.cursor.execute("create table t1(dt datetime)") - self.cursor.execute("insert into t1 values (?)", full) - - result = self.cursor.execute("select dt from t1").fetchone()[0] - self.assertEqual(type(result), datetime) - self.assertEqual(rounded, result) - - def test_date(self): - ver = self.get_sqlserver_version() - if ver < 10: # 2008 only - return # so pass / ignore - - value = date.today() - - self.cursor.execute("create table t1(d date)") - self.cursor.execute("insert into t1 values (?)", value) - - result = self.cursor.execute("select d from t1").fetchone()[0] - self.assertEqual(type(result), date) - self.assertEqual(value, result) - - def test_time(self): - ver = self.get_sqlserver_version() - if ver < 10: # 2008 only - return # so pass / ignore - - value = datetime.now().time() - - # We aren't yet writing values using the new extended time type so the value written to the database is only - # down to the second. 
- value = value.replace(microsecond=0) - - self.cursor.execute("create table t1(t time)") - self.cursor.execute("insert into t1 values (?)", value) - - result = self.cursor.execute("select t from t1").fetchone()[0] - self.assertEqual(type(result), time) - self.assertEqual(value, result) - - def test_datetime2(self): - value = datetime(2007, 1, 15, 3, 4, 5) - - self.cursor.execute("create table t1(dt datetime2)") - self.cursor.execute("insert into t1 values (?)", value) - - result = self.cursor.execute("select dt from t1").fetchone()[0] - self.assertEqual(type(result), datetime) - self.assertEqual(value, result) - - # - # ints and floats - # - - def test_int(self): - value = 1234 - self.cursor.execute("create table t1(n int)") - self.cursor.execute("insert into t1 values (?)", value) - result = self.cursor.execute("select n from t1").fetchone()[0] - self.assertEqual(result, value) - - def test_negative_int(self): - value = -1 - self.cursor.execute("create table t1(n int)") - self.cursor.execute("insert into t1 values (?)", value) - result = self.cursor.execute("select n from t1").fetchone()[0] - self.assertEqual(result, value) - - def test_bigint(self): - input = 3000000000 - self.cursor.execute("create table t1(d bigint)") - self.cursor.execute("insert into t1 values (?)", input) - result = self.cursor.execute("select d from t1").fetchone()[0] - self.assertEqual(result, input) - - def test_float(self): - value = 1234.567 - self.cursor.execute("create table t1(n float)") - self.cursor.execute("insert into t1 values (?)", value) - result = self.cursor.execute("select n from t1").fetchone()[0] - self.assertEqual(result, value) - - def test_negative_float(self): - value = -200 - self.cursor.execute("create table t1(n float)") - self.cursor.execute("insert into t1 values (?)", value) - result = self.cursor.execute("select n from t1").fetchone()[0] - self.assertEqual(value, result) - - - # - # stored procedures - # - - # def test_callproc(self): - # "callproc with a 
simple input-only stored procedure" - # pass - - def test_sp_results(self): - self.cursor.execute( - """ - Create procedure proc1 - AS - select top 10 name, id, xtype, refdate - from sysobjects - """) - rows = self.cursor.execute("exec proc1").fetchall() - self.assertEqual(type(rows), list) - self.assertEqual(len(rows), 10) # there has to be at least 10 items in sysobjects - self.assertEqual(type(rows[0].refdate), datetime) - - - def test_sp_results_from_temp(self): - - # Note: I've used "set nocount on" so that we don't get the number of rows deleted from #tmptable. - # If you don't do this, you'd need to call nextset() once to skip it. - - self.cursor.execute( - """ - Create procedure proc1 - AS - set nocount on - select top 10 name, id, xtype, refdate - into #tmptable - from sysobjects - - select * from #tmptable - """) - self.cursor.execute("exec proc1") - self.assertTrue(self.cursor.description is not None) - self.assertTrue(len(self.cursor.description) == 4) - - rows = self.cursor.fetchall() - self.assertEqual(type(rows), list) - self.assertEqual(len(rows), 10) # there has to be at least 10 items in sysobjects - self.assertEqual(type(rows[0].refdate), datetime) - - - def test_sp_with_dates(self): - # Reported in the forums that passing two datetimes to a stored procedure doesn't work. - self.cursor.execute( - """ - if exists (select * from dbo.sysobjects where id = object_id(N'[test_sp]') and OBJECTPROPERTY(id, N'IsProcedure') = 1) - drop procedure [dbo].[test_sp] - """) - self.cursor.execute( - """ - create procedure test_sp(@d1 datetime, @d2 datetime) - AS - declare @d as int - set @d = datediff(year, @d1, @d2) - select @d - """) - self.cursor.execute("exec test_sp ?, ?", datetime.now(), datetime.now()) - rows = self.cursor.fetchall() - self.assertTrue(rows is not None) - self.assertTrue(rows[0][0] == 0) # 0 years apart - - def test_sp_with_none(self): - # Reported in the forums that passing None caused an error. 
- self.cursor.execute( - """ - if exists (select * from dbo.sysobjects where id = object_id(N'[test_sp]') and OBJECTPROPERTY(id, N'IsProcedure') = 1) - drop procedure [dbo].[test_sp] - """) - self.cursor.execute( - """ - create procedure test_sp(@x varchar(20)) - AS - declare @y varchar(20) - set @y = @x - select @y - """) - self.cursor.execute("exec test_sp ?", None) - rows = self.cursor.fetchall() - self.assertTrue(rows is not None) - self.assertTrue(rows[0][0] == None) # 0 years apart - - - # - # rowcount - # - - def test_rowcount_delete(self): - self.assertEqual(self.cursor.rowcount, -1) - self.cursor.execute("create table t1(i int)") - count = 4 - for i in range(count): - self.cursor.execute("insert into t1 values (?)", i) - self.cursor.execute("delete from t1") - self.assertEqual(self.cursor.rowcount, count) - - def test_rowcount_nodata(self): - """ - This represents a different code path than a delete that deleted something. - - The return value is SQL_NO_DATA and code after it was causing an error. We could use SQL_NO_DATA to step over - the code that errors out and drop down to the same SQLRowCount code. On the other hand, we could hardcode a - zero return value. - """ - self.cursor.execute("create table t1(i int)") - # This is a different code path internally. - self.cursor.execute("delete from t1") - self.assertEqual(self.cursor.rowcount, 0) - - def test_rowcount_select(self): - """ - Ensure Cursor.rowcount is set properly after a select statement. - - pyodbc calls SQLRowCount after each execute and sets Cursor.rowcount, but SQL Server 2005 returns -1 after a - select statement, so we'll test for that behavior. This is valid behavior according to the DB API - specification, but people don't seem to like it. 
- """ - self.cursor.execute("create table t1(i int)") - count = 4 - for i in range(count): - self.cursor.execute("insert into t1 values (?)", i) - self.cursor.execute("select * from t1") - self.assertEqual(self.cursor.rowcount, -1) - - rows = self.cursor.fetchall() - self.assertEqual(len(rows), count) - self.assertEqual(self.cursor.rowcount, -1) - - def test_rowcount_reset(self): - "Ensure rowcount is reset after DDL" - - ddl_rowcount = 0 if self.driver_type_is('freetds') else -1 - - self.cursor.execute("create table t1(i int)") - count = 4 - for i in range(count): - self.cursor.execute("insert into t1 values (?)", i) - self.assertEqual(self.cursor.rowcount, 1) - - self.cursor.execute("create table t2(i int)") - self.assertEqual(self.cursor.rowcount, ddl_rowcount) - - # - # always return Cursor - # - - # In the 2.0.x branch, Cursor.execute sometimes returned the cursor and sometimes the rowcount. This proved very - # confusing when things went wrong and added very little value even when things went right since users could always - # use: cursor.execute("...").rowcount - - def test_retcursor_delete(self): - self.cursor.execute("create table t1(i int)") - self.cursor.execute("insert into t1 values (1)") - v = self.cursor.execute("delete from t1") - self.assertEqual(v, self.cursor) - - def test_retcursor_nodata(self): - """ - This represents a different code path than a delete that deleted something. - - The return value is SQL_NO_DATA and code after it was causing an error. We could use SQL_NO_DATA to step over - the code that errors out and drop down to the same SQLRowCount code. - """ - self.cursor.execute("create table t1(i int)") - # This is a different code path internally. 
- v = self.cursor.execute("delete from t1") - self.assertEqual(v, self.cursor) - - def test_retcursor_select(self): - self.cursor.execute("create table t1(i int)") - self.cursor.execute("insert into t1 values (1)") - v = self.cursor.execute("select * from t1") - self.assertEqual(v, self.cursor) - - # - # misc - # - - def table_with_spaces(self): - "Ensure we can select using [x z] syntax" - - try: - self.cursor.execute("create table [test one](int n)") - self.cursor.execute("insert into [test one] values(1)") - self.cursor.execute("select * from [test one]") - v = self.cursor.fetchone()[0] - self.assertEqual(v, 1) - finally: - self.cnxn.rollback() - - def test_lower_case(self): - "Ensure pyodbc.lowercase forces returned column names to lowercase." - - # Has to be set before creating the cursor, so we must recreate self.cursor. - - pyodbc.lowercase = True - self.cursor = self.cnxn.cursor() - - self.cursor.execute("create table t1(Abc int, dEf int)") - self.cursor.execute("select * from t1") - - names = [ t[0] for t in self.cursor.description ] - names.sort() - - self.assertEqual(names, [ "abc", "def" ]) - - # Put it back so other tests don't fail. - pyodbc.lowercase = False - - def test_row_description(self): - """ - Ensure Cursor.description is accessible as Row.cursor_description. - """ - self.cursor = self.cnxn.cursor() - self.cursor.execute("create table t1(a int, b char(3))") - self.cursor.execute("insert into t1 values(1, 'abc')") - - row = self.cursor.execute("select * from t1").fetchone() - - self.assertEqual(self.cursor.description, row.cursor_description) - - - def test_temp_select(self): - # A project was failing to create temporary tables via select into. 
- self.cursor.execute("create table t1(s char(7))") - self.cursor.execute("insert into t1 values(?)", "testing") - v = self.cursor.execute("select * from t1").fetchone()[0] - self.assertEqual(type(v), unicode) - self.assertEqual(v, "testing") - - self.cursor.execute("select s into t2 from t1") - v = self.cursor.execute("select * from t1").fetchone()[0] - self.assertEqual(type(v), unicode) - self.assertEqual(v, "testing") - - - def test_money(self): - d = Decimal('123456.78') - self.cursor.execute("create table t1(i int identity(1,1), m money)") - self.cursor.execute("insert into t1(m) values (?)", d) - v = self.cursor.execute("select m from t1").fetchone()[0] - self.assertEqual(v, d) - - - def test_executemany(self): - self.cursor.execute("create table t1(a int, b varchar(10))") - - params = [ (i, str(i)) for i in range(1, 6) ] - - self.cursor.executemany("insert into t1(a, b) values (?,?)", params) - - count = self.cursor.execute("select count(*) from t1").fetchone()[0] - self.assertEqual(count, len(params)) - - self.cursor.execute("select a, b from t1 order by a") - rows = self.cursor.fetchall() - self.assertEqual(count, len(rows)) - - for param, row in zip(params, rows): - self.assertEqual(param[0], row[0]) - self.assertEqual(param[1], row[1]) - - - def test_executemany_one(self): - "Pass executemany a single sequence" - self.cursor.execute("create table t1(a int, b varchar(10))") - - params = [ (1, "test") ] - - self.cursor.executemany("insert into t1(a, b) values (?,?)", params) - - count = self.cursor.execute("select count(*) from t1").fetchone()[0] - self.assertEqual(count, len(params)) - - self.cursor.execute("select a, b from t1 order by a") - rows = self.cursor.fetchall() - self.assertEqual(count, len(rows)) - - for param, row in zip(params, rows): - self.assertEqual(param[0], row[0]) - self.assertEqual(param[1], row[1]) - - def test_executemany_dae_0(self): - """ - DAE for 0-length value - """ - self.cursor.execute("create table t1(a nvarchar(max)) with 
(heap)") - - self.cursor.fast_executemany = True - self.cursor.executemany("insert into t1(a) values(?)", [['']]) - - self.assertEqual(self.cursor.execute("select a from t1").fetchone()[0], '') - - self.cursor.fast_executemany = False - - def test_executemany_failure(self): - """ - Ensure that an exception is raised if one query in an executemany fails. - """ - self.cursor.execute("create table t1(a int, b varchar(10))") - - params = [ (1, 'good'), - ('error', 'not an int'), - (3, 'good') ] - - self.assertRaises(pyodbc.Error, self.cursor.executemany, "insert into t1(a, b) value (?, ?)", params) - - - def test_row_slicing(self): - self.cursor.execute("create table t1(a int, b int, c int, d int)"); - self.cursor.execute("insert into t1 values(1,2,3,4)") - - row = self.cursor.execute("select * from t1").fetchone() - - result = row[:] - self.assertTrue(result is row) - - result = row[:-1] - self.assertEqual(result, (1,2,3)) - - result = row[0:4] - self.assertTrue(result is row) - - - def test_row_repr(self): - self.cursor.execute("create table t1(a int, b int, c int, d int)"); - self.cursor.execute("insert into t1 values(1,2,3,4)") - - row = self.cursor.execute("select * from t1").fetchone() - - result = str(row) - self.assertEqual(result, "(1, 2, 3, 4)") - - result = str(row[:-1]) - self.assertEqual(result, "(1, 2, 3)") - - result = str(row[:1]) - self.assertEqual(result, "(1,)") - - - def test_concatenation(self): - v2 = '0123456789' * 30 - v3 = '9876543210' * 30 - - self.cursor.execute("create table t1(c1 int identity(1, 1), c2 varchar(300), c3 varchar(300))") - self.cursor.execute("insert into t1(c2, c3) values (?,?)", v2, v3) - - row = self.cursor.execute("select c2, c3, c2 + c3 as both from t1").fetchone() - - self.assertEqual(row.both, v2 + v3) - - def test_view_select(self): - # Reported in forum: Can't select from a view? I think I do this a lot, but another test never hurts. - - # Create a table (t1) with 3 rows and a view (t2) into it. 
- self.cursor.execute("create table t1(c1 int identity(1, 1), c2 varchar(50))") - for i in range(3): - self.cursor.execute("insert into t1(c2) values (?)", "string%s" % i) - self.cursor.execute("create view t2 as select * from t1") - - # Select from the view - self.cursor.execute("select * from t2") - rows = self.cursor.fetchall() - self.assertTrue(rows is not None) - self.assertTrue(len(rows) == 3) - self.cursor.execute("drop view t2") - - def test_autocommit(self): - self.assertEqual(self.cnxn.autocommit, False) - - othercnxn = pyodbc.connect(self.connection_string, autocommit=True) - self.assertEqual(othercnxn.autocommit, True) - - othercnxn.autocommit = False - self.assertEqual(othercnxn.autocommit, False) - - def test_cursorcommit(self): - "Ensure cursor.commit works" - othercnxn = pyodbc.connect(self.connection_string, autocommit=True) - othercursor = othercnxn.cursor() - othercnxn = None - - othercursor.execute("create table t1(s varchar(20))") - othercursor.execute("insert into t1 values(?)", 'test') - othercursor.commit() - - value = self.cursor.execute("select s from t1").fetchone()[0] - self.assertEqual(value, 'test') - - - def test_unicode_results(self): - "Ensure unicode_results forces Unicode" - othercnxn = pyodbc.connect(self.connection_string, unicode_results=True, autocommit=True) - othercursor = othercnxn.cursor() - - # ANSI data in an ANSI column ... - othercursor.execute("create table t1(s varchar(20))") - othercursor.execute("insert into t1 values(?)", 'test') - - # ... should be returned as Unicode - value = othercursor.execute("select s from t1").fetchone()[0] - self.assertEqual(value, u'test') - - - - def test_skip(self): - # Insert 1, 2, and 3. Fetch 1, skip 2, fetch 3. 
- - self.cursor.execute("create table t1(id int)"); - for i in range(1, 5): - self.cursor.execute("insert into t1 values(?)", i) - self.cursor.execute("select id from t1 order by id") - self.assertEqual(self.cursor.fetchone()[0], 1) - self.cursor.skip(2) - self.assertEqual(self.cursor.fetchone()[0], 4) - - def test_timeout(self): - self.assertEqual(self.cnxn.timeout, 0) # defaults to zero (off) - - self.cnxn.timeout = 30 - self.assertEqual(self.cnxn.timeout, 30) - - self.cnxn.timeout = 0 - self.assertEqual(self.cnxn.timeout, 0) - - def test_sets_execute(self): - # Only lists and tuples are allowed. - def f(): - self.cursor.execute("create table t1 (word varchar (100))") - words = set (['a']) - self.cursor.execute("insert into t1 (word) VALUES (?)", [words]) - - self.assertRaises(pyodbc.ProgrammingError, f) - - def test_sets_executemany(self): - # Only lists and tuples are allowed. - def f(): - self.cursor.execute("create table t1 (word varchar (100))") - words = set (['a']) - self.cursor.executemany("insert into t1 (word) values (?)", [words]) - - self.assertRaises(TypeError, f) - - def test_row_execute(self): - "Ensure we can use a Row object as a parameter to execute" - self.cursor.execute("create table t1(n int, s varchar(10))") - self.cursor.execute("insert into t1 values (1, 'a')") - row = self.cursor.execute("select n, s from t1").fetchone() - self.assertNotEqual(row, None) - - self.cursor.execute("create table t2(n int, s varchar(10))") - self.cursor.execute("insert into t2 values (?, ?)", row) - - def test_row_executemany(self): - "Ensure we can use a Row object as a parameter to executemany" - self.cursor.execute("create table t1(n int, s varchar(10))") - - for i in range(3): - self.cursor.execute("insert into t1 values (?, ?)", i, chr(ord('a')+i)) - - rows = self.cursor.execute("select n, s from t1").fetchall() - self.assertNotEqual(len(rows), 0) - - self.cursor.execute("create table t2(n int, s varchar(10))") - self.cursor.executemany("insert into t2 
values (?, ?)", rows) - - def test_description(self): - "Ensure cursor.description is correct" - - self.cursor.execute("create table t1(n int, s varchar(8), d decimal(5,2))") - self.cursor.execute("insert into t1 values (1, 'abc', '1.23')") - self.cursor.execute("select * from t1") - - # (I'm not sure the precision of an int is constant across different versions, bits, so I'm hand checking the - # items I do know. - - # int - t = self.cursor.description[0] - self.assertEqual(t[0], 'n') - self.assertEqual(t[1], int) - self.assertEqual(t[5], 0) # scale - self.assertEqual(t[6], True) # nullable - - # varchar(8) - t = self.cursor.description[1] - self.assertEqual(t[0], 's') - self.assertEqual(t[1], str) - self.assertEqual(t[4], 8) # precision - self.assertEqual(t[5], 0) # scale - self.assertEqual(t[6], True) # nullable - - # decimal(5, 2) - t = self.cursor.description[2] - self.assertEqual(t[0], 'd') - self.assertEqual(t[1], Decimal) - self.assertEqual(t[4], 5) # precision - self.assertEqual(t[5], 2) # scale - self.assertEqual(t[6], True) # nullable - - - def test_none_param(self): - "Ensure None can be used for params other than the first" - # Some driver/db versions would fail if NULL was not the first parameter because SQLDescribeParam (only used - # with NULL) could not be used after the first call to SQLBindParameter. This means None always worked for the - # first column, but did not work for later columns. - # - # If SQLDescribeParam doesn't work, pyodbc would use VARCHAR which almost always worked. However, - # binary/varbinary won't allow an implicit conversion. - - self.cursor.execute("create table t1(n int, blob varbinary(max)) with(heap)") - self.cursor.execute("insert into t1 values (1, 0x1234)") - row = self.cursor.execute("select * from t1").fetchone() - self.assertEqual(row.n, 1) - self.assertEqual(type(row.blob), bytearray) - - sql = "update t1 set n=?, blob=?" 
- try: - self.cursor.setinputsizes([(), (pyodbc.SQL_VARBINARY, None, None)]) - self.cursor.execute(sql, 2, None) - except pyodbc.DataError: - if self.driver_type_is('freetds'): - # FREETDS_KNOWN_ISSUE - # - # cnxn.getinfo(pyodbc.SQL_DESCRIBE_PARAMETER) returns False for FreeTDS, so - # pyodbc can't call SQLDescribeParam to get the correct parameter type. - # This can lead to errors being returned from SQL Server when sp_prepexec is called, - # e.g., "Implicit conversion from data type varchar to varbinary(max) is not allowed." - # - # So at least verify that the user can manually specify the parameter type - self.cursor.setinputsizes([(), (pyodbc.SQL_VARBINARY, None, None)]) - self.cursor.execute(sql, 2, None) - else: - raise - row = self.cursor.execute("select * from t1").fetchone() - self.assertEqual(row.n, 2) - self.assertEqual(row.blob, None) - - - def test_output_conversion(self): - def convert(value): - # `value` will be a string. We'll simply add an X at the beginning at the end. - return 'X' + value + 'X' - self.cnxn.add_output_converter(pyodbc.SQL_VARCHAR, convert) - self.cursor.execute("create table t1(n int, v varchar(10))") - self.cursor.execute("insert into t1 values (1, '123.45')") - value = self.cursor.execute("select v from t1").fetchone()[0] - self.assertEqual(value, 'X123.45X') - - # Now clear the conversions and try again. There should be no Xs this time. - self.cnxn.clear_output_converters() - value = self.cursor.execute("select v from t1").fetchone()[0] - self.assertEqual(value, '123.45') - - - def test_too_large(self): - """Ensure error raised if insert fails due to truncation""" - value = 'x' * 1000 - self.cursor.execute("create table t1(s varchar(800))") - def test(): - self.cursor.execute("insert into t1 values (?)", value) - self.assertRaises(pyodbc.DataError, test) - - def test_login_timeout(self): - # This can only test setting since there isn't a way to cause it to block on the server side. 
- cnxns = pyodbc.connect(self.connection_string, timeout=2) - - def test_row_equal(self): - self.cursor.execute("create table t1(n int, s varchar(20))") - self.cursor.execute("insert into t1 values (1, 'test')") - row1 = self.cursor.execute("select n, s from t1").fetchone() - row2 = self.cursor.execute("select n, s from t1").fetchone() - b = (row1 == row2) - self.assertEqual(b, True) - - def test_row_gtlt(self): - self.cursor.execute("create table t1(n int, s varchar(20))") - self.cursor.execute("insert into t1 values (1, 'test1')") - self.cursor.execute("insert into t1 values (1, 'test2')") - rows = self.cursor.execute("select n, s from t1 order by s").fetchall() - self.assertTrue(rows[0] < rows[1]) - self.assertTrue(rows[0] <= rows[1]) - self.assertTrue(rows[1] > rows[0]) - self.assertTrue(rows[1] >= rows[0]) - self.assertTrue(rows[0] != rows[1]) - - rows = list(rows) - rows.sort() # uses < - - def test_context_manager_success(self): - """ - Ensure a successful with statement causes a commit. - """ - self.cursor.execute("create table t1(n int)") - - with pyodbc.connect(self.connection_string) as cnxn: - cursor = cnxn.cursor() - cursor.execute("insert into t1 values (1)") - - cnxn = None - cursor = None - - rows = self.cursor.execute("select n from t1").fetchall() - self.assertEqual(len(rows), 1) - self.assertEqual(rows[0][0], 1) - - - def test_context_manager_fail(self): - """ - Ensure an exception in a with statement causes a rollback. - """ - self.cursor.execute("create table t1(n int)") - - try: - with pyodbc.connect(self.connection_string) as cnxn: - cursor = cnxn.cursor() - cursor.execute("insert into t1 values (1)") - raise Exception("Testing failure") - except Exception: - pass - - cnxn = None - cursor = None - - count = self.cursor.execute("select count(*) from t1").fetchone()[0] - self.assertEqual(count, 0) - - - def test_cursor_context_manager_success(self): - """ - Ensure a successful with statement using a cursor causes a commit. 
- """ - self.cursor.execute("create table t1(n int)") - - with pyodbc.connect(self.connection_string).cursor() as cursor: - cursor.execute("insert into t1 values (1)") - - cursor = None - - rows = self.cursor.execute("select n from t1").fetchall() - self.assertEqual(len(rows), 1) - self.assertEqual(rows[0][0], 1) - - - def test_cursor_context_manager_fail(self): - """ - Ensure an exception in a with statement using a cursor causes a rollback. - """ - self.cursor.execute("create table t1(n int)") - - try: - with pyodbc.connect(self.connection_string).cursor() as cursor: - cursor.execute("insert into t1 values (1)") - raise Exception("Testing failure") - except Exception: - pass - - cursor = None - - count = self.cursor.execute("select count(*) from t1").fetchone()[0] - self.assertEqual(count, 0) - - - def test_untyped_none(self): - # From issue 129 - value = self.cursor.execute("select ?", None).fetchone()[0] - self.assertEqual(value, None) - - def test_large_update_nodata(self): - self.cursor.execute('create table t1(a varbinary(max)) with(heap)') - hundredkb = bytearray('x'*100*1024) - self.cursor.setinputsizes([(pyodbc.SQL_VARBINARY,0,0)]) - self.cursor.execute('update t1 set a=? where 1=0', (hundredkb,)) - - - def test_no_fetch(self): - # Issue 89 with FreeTDS: Multiple selects (or catalog functions that issue selects) without fetches seem to - # confuse the driver. - self.cursor.execute('select 1') - self.cursor.execute('select 1') - self.cursor.execute('select 1') - - def test_drivers(self): - drivers = pyodbc.drivers() - self.assertEqual(list, type(drivers)) - self.assertTrue(len(drivers) > 0) - - m = re.search('DRIVER={?([^}]+?)}?;', self.connection_string, re.IGNORECASE) - current = m.group(1) - self.assertTrue(current in drivers) - - def test_prepare_cleanup(self): - # When statement is prepared, it is kept in case the next execute uses the same statement. 
This must be - # removed when a non-execute statement is used that returns results, such as SQLTables. - - self.cursor.execute("select top 1 name from sysobjects where name = ?", "bogus") - self.cursor.fetchone() - - self.cursor.tables("bogus") - - self.cursor.execute("select top 1 name from sysobjects where name = ?", "bogus") - self.cursor.fetchone() - - def test_emoticons(self): - # https://github.com/mkleehammer/pyodbc/issues/423 - # - # When sending a varchar parameter, pyodbc is supposed to set ColumnSize to the number - # of characters. Ensure it works even with 4-byte characters. - # - # http://www.fileformat.info/info/unicode/char/1f31c/index.htm - - v = "x \U0001F31C z" - - self.cursor.execute("create table t1(s varchar(100))") - self.cursor.execute("insert into t1 values (?)", v) - - result = self.cursor.execute("select s from t1").fetchone()[0] - - self.assertEqual(result, v) - -def main(): - from optparse import OptionParser - parser = OptionParser(usage=usage) - parser.add_option("-v", "--verbose", action="count", help="Increment test verbosity (can be used multiple times)") - parser.add_option("-d", "--debug", action="store_true", default=False, help="Print debugging items") - parser.add_option("-t", "--test", help="Run only the named test") - - (options, args) = parser.parse_args() - - if len(args) > 1: - parser.error('Only one argument is allowed. 
Do you need quotes around the connection string?') - - if not args: - connection_string = load_setup_connection_string('sqldwtests') - - if not connection_string: - parser.print_help() - raise SystemExit() - else: - connection_string = args[0] - - if options.verbose: - cnxn = pyodbc.connect(connection_string) - print_library_info(cnxn) - cnxn.close() - - suite = load_tests(SqlServerTestCase, options.test, connection_string) - - testRunner = unittest.TextTestRunner(verbosity=options.verbose) - result = testRunner.run(suite) - - return result - - -if __name__ == '__main__': - - # Add the build directory to the path so we're testing the latest build, not the installed version. - - add_to_path() - - import pyodbc - sys.exit(0 if main().wasSuccessful() else 1) diff --git a/tests2/sqlite.db b/tests2/sqlite.db deleted file mode 100644 index ccd00e50..00000000 Binary files a/tests2/sqlite.db and /dev/null differ diff --git a/tests2/sqlitetests.py b/tests2/sqlitetests.py deleted file mode 100755 index b402d19e..00000000 --- a/tests2/sqlitetests.py +++ /dev/null @@ -1,722 +0,0 @@ -#!/usr/bin/python -# -*- coding: latin-1 -*- - -usage = """\ -usage: %prog [options] connection_string - -Unit tests for SQLite using the ODBC driver from http://www.ch-werner.de/sqliteodbc - -To use, pass a connection string as the parameter. The tests will create and -drop tables t1 and t2 as necessary. On Windows, use the 32-bit driver with -32-bit Python and the 64-bit driver with 64-bit Python (regardless of your -operating system bitness). - -These run using the version from the 'build' directory, not the version -installed into the Python directories. You must run python setup.py build -before running the tests. 
- -You can also put the connection string into a tmp/setup.cfg file like so: - - [sqlitetests] - connection-string=Driver=SQLite3 ODBC Driver;Database=sqlite.db -""" - -import sys, os, re -import unittest -from decimal import Decimal -from datetime import datetime, date, time -from os.path import join, getsize, dirname, abspath -from testutils import * - -_TESTSTR = '0123456789-abcdefghijklmnopqrstuvwxyz-' - -def _generate_test_string(length): - """ - Returns a string of `length` characters, constructed by repeating _TESTSTR as necessary. - - To enhance performance, there are 3 ways data is read, based on the length of the value, so most data types are - tested with 3 lengths. This function helps us generate the test data. - - We use a recognizable data set instead of a single character to make it less likely that "overlap" errors will - be hidden and to help us manually identify where a break occurs. - """ - if length <= len(_TESTSTR): - return _TESTSTR[:length] - - c = (length + len(_TESTSTR)-1) / len(_TESTSTR) - v = _TESTSTR * c - return v[:length] - -class SqliteTestCase(unittest.TestCase): - - SMALL_FENCEPOST_SIZES = [ 0, 1, 255, 256, 510, 511, 512, 1023, 1024, 2047, 2048, 4000 ] - LARGE_FENCEPOST_SIZES = [ 4095, 4096, 4097, 10 * 1024, 20 * 1024 ] - - ANSI_FENCEPOSTS = [ _generate_test_string(size) for size in SMALL_FENCEPOST_SIZES ] - UNICODE_FENCEPOSTS = [ unicode(s) for s in ANSI_FENCEPOSTS ] - IMAGE_FENCEPOSTS = ANSI_FENCEPOSTS + [ _generate_test_string(size) for size in LARGE_FENCEPOST_SIZES ] - - def __init__(self, method_name, connection_string): - unittest.TestCase.__init__(self, method_name) - self.connection_string = connection_string - - def setUp(self): - self.cnxn = pyodbc.connect(self.connection_string) - self.cursor = self.cnxn.cursor() - - for i in range(3): - try: - self.cursor.execute("drop table t%d" % i) - self.cnxn.commit() - except: - pass - - self.cnxn.rollback() - - def tearDown(self): - try: - self.cursor.close() - self.cnxn.close() - 
except: - # If we've already closed the cursor or connection, exceptions are thrown. - pass - - def test_multiple_bindings(self): - "More than one bind and select on a cursor" - self.cursor.execute("create table t1(n int)") - self.cursor.execute("insert into t1 values (?)", 1) - self.cursor.execute("insert into t1 values (?)", 2) - self.cursor.execute("insert into t1 values (?)", 3) - for i in range(3): - self.cursor.execute("select n from t1 where n < ?", 10) - self.cursor.execute("select n from t1 where n < 3") - - - def test_different_bindings(self): - self.cursor.execute("create table t1(n int)") - self.cursor.execute("create table t2(d datetime)") - self.cursor.execute("insert into t1 values (?)", 1) - self.cursor.execute("insert into t2 values (?)", datetime.now()) - - def test_drivers(self): - p = pyodbc.drivers() - self.assertTrue(isinstance(p, list)) - - def test_datasources(self): - p = pyodbc.dataSources() - self.assertTrue(isinstance(p, dict)) - - def test_getinfo_string(self): - value = self.cnxn.getinfo(pyodbc.SQL_CATALOG_NAME_SEPARATOR) - self.assertTrue(isinstance(value, str)) - - def test_getinfo_bool(self): - value = self.cnxn.getinfo(pyodbc.SQL_ACCESSIBLE_TABLES) - self.assertTrue(isinstance(value, bool)) - - def test_getinfo_int(self): - value = self.cnxn.getinfo(pyodbc.SQL_DEFAULT_TXN_ISOLATION) - self.assertTrue(isinstance(value, (int, long))) - - def test_getinfo_smallint(self): - value = self.cnxn.getinfo(pyodbc.SQL_CONCAT_NULL_BEHAVIOR) - self.assertTrue(isinstance(value, int)) - - def test_fixed_unicode(self): - value = u"t\xebsting" - self.cursor.execute("create table t1(s nchar(7))") - self.cursor.execute("insert into t1 values(?)", u"t\xebsting") - v = self.cursor.execute("select * from t1").fetchone()[0] - self.assertEqual(type(v), unicode) - self.assertEqual(len(v), len(value)) # If we alloc'd wrong, the test below might work because of an embedded NULL - self.assertEqual(v, value) - - - def _test_strtype(self, sqltype, value, 
colsize=None): - """ - The implementation for string, Unicode, and binary tests. - """ - assert colsize is None or (value is None or colsize >= len(value)) - - if colsize: - sql = "create table t1(s %s(%s))" % (sqltype, colsize) - else: - sql = "create table t1(s %s)" % sqltype - - self.cursor.execute(sql) - self.cursor.execute("insert into t1 values(?)", value) - v = self.cursor.execute("select * from t1").fetchone()[0] - self.assertEqual(type(v), type(value)) - - if value is not None: - self.assertEqual(len(v), len(value)) - - self.assertEqual(v, value) - - # Reported by Andy Hochhaus in the pyodbc group: In 2.1.7 and earlier, a hardcoded length of 255 was used to - # determine whether a parameter was bound as a SQL_VARCHAR or SQL_LONGVARCHAR. Apparently SQL Server chokes if - # we bind as a SQL_LONGVARCHAR and the target column size is 8000 or less, which is considers just SQL_VARCHAR. - # This means binding a 256 character value would cause problems if compared with a VARCHAR column under - # 8001. We now use SQLGetTypeInfo to determine the time to switch. - # - # [42000] [Microsoft][SQL Server Native Client 10.0][SQL Server]The data types varchar and text are incompatible in the equal to operator. - - self.cursor.execute("select * from t1 where s=?", value) - - - def _test_strliketype(self, sqltype, value, colsize=None): - """ - The implementation for text, image, ntext, and binary. - - These types do not support comparison operators. 
- """ - assert colsize is None or (value is None or colsize >= len(value)) - - if colsize: - sql = "create table t1(s %s(%s))" % (sqltype, colsize) - else: - sql = "create table t1(s %s)" % sqltype - - self.cursor.execute(sql) - self.cursor.execute("insert into t1 values(?)", value) - v = self.cursor.execute("select * from t1").fetchone()[0] - self.assertEqual(type(v), type(value)) - - if value is not None: - self.assertEqual(len(v), len(value)) - - self.assertEqual(v, value) - - # - # text - # - - def test_text_null(self): - self._test_strtype('text', None, 100) - - # Generate a test for each fencepost size: test_text_0, etc. - def _maketest(value): - def t(self): - self._test_strtype('text', value, len(value)) - return t - for value in UNICODE_FENCEPOSTS: - locals()['test_text_%s' % len(value)] = _maketest(value) - - def test_text_upperlatin(self): - self._test_strtype('varchar', u'') - - # - # blob - # - - def test_null_blob(self): - self._test_strtype('blob', None, 100) - - def test_large_null_blob(self): - # Bug 1575064 - self._test_strtype('blob', None, 4000) - - # Generate a test for each fencepost size: test_unicode_0, etc. - def _maketest(value): - def t(self): - self._test_strtype('blob', bytearray(value), len(value)) - return t - for value in ANSI_FENCEPOSTS: - locals()['test_blob_%s' % len(value)] = _maketest(value) - - def test_subquery_params(self): - """Ensure parameter markers work in a subquery""" - self.cursor.execute("create table t1(id integer, s varchar(20))") - self.cursor.execute("insert into t1 values (?,?)", 1, 'test') - row = self.cursor.execute(""" - select x.id - from ( - select id - from t1 - where s = ? - and id between ? and ? 
- ) x - """, 'test', 1, 10).fetchone() - self.assertNotEqual(row, None) - self.assertEqual(row[0], 1) - - def _exec(self): - self.cursor.execute(self.sql) - - def test_close_cnxn(self): - """Make sure using a Cursor after closing its connection doesn't crash.""" - - self.cursor.execute("create table t1(id integer, s varchar(20))") - self.cursor.execute("insert into t1 values (?,?)", 1, 'test') - self.cursor.execute("select * from t1") - - self.cnxn.close() - - # Now that the connection is closed, we expect an exception. (If the code attempts to use - # the HSTMT, we'll get an access violation instead.) - self.sql = "select * from t1" - self.assertRaises(pyodbc.ProgrammingError, self._exec) - - def test_empty_unicode(self): - self.cursor.execute("create table t1(s nvarchar(20))") - self.cursor.execute("insert into t1 values(?)", u"") - - def test_unicode_query(self): - self.cursor.execute(u"select 1") - - def test_negative_row_index(self): - self.cursor.execute("create table t1(s varchar(20))") - self.cursor.execute("insert into t1 values(?)", "1") - row = self.cursor.execute("select * from t1").fetchone() - self.assertEqual(row[0], "1") - self.assertEqual(row[-1], "1") - - def test_version(self): - self.assertEqual(3, len(pyodbc.version.split('.'))) # 1.3.1 etc. 
- - # - # ints and floats - # - - def test_int(self): - value = 1234 - self.cursor.execute("create table t1(n int)") - self.cursor.execute("insert into t1 values (?)", value) - result = self.cursor.execute("select n from t1").fetchone()[0] - self.assertEqual(result, value) - - def test_negative_int(self): - value = -1 - self.cursor.execute("create table t1(n int)") - self.cursor.execute("insert into t1 values (?)", value) - result = self.cursor.execute("select n from t1").fetchone()[0] - self.assertEqual(result, value) - - def test_bigint(self): - input = 3000000000 - self.cursor.execute("create table t1(d bigint)") - self.cursor.execute("insert into t1 values (?)", input) - result = self.cursor.execute("select d from t1").fetchone()[0] - self.assertEqual(result, input) - - def test_negative_bigint(self): - # Issue 186: BIGINT problem on 32-bit architeture - input = -430000000 - self.cursor.execute("create table t1(d bigint)") - self.cursor.execute("insert into t1 values (?)", input) - result = self.cursor.execute("select d from t1").fetchone()[0] - self.assertEqual(result, input) - - def test_float(self): - value = 1234.567 - self.cursor.execute("create table t1(n float)") - self.cursor.execute("insert into t1 values (?)", value) - result = self.cursor.execute("select n from t1").fetchone()[0] - self.assertEqual(result, value) - - def test_negative_float(self): - value = -200 - self.cursor.execute("create table t1(n float)") - self.cursor.execute("insert into t1 values (?)", value) - result = self.cursor.execute("select n from t1").fetchone()[0] - self.assertEqual(value, result) - - # - # rowcount - # - - # Note: SQLRowCount does not define what the driver must return after a select statement - # and says that its value should not be relied upon. The sqliteodbc driver is hardcoded to - # return 0 so I've deleted the test. 
- - def test_rowcount_delete(self): - self.assertEqual(self.cursor.rowcount, -1) - self.cursor.execute("create table t1(i int)") - count = 4 - for i in range(count): - self.cursor.execute("insert into t1 values (?)", i) - self.cursor.execute("delete from t1") - self.assertEqual(self.cursor.rowcount, count) - - def test_rowcount_nodata(self): - """ - This represents a different code path than a delete that deleted something. - - The return value is SQL_NO_DATA and code after it was causing an error. We could use SQL_NO_DATA to step over - the code that errors out and drop down to the same SQLRowCount code. On the other hand, we could hardcode a - zero return value. - """ - self.cursor.execute("create table t1(i int)") - # This is a different code path internally. - self.cursor.execute("delete from t1") - self.assertEqual(self.cursor.rowcount, 0) - - # In the 2.0.x branch, Cursor.execute sometimes returned the cursor and sometimes the rowcount. This proved very - # confusing when things went wrong and added very little value even when things went right since users could always - # use: cursor.execute("...").rowcount - - def test_retcursor_delete(self): - self.cursor.execute("create table t1(i int)") - self.cursor.execute("insert into t1 values (1)") - v = self.cursor.execute("delete from t1") - self.assertEqual(v, self.cursor) - - def test_retcursor_nodata(self): - """ - This represents a different code path than a delete that deleted something. - - The return value is SQL_NO_DATA and code after it was causing an error. We could use SQL_NO_DATA to step over - the code that errors out and drop down to the same SQLRowCount code. - """ - self.cursor.execute("create table t1(i int)") - # This is a different code path internally. 
- v = self.cursor.execute("delete from t1") - self.assertEqual(v, self.cursor) - - def test_retcursor_select(self): - self.cursor.execute("create table t1(i int)") - self.cursor.execute("insert into t1 values (1)") - v = self.cursor.execute("select * from t1") - self.assertEqual(v, self.cursor) - - # - # misc - # - - def test_lower_case(self): - "Ensure pyodbc.lowercase forces returned column names to lowercase." - - # Has to be set before creating the cursor, so we must recreate self.cursor. - - pyodbc.lowercase = True - self.cursor = self.cnxn.cursor() - - self.cursor.execute("create table t1(Abc int, dEf int)") - self.cursor.execute("select * from t1") - - names = [ t[0] for t in self.cursor.description ] - names.sort() - - self.assertEqual(names, [ "abc", "def" ]) - - # Put it back so other tests don't fail. - pyodbc.lowercase = False - - def test_row_description(self): - """ - Ensure Cursor.description is accessible as Row.cursor_description. - """ - self.cursor = self.cnxn.cursor() - self.cursor.execute("create table t1(a int, b char(3))") - self.cnxn.commit() - self.cursor.execute("insert into t1 values(1, 'abc')") - - row = self.cursor.execute("select * from t1").fetchone() - - self.assertEqual(self.cursor.description, row.cursor_description) - - - def test_executemany(self): - self.cursor.execute("create table t1(a int, b varchar(10))") - - params = [ (i, str(i)) for i in range(1, 6) ] - - self.cursor.executemany("insert into t1(a, b) values (?,?)", params) - - count = self.cursor.execute("select count(*) from t1").fetchone()[0] - self.assertEqual(count, len(params)) - - self.cursor.execute("select a, b from t1 order by a") - rows = self.cursor.fetchall() - self.assertEqual(count, len(rows)) - - for param, row in zip(params, rows): - self.assertEqual(param[0], row[0]) - self.assertEqual(param[1], row[1]) - - - def test_executemany_one(self): - "Pass executemany a single sequence" - self.cursor.execute("create table t1(a int, b varchar(10))") - - params = [ 
(1, "test") ] - - self.cursor.executemany("insert into t1(a, b) values (?,?)", params) - - count = self.cursor.execute("select count(*) from t1").fetchone()[0] - self.assertEqual(count, len(params)) - - self.cursor.execute("select a, b from t1 order by a") - rows = self.cursor.fetchall() - self.assertEqual(count, len(rows)) - - for param, row in zip(params, rows): - self.assertEqual(param[0], row[0]) - self.assertEqual(param[1], row[1]) - - - def test_executemany_failure(self): - """ - Ensure that an exception is raised if one query in an executemany fails. - """ - self.cursor.execute("create table t1(a int, b varchar(10))") - - params = [ (1, 'good'), - ('error', 'not an int'), - (3, 'good') ] - - self.assertRaises(pyodbc.Error, self.cursor.executemany, "insert into t1(a, b) value (?, ?)", params) - - - def test_row_slicing(self): - self.cursor.execute("create table t1(a int, b int, c int, d int)"); - self.cursor.execute("insert into t1 values(1,2,3,4)") - - row = self.cursor.execute("select * from t1").fetchone() - - result = row[:] - self.assertTrue(result is row) - - result = row[:-1] - self.assertEqual(result, (1,2,3)) - - result = row[0:4] - self.assertTrue(result is row) - - - def test_row_repr(self): - self.cursor.execute("create table t1(a int, b int, c int, d int)"); - self.cursor.execute("insert into t1 values(1,2,3,4)") - - row = self.cursor.execute("select * from t1").fetchone() - - result = str(row) - self.assertEqual(result, "(1, 2, 3, 4)") - - result = str(row[:-1]) - self.assertEqual(result, "(1, 2, 3)") - - result = str(row[:1]) - self.assertEqual(result, "(1,)") - - - def test_view_select(self): - # Reported in forum: Can't select from a view? I think I do this a lot, but another test never hurts. - - # Create a table (t1) with 3 rows and a view (t2) into it. 
- self.cursor.execute("create table t1(c1 int identity(1, 1), c2 varchar(50))") - for i in range(3): - self.cursor.execute("insert into t1(c2) values (?)", "string%s" % i) - self.cursor.execute("create view t2 as select * from t1") - - # Select from the view - self.cursor.execute("select * from t2") - rows = self.cursor.fetchall() - self.assertTrue(rows is not None) - self.assertTrue(len(rows) == 3) - - def test_autocommit(self): - self.assertEqual(self.cnxn.autocommit, False) - - othercnxn = pyodbc.connect(self.connection_string, autocommit=True) - self.assertEqual(othercnxn.autocommit, True) - - othercnxn.autocommit = False - self.assertEqual(othercnxn.autocommit, False) - - def test_unicode_results(self): - "Ensure unicode_results forces Unicode" - othercnxn = pyodbc.connect(self.connection_string, unicode_results=True) - othercursor = othercnxn.cursor() - - # ANSI data in an ANSI column ... - othercursor.execute("create table t1(s varchar(20))") - othercursor.execute("insert into t1 values(?)", 'test') - - # ... should be returned as Unicode - value = othercursor.execute("select s from t1").fetchone()[0] - self.assertEqual(value, u'test') - - def test_skip(self): - # Insert 1, 2, and 3. Fetch 1, skip 2, fetch 3. - - self.cursor.execute("create table t1(id int)"); - for i in range(1, 5): - self.cursor.execute("insert into t1 values(?)", i) - self.cursor.execute("select id from t1 order by id") - self.assertEqual(self.cursor.fetchone()[0], 1) - self.cursor.skip(2) - self.assertEqual(self.cursor.fetchone()[0], 4) - - def test_sets_execute(self): - # Only lists and tuples are allowed. - def f(): - self.cursor.execute("create table t1 (word varchar (100))") - words = set (['a']) - self.cursor.execute("insert into t1 (word) VALUES (?)", [words]) - - self.assertRaises(pyodbc.ProgrammingError, f) - - def test_sets_executemany(self): - # Only lists and tuples are allowed. 
- def f(): - self.cursor.execute("create table t1 (word varchar (100))") - words = set (['a']) - self.cursor.executemany("insert into t1 (word) values (?)", [words]) - - self.assertRaises(TypeError, f) - - def test_row_execute(self): - "Ensure we can use a Row object as a parameter to execute" - self.cursor.execute("create table t1(n int, s varchar(10))") - self.cursor.execute("insert into t1 values (1, 'a')") - row = self.cursor.execute("select n, s from t1").fetchone() - self.assertNotEqual(row, None) - - self.cursor.execute("create table t2(n int, s varchar(10))") - self.cursor.execute("insert into t2 values (?, ?)", row) - - def test_row_executemany(self): - "Ensure we can use a Row object as a parameter to executemany" - self.cursor.execute("create table t1(n int, s varchar(10))") - - for i in range(3): - self.cursor.execute("insert into t1 values (?, ?)", i, chr(ord('a')+i)) - - rows = self.cursor.execute("select n, s from t1").fetchall() - self.assertNotEqual(len(rows), 0) - - self.cursor.execute("create table t2(n int, s varchar(10))") - self.cursor.executemany("insert into t2 values (?, ?)", rows) - - def test_description(self): - "Ensure cursor.description is correct" - - self.cursor.execute("create table t1(n int, s text)") - self.cursor.execute("insert into t1 values (1, 'abc')") - self.cursor.execute("select * from t1") - - # (I'm not sure the precision of an int is constant across different versions, bits, so I'm hand checking the - # items I do know. 
- - # int - t = self.cursor.description[0] - self.assertEqual(t[0], 'n') - self.assertEqual(t[1], int) - self.assertEqual(t[5], 0) # scale - self.assertEqual(t[6], True) # nullable - - # text - t = self.cursor.description[1] - self.assertEqual(t[0], 's') - self.assertEqual(t[1], str) - self.assertEqual(t[5], 0) # scale - self.assertEqual(t[6], True) # nullable - - def test_row_equal(self): - self.cursor.execute("create table t1(n int, s varchar(20))") - self.cursor.execute("insert into t1 values (1, 'test')") - row1 = self.cursor.execute("select n, s from t1").fetchone() - row2 = self.cursor.execute("select n, s from t1").fetchone() - b = (row1 == row2) - self.assertEqual(b, True) - - def test_row_gtlt(self): - self.cursor.execute("create table t1(n int, s varchar(20))") - self.cursor.execute("insert into t1 values (1, 'test1')") - self.cursor.execute("insert into t1 values (1, 'test2')") - rows = self.cursor.execute("select n, s from t1 order by s").fetchall() - self.assertTrue(rows[0] < rows[1]) - self.assertTrue(rows[0] <= rows[1]) - self.assertTrue(rows[1] > rows[0]) - self.assertTrue(rows[1] >= rows[0]) - self.assertTrue(rows[0] != rows[1]) - - rows = list(rows) - rows.sort() # uses < - - def _test_context_manager(self): - # TODO: This is failing, but it may be due to the design of sqlite. I've disabled it - # for now until I can research it some more. - - # WARNING: This isn't working right now. We've set the driver's autocommit to "off", - # but that doesn't automatically start a transaction. I'm not familiar enough with the - # internals of the driver to tell what is going on, but it looks like there is support - # for the autocommit flag. - # - # I thought it might be a timing issue, like it not actually starting a txn until you - # try to do something, but that doesn't seem to work either. I'll leave this in to - # remind us that it isn't working yet but we need to contact the SQLite ODBC driver - # author for some guidance. 
- - with pyodbc.connect(self.connection_string) as cnxn: - cursor = cnxn.cursor() - cursor.execute("begin") - cursor.execute("create table t1(i int)") - cursor.execute('rollback') - - # The connection should be closed now. - def test(): - cnxn.execute('rollback') - self.assertRaises(pyodbc.Error, test) - - def test_untyped_none(self): - # From issue 129 - value = self.cursor.execute("select ?", None).fetchone()[0] - self.assertEqual(value, None) - - def test_large_update_nodata(self): - self.cursor.execute('create table t1(a blob)') - hundredkb = 'x'*100*1024 - self.cursor.execute('update t1 set a=? where 1=0', (hundredkb,)) - - def test_no_fetch(self): - # Issue 89 with FreeTDS: Multiple selects (or catalog functions that issue selects) without fetches seem to - # confuse the driver. - self.cursor.execute('select 1') - self.cursor.execute('select 1') - self.cursor.execute('select 1') - - -def main(): - from optparse import OptionParser - parser = OptionParser(usage=usage) - parser.add_option("-v", "--verbose", default=0, action="count", help="Increment test verbosity (can be used multiple times)") - parser.add_option("-d", "--debug", action="store_true", default=False, help="Print debugging items") - parser.add_option("-t", "--test", help="Run only the named test") - - (options, args) = parser.parse_args() - - if len(args) > 1: - parser.error('Only one argument is allowed. 
Do you need quotes around the connection string?') - - if not args: - connection_string = load_setup_connection_string('sqlitetests') - - if not connection_string: - parser.print_help() - raise SystemExit() - else: - connection_string = args[0] - - if options.verbose: - cnxn = pyodbc.connect(connection_string) - print_library_info(cnxn) - cnxn.close() - - suite = load_tests(SqliteTestCase, options.test, connection_string) - - testRunner = unittest.TextTestRunner(verbosity=options.verbose) - result = testRunner.run(suite) - - return result - - -if __name__ == '__main__': - - # Add the build directory to the path so we're testing the latest build, not the installed version. - - add_to_path() - - import pyodbc - sys.exit(0 if main().wasSuccessful() else 1) diff --git a/tests2/sqlservertests.py b/tests2/sqlservertests.py deleted file mode 100755 index 6e6e37b8..00000000 --- a/tests2/sqlservertests.py +++ /dev/null @@ -1,2036 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -from __future__ import print_function - -usage = """\ -usage: %prog [options] connection_string - -Unit tests for SQL Server. To use, pass a connection string as the parameter. -The tests will create and drop tables t1 and t2 as necessary. - -These run using the version from the 'build' directory, not the version -installed into the Python directories. You must run python setup.py build -before running the tests. - -You can also put the connection string into a tmp/setup.cfg file like so: - - [sqlservertests] - connection-string=DRIVER={SQL Server};SERVER=localhost;UID=uid;PWD=pwd;DATABASE=db - -The connection string above will use the 2000/2005 driver, even if SQL Server 2008 -is installed: - - 2000: DRIVER={SQL Server} - 2005: DRIVER={SQL Server} - 2008: DRIVER={SQL Server Native Client 10.0} - -If using FreeTDS ODBC, be sure to use version 1.00.97 or newer. 
-""" - -import sys, os, re, uuid -import unittest -from decimal import Decimal -from datetime import datetime, date, time -from os.path import join, getsize, dirname, abspath -from warnings import warn -from testutils import * - -# Some tests have fallback code for known driver issues. -# Change this value to False to bypass the fallback code, e.g., to see -# if a newer version of the driver has fixed the underlying issue. -# -handle_known_issues = True - -_TESTSTR = '0123456789-abcdefghijklmnopqrstuvwxyz-' - -def _generate_test_string(length): - """ - Returns a string of `length` characters, constructed by repeating _TESTSTR as necessary. - - To enhance performance, there are 3 ways data is read, based on the length of the value, so most data types are - tested with 3 lengths. This function helps us generate the test data. - - We use a recognizable data set instead of a single character to make it less likely that "overlap" errors will - be hidden and to help us manually identify where a break occurs. 
- """ - if length <= len(_TESTSTR): - return _TESTSTR[:length] - - c = (length + len(_TESTSTR)-1) / len(_TESTSTR) - v = _TESTSTR * c - return v[:length] - -class SqlServerTestCase(unittest.TestCase): - - SMALL_FENCEPOST_SIZES = [ 0, 1, 255, 256, 510, 511, 512, 1023, 1024, 2047, 2048, 4000 ] - LARGE_FENCEPOST_SIZES = [ 4095, 4096, 4097, 10 * 1024, 20 * 1024 ] - MAX_FENCEPOST_SIZES = [ 5 * 1024 * 1024 ] #, 50 * 1024 * 1024 ] - - ANSI_SMALL_FENCEPOSTS = [ _generate_test_string(size) for size in SMALL_FENCEPOST_SIZES ] - UNICODE_SMALL_FENCEPOSTS = [ unicode(s) for s in ANSI_SMALL_FENCEPOSTS ] - ANSI_LARGE_FENCEPOSTS = ANSI_SMALL_FENCEPOSTS + [ _generate_test_string(size) for size in LARGE_FENCEPOST_SIZES ] - UNICODE_LARGE_FENCEPOSTS = UNICODE_SMALL_FENCEPOSTS + [ unicode(s) for s in [_generate_test_string(size) for size in LARGE_FENCEPOST_SIZES ]] - - ANSI_MAX_FENCEPOSTS = ANSI_LARGE_FENCEPOSTS + [ _generate_test_string(size) for size in MAX_FENCEPOST_SIZES ] - UNICODE_MAX_FENCEPOSTS = UNICODE_LARGE_FENCEPOSTS + [ unicode(s) for s in [_generate_test_string(size) for size in MAX_FENCEPOST_SIZES ]] - - - def __init__(self, method_name, connection_string): - unittest.TestCase.__init__(self, method_name) - self.connection_string = connection_string - - def driver_type_is(self, type_name): - recognized_types = { - 'msodbcsql': '(Microsoft) ODBC Driver xx for SQL Server', - 'freetds': 'FreeTDS ODBC', - } - if not type_name in recognized_types.keys(): - raise KeyError('"{0}" is not a recognized driver type: {1}'.format(type_name, list(recognized_types.keys()))) - driver_name = self.cnxn.getinfo(pyodbc.SQL_DRIVER_NAME).lower() - if type_name == 'msodbcsql': - return ('msodbcsql' in driver_name) or ('sqlncli' in driver_name) or ('sqlsrv32.dll' == driver_name) - elif type_name == 'freetds': - return ('tdsodbc' in driver_name) - - def handle_known_issues_for(self, type_name, print_reminder=False): - """ - Checks driver `type_name` and "killswitch" variable `handle_known_issues` 
to see if - known issue handling should be bypassed. Optionally prints a reminder message to - help identify tests that previously had issues but may have been fixed by a newer - version of the driver. - - Usage examples: - - # 1. print reminder at beginning of test (before any errors can occur) - # - def test_some_feature(self): - self.handle_known_issues_for('freetds', print_reminder=True) - # (continue with test code) - - # 2. conditional execution of fallback code - # - try: - # (some test code) - except pyodbc.DataError: - if self.handle_known_issues_for('freetds'): - # FREETDS_KNOWN_ISSUE - # - # (fallback code to work around exception) - else: - raise - """ - if self.driver_type_is(type_name): - if handle_known_issues: - return True - else: - if print_reminder: - print("Known issue handling is disabled. Does this test still fail?") - return False - - def driver_type_is(self, type_name): - recognized_types = { - 'msodbcsql': '(Microsoft) ODBC Driver xx for SQL Server', - 'freetds': 'FreeTDS ODBC', - } - if not type_name in recognized_types.keys(): - raise KeyError('"{0}" is not a recognized driver type: {1}'.format(type_name, list(recognized_types.keys()))) - driver_name = self.cnxn.getinfo(pyodbc.SQL_DRIVER_NAME).lower() - if type_name == 'msodbcsql': - return ('msodbcsql' in driver_name) or ('sqlncli' in driver_name) or ('sqlsrv32.dll' == driver_name) - elif type_name == 'freetds': - return ('tdsodbc' in driver_name) - - def get_sqlserver_version(self): - """ - Returns the major version: 8-->2000, 9-->2005, 10-->2008 - """ - self.cursor.execute("exec master..xp_msver 'ProductVersion'") - row = self.cursor.fetchone() - return int(row.Character_Value.split('.', 1)[0]) - - def setUp(self): - self.cnxn = pyodbc.connect(self.connection_string) - self.cursor = self.cnxn.cursor() - - for i in range(3): - try: - self.cursor.execute("drop table t%d" % i) - self.cnxn.commit() - except: - pass - - for i in range(3): - try: - self.cursor.execute("drop procedure proc%d" 
% i) - self.cnxn.commit() - except: - pass - - try: - self.cursor.execute('drop function func1') - self.cnxn.commit() - except: - pass - - self.cnxn.rollback() - - def tearDown(self): - try: - self.cursor.close() - self.cnxn.close() - except: - # If we've already closed the cursor or connection, exceptions are thrown. - pass - - def test_binary_type(self): - if sys.hexversion >= 0x02060000: - self.assertTrue(pyodbc.BINARY is bytearray) - else: - self.assertTrue(pyodbc.BINARY is buffer) - - def test_multiple_bindings(self): - "More than one bind and select on a cursor" - self.cursor.execute("create table t1(n int)") - self.cursor.execute("insert into t1 values (?)", 1) - self.cursor.execute("insert into t1 values (?)", 2) - self.cursor.execute("insert into t1 values (?)", 3) - for i in range(3): - self.cursor.execute("select n from t1 where n < ?", 10) - self.cursor.execute("select n from t1 where n < 3") - - - def test_different_bindings(self): - self.cursor.execute("create table t1(n int)") - self.cursor.execute("create table t2(d datetime)") - self.cursor.execute("insert into t1 values (?)", 1) - self.cursor.execute("insert into t2 values (?)", datetime.now()) - - def test_drivers(self): - p = pyodbc.drivers() - self.assertTrue(isinstance(p, list)) - - def test_datasources(self): - p = pyodbc.dataSources() - self.assertTrue(isinstance(p, dict)) - - def test_getinfo_string(self): - value = self.cnxn.getinfo(pyodbc.SQL_CATALOG_NAME_SEPARATOR) - self.assertTrue(isinstance(value, str)) - - def test_getinfo_bool(self): - value = self.cnxn.getinfo(pyodbc.SQL_ACCESSIBLE_TABLES) - self.assertTrue(isinstance(value, bool)) - - def test_getinfo_int(self): - value = self.cnxn.getinfo(pyodbc.SQL_DEFAULT_TXN_ISOLATION) - self.assertTrue(isinstance(value, (int, long))) - - def test_getinfo_smallint(self): - value = self.cnxn.getinfo(pyodbc.SQL_CONCAT_NULL_BEHAVIOR) - self.assertTrue(isinstance(value, int)) - - def test_noscan(self): - self.assertEqual(self.cursor.noscan, False) 
- self.cursor.noscan = True - self.assertEqual(self.cursor.noscan, True) - - def test_nonnative_uuid(self): - # The default is False meaning we should return a string. Note that - # SQL Server seems to always return uppercase. - value = uuid.uuid4() - self.cursor.execute("create table t1(n uniqueidentifier)") - self.cursor.execute("insert into t1 values (?)", value) - - pyodbc.native_uuid = False - result = self.cursor.execute("select n from t1").fetchval() - self.assertEqual(type(result), unicode) - self.assertEqual(result, unicode(value).upper()) - - def test_native_uuid(self): - # When true, we should return a uuid.UUID object. - value = uuid.uuid4() - self.cursor.execute("create table t1(n uniqueidentifier)") - self.cursor.execute("insert into t1 values (?)", value) - - pyodbc.native_uuid = True - result = self.cursor.execute("select n from t1").fetchval() - self.assertTrue(isinstance(result, uuid.UUID)) - self.assertEqual(value, result) - - def test_nextset(self): - self.cursor.execute("create table t1(i int)") - for i in range(4): - self.cursor.execute("insert into t1(i) values(?)", i) - - self.cursor.execute("select i from t1 where i < 2 order by i; select i from t1 where i >= 2 order by i") - - for i, row in enumerate(self.cursor): - self.assertEqual(i, row.i) - - self.assertEqual(self.cursor.nextset(), True) - - for i, row in enumerate(self.cursor): - self.assertEqual(i + 2, row.i) - - def test_nextset_with_raiserror(self): - self.cursor.execute("select i = 1; RAISERROR('c', 16, 1);") - row = next(self.cursor) - self.assertEqual(1, row.i) - if self.driver_type_is('freetds'): - warn('FREETDS_KNOWN_ISSUE - test_nextset_with_raiserror: test cancelled.') - # AssertionError: ProgrammingError not raised by nextset - # https://github.com/FreeTDS/freetds/issues/230 - return # for now - self.assertRaises(pyodbc.ProgrammingError, self.cursor.nextset) - - def test_fixed_unicode(self): - value = u"t\xebsting" - self.cursor.execute("create table t1(s nchar(7))") - 
self.cursor.execute("insert into t1 values(?)", u"t\xebsting") - v = self.cursor.execute("select * from t1").fetchone()[0] - self.assertEqual(type(v), unicode) - self.assertEqual(len(v), len(value)) # If we alloc'd wrong, the test below might work because of an embedded NULL - self.assertEqual(v, value) - - - def _test_strtype(self, sqltype, value, resulttype=None, colsize=None): - """ - The implementation for string, Unicode, and binary tests. - """ - assert colsize in (None, 'max') or isinstance(colsize, int), colsize - assert colsize in (None, 'max') or (value is None or colsize >= len(value)) - - if colsize: - sql = "create table t1(s %s(%s))" % (sqltype, colsize) - else: - sql = "create table t1(s %s)" % sqltype - self.cursor.execute(sql) - - if resulttype is None: - resulttype = type(value) - - sql = "insert into t1 values(?)" - try: - self.cursor.execute(sql, value) - except pyodbc.DataError: - if self.driver_type_is('freetds'): - # FREETDS_KNOWN_ISSUE - # - # cnxn.getinfo(pyodbc.SQL_DESCRIBE_PARAMETER) returns False for FreeTDS, so - # pyodbc can't call SQLDescribeParam to get the correct parameter type. - # This can lead to errors being returned from SQL Server when sp_prepexec is called, - # e.g., "Implicit conversion from data type varchar to varbinary is not allowed." - # for test_binary_null - # - # So at least verify that the user can manually specify the parameter type - if sqltype == 'varbinary': - sql_param_type = pyodbc.SQL_VARBINARY - # (add elif blocks for other cases as required) - self.cursor.setinputsizes([(sql_param_type, colsize, 0)]) - self.cursor.execute(sql, value) - else: - raise - v = self.cursor.execute("select * from t1").fetchone()[0] - - # To allow buffer --> db --> bytearray tests, always convert the input to the expected result type before - # comparing. 
- if type(value) is not resulttype: - value = resulttype(value) - - self.assertEqual(v, value) - - - def _test_strliketype(self, sqltype, value, resulttype=None, colsize=None): - """ - The implementation for text, image, ntext, and binary. - - These types do not support comparison operators. - """ - assert colsize is None or isinstance(colsize, int), colsize - assert colsize is None or (value is None or colsize >= len(value)) - - if colsize: - sql = "create table t1(s %s(%s))" % (sqltype, colsize) - else: - sql = "create table t1(s %s)" % sqltype - - if resulttype is None: - resulttype = type(value) - - self.cursor.execute(sql) - self.cursor.execute("insert into t1 values(?)", value) - result = self.cursor.execute("select * from t1").fetchone()[0] - self.assertEqual(type(result), resulttype) - - # To allow buffer --> db --> bytearray tests, always convert the input to the expected result type before - # comparing. - if type(value) is not resulttype: - value = resulttype(value) - - self.assertEqual(result, value) - - - # - # varchar - # - - def test_varchar_null(self): - self._test_strtype('varchar', None, colsize=100) - - # Generate a test for each fencepost size: test_varchar_0, etc. 
- def _maketest(value): - def t(self): - self._test_strtype('varchar', value, colsize=len(value)) - return t - for value in UNICODE_SMALL_FENCEPOSTS: - locals()['test_varchar_%s' % len(value)] = _maketest(value) - - # Also test varchar(max) - def _maketest(value): - def t(self): - self._test_strtype('varchar', value, colsize='max') - return t - for value in UNICODE_MAX_FENCEPOSTS: - locals()['test_varcharmax_%s' % len(value)] = _maketest(value) - - def test_varchar_many(self): - self.cursor.execute("create table t1(c1 varchar(300), c2 varchar(300), c3 varchar(300))") - - v1 = 'ABCDEFGHIJ' * 30 - v2 = '0123456789' * 30 - v3 = '9876543210' * 30 - - self.cursor.execute("insert into t1(c1, c2, c3) values (?,?,?)", v1, v2, v3); - row = self.cursor.execute("select c1, c2, c3, len(c1) as l1, len(c2) as l2, len(c3) as l3 from t1").fetchone() - - self.assertEqual(v1, row.c1) - self.assertEqual(v2, row.c2) - self.assertEqual(v3, row.c3) - - def test_varchar_upperlatin(self): - self._test_strtype('varchar', u'\u00e5', colsize=1) - - # - # nvarchar - # - - def test_nvarchar_null(self): - self._test_strtype('nvarchar', None, colsize=100) - - # Generate a test for each fencepost size: test_unicode_0, etc. 
- def _maketest(value): - def t(self): - self._test_strtype('nvarchar', value, colsize=len(value)) - return t - for value in UNICODE_SMALL_FENCEPOSTS: - locals()['test_nvarchar_%s' % len(value)] = _maketest(value) - - # Also test nvarchar(max) - def _maketest(value): - def t(self): - self._test_strtype('nvarchar', value, colsize='max') - return t - for value in UNICODE_MAX_FENCEPOSTS: - locals()['test_nvarcharmax_%s' % len(value)] = _maketest(value) - - def test_unicode_upperlatin(self): - self._test_strtype('nvarchar', u'\u00e5', colsize=1) - - def test_unicode_longmax(self): - # Issue 188: Segfault when fetching NVARCHAR(MAX) data over 511 bytes - - ver = self.get_sqlserver_version() - if ver < 9: # 2005+ - return # so pass / ignore - self.cursor.execute("select cast(replicate(N'x', 512) as nvarchar(max))") - - def test_fast_executemany_to_local_temp_table(self): - if self.driver_type_is('freetds'): - warn('FREETDS_KNOWN_ISSUE - test_fast_executemany_to_local_temp_table: test cancelled.') - return - v = u'Ώπα' - self.cursor.execute("CREATE TABLE #issue295 (id INT IDENTITY PRIMARY KEY, txt NVARCHAR(50))") - sql = "INSERT INTO #issue295 (txt) VALUES (?)" - params = [(v,)] - self.cursor.setinputsizes([(pyodbc.SQL_WVARCHAR, 50, 0)]) - self.cursor.fast_executemany = True - self.cursor.executemany(sql, params) - self.assertEqual(self.cursor.execute("SELECT txt FROM #issue295").fetchval(), v) - - def test_fast_executemany_to_datetime2(self): - if self.handle_known_issues_for('freetds', print_reminder=True): - warn('FREETDS_KNOWN_ISSUE - test_fast_executemany_to_datetime2: test cancelled.') - return - v = datetime(2019, 3, 12, 10, 0, 0, 123456) - self.cursor.execute("CREATE TABLE ##issue540 (dt2 DATETIME2(2))") - sql = "INSERT INTO ##issue540 (dt2) VALUES (?)" - params = [(v,)] - self.cursor.fast_executemany = True - self.cursor.executemany(sql, params) - self.assertEqual(self.cursor.execute("SELECT CAST(dt2 AS VARCHAR) FROM ##issue540").fetchval(), '2019-03-12 
10:00:00.12') - - def test_fast_executemany_high_unicode(self): - if self.handle_known_issues_for('freetds', print_reminder=True): - warn('FREETDS_KNOWN_ISSUE - test_fast_executemany_high_unicode: test cancelled.') - return - v = u"🎥" - self.cursor.fast_executemany = True - self.cursor.execute("CREATE TABLE t1 (col1 nvarchar(max) null)") - self.cursor.executemany("INSERT INTO t1 (col1) VALUES (?)", [[v,]]) - self.assertEqual(self.cursor.execute("SELECT * FROM t1").fetchone()[0], v) - - # - # binary - # - - def test_binary_null(self): - self._test_strtype('varbinary', None, colsize=100) - - def test_large_binary_null(self): - # Bug 1575064 - self._test_strtype('varbinary', None, colsize=4000) - - def test_binaryNull_object(self): - self.cursor.execute("create table t1(n varbinary(10))") - self.cursor.execute("insert into t1 values (?)", pyodbc.BinaryNull); - - # buffer - - def _maketest(value): - def t(self): - self._test_strtype('varbinary', buffer(value), resulttype=pyodbc.BINARY, colsize=len(value)) - return t - for value in ANSI_SMALL_FENCEPOSTS: - locals()['test_binary_buffer_%s' % len(value)] = _maketest(value) - - # bytearray - - if sys.hexversion >= 0x02060000: - def _maketest(value): - def t(self): - self._test_strtype('varbinary', bytearray(value), colsize=len(value)) - return t - for value in ANSI_SMALL_FENCEPOSTS: - locals()['test_binary_bytearray_%s' % len(value)] = _maketest(value) - - # varbinary(max) - def _maketest(value): - def t(self): - self._test_strtype('varbinary', buffer(value), resulttype=pyodbc.BINARY, colsize='max') - return t - for value in ANSI_MAX_FENCEPOSTS: - locals()['test_binarymax_buffer_%s' % len(value)] = _maketest(value) - - # bytearray - - if sys.hexversion >= 0x02060000: - def _maketest(value): - def t(self): - self._test_strtype('varbinary', bytearray(value), colsize='max') - return t - for value in ANSI_MAX_FENCEPOSTS: - locals()['test_binarymax_bytearray_%s' % len(value)] = _maketest(value) - - # - # image - # - - def 
test_image_null(self): - self._test_strliketype('image', None, type(None)) - - # Generate a test for each fencepost size: test_unicode_0, etc. - def _maketest(value): - def t(self): - self._test_strliketype('image', buffer(value), pyodbc.BINARY) - return t - for value in ANSI_LARGE_FENCEPOSTS: - locals()['test_image_buffer_%s' % len(value)] = _maketest(value) - - if sys.hexversion >= 0x02060000: - # Python 2.6+ supports bytearray, which pyodbc considers varbinary. - - # Generate a test for each fencepost size: test_unicode_0, etc. - def _maketest(value): - def t(self): - self._test_strtype('image', bytearray(value)) - return t - for value in ANSI_LARGE_FENCEPOSTS: - locals()['test_image_bytearray_%s' % len(value)] = _maketest(value) - - def test_image_upperlatin(self): - self._test_strliketype('image', buffer('á'), pyodbc.BINARY) - - # - # text - # - - # def test_empty_text(self): - # self._test_strliketype('text', bytearray('')) - - def test_null_text(self): - self._test_strliketype('text', None, type(None)) - - # Generate a test for each fencepost size: test_unicode_0, etc. - def _maketest(value): - def t(self): - self._test_strliketype('text', value) - return t - for value in UNICODE_SMALL_FENCEPOSTS: - locals()['test_text_buffer_%s' % len(value)] = _maketest(value) - - def test_text_upperlatin(self): - self._test_strliketype('text', u'á') - - # - # xml - # - - # def test_empty_xml(self): - # self._test_strliketype('xml', bytearray('')) - - def test_null_xml(self): - self._test_strliketype('xml', None, type(None)) - - # Generate a test for each fencepost size: test_unicode_0, etc. - def _maketest(value): - def t(self): - self._test_strliketype('xml', value) - return t - for value in UNICODE_SMALL_FENCEPOSTS: - locals()['test_xml_buffer_%s' % len(value)] = _maketest(value) - - def test_xml_str(self): - # SQL Server treats XML like *binary* data. 
- # See https://msdn.microsoft.com/en-us/library/ms131375.aspx - # - # The real problem with this is that we *don't* know that a value is - # XML when we write it to the database. It is either an `str` or a - # `unicode` object, so we're going to convert it into one of *two* - # different formats. - # - # When we read it out of the database, all we know is that it is XML - # and we don't know how it was encoded so we don't know how to decode - # it. Since almost everyone treats XML as Unicode nowadays, we're going - # to decode XML as Unicode. Force your XML to Unicode before writing - # to the database. (Otherwise, set a global encoder for the XML type.) - ascii = 'test' - val = unicode(ascii) - self.cursor.execute("create table t1(a xml)") - self.cursor.execute("insert into t1 values (?)", val) - result = self.cursor.execute("select a from t1").fetchval() - self.assertEqual(result, val) - - def test_xml_upperlatin(self): - val = u'á' - self.cursor.execute("create table t1(a xml)") - self.cursor.execute("insert into t1 values (?)", val) - result = self.cursor.execute("select a from t1").fetchval() - self.assertEqual(result, val) - - # - # bit - # - - def test_bit(self): - value = True - self.cursor.execute("create table t1(b bit)") - self.cursor.execute("insert into t1 values (?)", value) - v = self.cursor.execute("select b from t1").fetchone()[0] - self.assertEqual(type(v), bool) - self.assertEqual(v, value) - - # - # decimal - # - - def _decimal(self, precision, scale, negative): - # From test provided by planders (thanks!) in Issue 91 - - self.cursor.execute("create table t1(d decimal(%s, %s))" % (precision, scale)) - - # Construct a decimal that uses the maximum precision and scale. - decStr = '9' * (precision - scale) - if scale: - decStr = decStr + "." 
+ '9' * scale - if negative: - decStr = "-" + decStr - value = Decimal(decStr) - - self.cursor.execute("insert into t1 values(?)", value) - - v = self.cursor.execute("select d from t1").fetchone()[0] - self.assertEqual(v, value) - - def _maketest(p, s, n): - def t(self): - self._decimal(p, s, n) - return t - for (p, s, n) in [ (1, 0, False), - (1, 0, True), - (6, 0, False), - (6, 2, False), - (6, 4, True), - (6, 6, True), - (38, 0, False), - (38, 10, False), - (38, 38, False), - (38, 0, True), - (38, 10, True), - (38, 38, True) ]: - locals()['test_decimal_%s_%s_%s' % (p, s, n and 'n' or 'p')] = _maketest(p, s, n) - - - def test_decimal_e(self): - """Ensure exponential notation decimals are properly handled""" - value = Decimal((0, (1, 2, 3), 5)) # prints as 1.23E+7 - self.cursor.execute("create table t1(d decimal(10, 2))") - self.cursor.execute("insert into t1 values (?)", value) - result = self.cursor.execute("select * from t1").fetchone()[0] - self.assertEqual(result, value) - - def test_subquery_params(self): - """Ensure parameter markers work in a subquery""" - self.cursor.execute("create table t1(id integer, s varchar(20))") - self.cursor.execute("insert into t1 values (?,?)", 1, 'test') - row = self.cursor.execute(""" - select x.id - from ( - select id - from t1 - where s = ? - and id between ? and ? - ) x - """, 'test', 1, 10).fetchone() - self.assertNotEqual(row, None) - self.assertEqual(row[0], 1) - - def _exec(self): - self.cursor.execute(self.sql) - - def test_close_cnxn(self): - """Make sure using a Cursor after closing its connection doesn't crash.""" - - self.cursor.execute("create table t1(id integer, s varchar(20))") - self.cursor.execute("insert into t1 values (?,?)", 1, 'test') - self.cursor.execute("select * from t1") - - self.cnxn.close() - - # Now that the connection is closed, we expect an exception. (If the code attempts to use - # the HSTMT, we'll get an access violation instead.) 
- self.sql = "select * from t1" - self.assertRaises(pyodbc.ProgrammingError, self._exec) - - def test_empty_string(self): - self.cursor.execute("create table t1(s varchar(20))") - self.cursor.execute("insert into t1 values(?)", "") - - def test_empty_string_encoding(self): - self.cnxn.setdecoding(pyodbc.SQL_CHAR, encoding='shift_jis') - value = "" - self.cursor.execute("create table t1(s varchar(20))") - self.cursor.execute("insert into t1 values(?)", value) - v = self.cursor.execute("select * from t1").fetchone()[0] - self.assertEqual(v, value) - - def test_fixed_char(self): - value = "testing" - self.cursor.execute("create table t1(s char(7))") - self.cursor.execute("insert into t1 values(?)", "testing") - v = self.cursor.execute("select * from t1").fetchone()[0] - self.assertEqual(v, value) - - def test_empty_unicode(self): - self.cursor.execute("create table t1(s nvarchar(20))") - self.cursor.execute("insert into t1 values(?)", u"") - - def test_empty_unicode_encoding(self): - self.cnxn.setdecoding(pyodbc.SQL_CHAR, encoding='shift_jis') - value = "" - self.cursor.execute("create table t1(s nvarchar(20))") - self.cursor.execute("insert into t1 values(?)", value) - v = self.cursor.execute("select * from t1").fetchone()[0] - self.assertEqual(v, value) - - def test_unicode_query(self): - self.cursor.execute(u"select 1") - - # From issue #206 - def _maketest(value): - def t(self): - self._test_strtype('nvarchar', value, colsize=len(value)) - return t - locals()['test_chinese_param'] = _maketest(u'我的') - - def test_chinese(self): - v = u'我的' - self.cursor.execute(u"SELECT N'我的' AS [Name]") - row = self.cursor.fetchone() - self.assertEqual(row[0], v) - - self.cursor.execute(u"SELECT N'我的' AS [Name]") - rows = self.cursor.fetchall() - self.assertEqual(rows[0][0], v) - - def test_negative_row_index(self): - self.cursor.execute("create table t1(s varchar(20))") - self.cursor.execute("insert into t1 values(?)", "1") - row = self.cursor.execute("select * from 
t1").fetchone() - self.assertEqual(row[0], "1") - self.assertEqual(row[-1], "1") - - def test_version(self): - self.assertEqual(3, len(pyodbc.version.split('.'))) # 1.3.1 etc. - - # - # date, time, datetime - # - - def test_datetime(self): - value = datetime(2007, 1, 15, 3, 4, 5) - - self.cursor.execute("create table t1(dt datetime)") - self.cursor.execute("insert into t1 values (?)", value) - - result = self.cursor.execute("select dt from t1").fetchone()[0] - self.assertEqual(type(result), datetime) - self.assertEqual(value, result) - - def test_datetime_fraction(self): - # SQL Server supports milliseconds, but Python's datetime supports nanoseconds, so the most granular datetime - # supported is xxx000. - - value = datetime(2007, 1, 15, 3, 4, 5, 123000) - - self.cursor.execute("create table t1(dt datetime)") - self.cursor.execute("insert into t1 values (?)", value) - - result = self.cursor.execute("select dt from t1").fetchone()[0] - self.assertEqual(type(result), datetime) - self.assertEqual(value, result) - - def test_datetime_fraction_rounded(self): - # SQL Server supports milliseconds, but Python's datetime supports nanoseconds. pyodbc rounds down to what the - # database supports. 
- - full = datetime(2007, 1, 15, 3, 4, 5, 123456) - rounded = datetime(2007, 1, 15, 3, 4, 5, 123000) - - self.cursor.execute("create table t1(dt datetime)") - self.cursor.execute("insert into t1 values (?)", full) - - result = self.cursor.execute("select dt from t1").fetchone()[0] - self.assertEqual(type(result), datetime) - self.assertEqual(rounded, result) - - def test_date(self): - ver = self.get_sqlserver_version() - if ver < 10: # 2008 only - return # so pass / ignore - - value = date.today() - - self.cursor.execute("create table t1(d date)") - self.cursor.execute("insert into t1 values (?)", value) - - result = self.cursor.execute("select d from t1").fetchone()[0] - self.assertEqual(type(result), date) - self.assertEqual(value, result) - - def test_time(self): - ver = self.get_sqlserver_version() - if ver < 10: # 2008 only - return # so pass / ignore - - value = datetime.now().time() - - # We aren't yet writing values using the new extended time type so the value written to the database is only - # down to the second. 
- value = value.replace(microsecond=0) - - self.cursor.execute("create table t1(t time)") - self.cursor.execute("insert into t1 values (?)", value) - - result = self.cursor.execute("select t from t1").fetchone()[0] - self.assertEqual(type(result), time) - self.assertEqual(value, result) - - def test_datetime2(self): - value = datetime(2007, 1, 15, 3, 4, 5) - - self.cursor.execute("create table t1(dt datetime2)") - self.cursor.execute("insert into t1 values (?)", value) - - result = self.cursor.execute("select dt from t1").fetchone()[0] - self.assertEqual(type(result), datetime) - self.assertEqual(value, result) - - # - # ints and floats - # - - def test_int(self): - value = 1234 - self.cursor.execute("create table t1(n int)") - self.cursor.execute("insert into t1 values (?)", value) - result = self.cursor.execute("select n from t1").fetchone()[0] - self.assertEqual(result, value) - - def test_negative_int(self): - value = -1 - self.cursor.execute("create table t1(n int)") - self.cursor.execute("insert into t1 values (?)", value) - result = self.cursor.execute("select n from t1").fetchone()[0] - self.assertEqual(result, value) - - def test_bigint(self): - input = 3000000000 - self.cursor.execute("create table t1(d bigint)") - self.cursor.execute("insert into t1 values (?)", input) - result = self.cursor.execute("select d from t1").fetchone()[0] - self.assertEqual(result, input) - - def test_overflow_int(self): - # python allows integers of any size, bigger than an 8 byte int can contain - input = 9999999999999999999999999999999999999 - self.cursor.execute("create table t1(d bigint)") - self.cnxn.commit() - self.assertRaises(OverflowError, self.cursor.execute, "insert into t1 values (?)", input) - result = self.cursor.execute("select * from t1").fetchall() - self.assertEqual(result, []) - - def test_float(self): - value = 1234.567 - self.cursor.execute("create table t1(n float)") - self.cursor.execute("insert into t1 values (?)", value) - result = 
self.cursor.execute("select n from t1").fetchone()[0] - self.assertEqual(result, value) - - def test_denorm_float(self): - value = 0.00012345 - self.cursor.execute("create table t1(n float)") - self.cursor.execute("insert into t1 values (?)", value) - result = self.cursor.execute("select n from t1").fetchone()[0] - self.assertEqual(result, value) - - def test_negative_float(self): - value = -200 - self.cursor.execute("create table t1(n float)") - self.cursor.execute("insert into t1 values (?)", value) - result = self.cursor.execute("select n from t1").fetchone()[0] - self.assertEqual(value, result) - - def test_non_numeric_float(self): - self.cursor.execute("create table t1(d float)") - self.cnxn.commit() - for input in (float('+Infinity'), float('-Infinity'), float('NaN')): - self.assertRaises(pyodbc.ProgrammingError, self.cursor.execute, "insert into t1 values (?)", input) - result = self.cursor.execute("select * from t1").fetchall() - self.assertEqual(result, []) - - - # - # stored procedures - # - - # def test_callproc(self): - # "callproc with a simple input-only stored procedure" - # pass - - def test_sp_results(self): - self.cursor.execute( - """ - Create procedure proc1 - AS - select top 10 name, id, xtype, refdate - from sysobjects - """) - rows = self.cursor.execute("exec proc1").fetchall() - self.assertEqual(type(rows), list) - self.assertEqual(len(rows), 10) # there has to be at least 10 items in sysobjects - self.assertEqual(type(rows[0].refdate), datetime) - - - def test_sp_results_from_temp(self): - - # Note: I've used "set nocount on" so that we don't get the number of rows deleted from #tmptable. - # If you don't do this, you'd need to call nextset() once to skip it. 
- - self.cursor.execute( - """ - Create procedure proc1 - AS - set nocount on - select top 10 name, id, xtype, refdate - into #tmptable - from sysobjects - - select * from #tmptable - """) - self.cursor.execute("exec proc1") - self.assertTrue(self.cursor.description is not None) - self.assertTrue(len(self.cursor.description) == 4) - - rows = self.cursor.fetchall() - self.assertEqual(type(rows), list) - self.assertEqual(len(rows), 10) # there has to be at least 10 items in sysobjects - self.assertEqual(type(rows[0].refdate), datetime) - - - def test_sp_results_from_vartbl(self): - self.cursor.execute( - """ - Create procedure proc1 - AS - set nocount on - declare @tmptbl table(name varchar(100), id int, xtype varchar(4), refdate datetime) - - insert into @tmptbl - select top 10 name, id, xtype, refdate - from sysobjects - - select * from @tmptbl - """) - self.cursor.execute("exec proc1") - rows = self.cursor.fetchall() - self.assertEqual(type(rows), list) - self.assertEqual(len(rows), 10) # there has to be at least 10 items in sysobjects - self.assertEqual(type(rows[0].refdate), datetime) - - def test_sp_with_dates(self): - # Reported in the forums that passing two datetimes to a stored procedure doesn't work. - self.cursor.execute( - """ - if exists (select * from dbo.sysobjects where id = object_id(N'[test_sp]') and OBJECTPROPERTY(id, N'IsProcedure') = 1) - drop procedure [dbo].[test_sp] - """) - self.cursor.execute( - """ - create procedure test_sp(@d1 datetime, @d2 datetime) - AS - declare @d as int - set @d = datediff(year, @d1, @d2) - select @d - """) - self.cursor.execute("exec test_sp ?, ?", datetime.now(), datetime.now()) - rows = self.cursor.fetchall() - self.assertTrue(rows is not None) - self.assertTrue(rows[0][0] == 0) # 0 years apart - - def test_sp_with_none(self): - # Reported in the forums that passing None caused an error. 
- self.cursor.execute( - """ - if exists (select * from dbo.sysobjects where id = object_id(N'[test_sp]') and OBJECTPROPERTY(id, N'IsProcedure') = 1) - drop procedure [dbo].[test_sp] - """) - self.cursor.execute( - """ - create procedure test_sp(@x varchar(20)) - AS - declare @y varchar(20) - set @y = @x - select @y - """) - self.cursor.execute("exec test_sp ?", None) - rows = self.cursor.fetchall() - self.assertTrue(rows is not None) - self.assertTrue(rows[0][0] == None) # 0 years apart - - - # - # rowcount - # - - def test_rowcount_delete(self): - self.assertEqual(self.cursor.rowcount, -1) - self.cursor.execute("create table t1(i int)") - count = 4 - for i in range(count): - self.cursor.execute("insert into t1 values (?)", i) - self.cursor.execute("delete from t1") - self.assertEqual(self.cursor.rowcount, count) - - def test_rowcount_nodata(self): - """ - This represents a different code path than a delete that deleted something. - - The return value is SQL_NO_DATA and code after it was causing an error. We could use SQL_NO_DATA to step over - the code that errors out and drop down to the same SQLRowCount code. On the other hand, we could hardcode a - zero return value. - """ - self.cursor.execute("create table t1(i int)") - # This is a different code path internally. - self.cursor.execute("delete from t1") - self.assertEqual(self.cursor.rowcount, 0) - - def test_rowcount_select(self): - """ - Ensure Cursor.rowcount is set properly after a select statement. - - pyodbc calls SQLRowCount after each execute and sets Cursor.rowcount, but SQL Server 2005 returns -1 after a - select statement, so we'll test for that behavior. This is valid behavior according to the DB API - specification, but people don't seem to like it. 
- """ - self.cursor.execute("create table t1(i int)") - count = 4 - for i in range(count): - self.cursor.execute("insert into t1 values (?)", i) - self.cursor.execute("select * from t1") - self.assertEqual(self.cursor.rowcount, -1) - - rows = self.cursor.fetchall() - self.assertEqual(len(rows), count) - self.assertEqual(self.cursor.rowcount, -1) - - def test_rowcount_reset(self): - "Ensure rowcount is reset after DDL" - - ddl_rowcount = 0 if self.driver_type_is('freetds') else -1 - - self.cursor.execute("create table t1(i int)") - count = 4 - for i in range(count): - self.cursor.execute("insert into t1 values (?)", i) - self.assertEqual(self.cursor.rowcount, 1) - - self.cursor.execute("create table t2(i int)") - self.assertEqual(self.cursor.rowcount, ddl_rowcount) - - # - # always return Cursor - # - - # In the 2.0.x branch, Cursor.execute sometimes returned the cursor and sometimes the rowcount. This proved very - # confusing when things went wrong and added very little value even when things went right since users could always - # use: cursor.execute("...").rowcount - - def test_retcursor_delete(self): - self.cursor.execute("create table t1(i int)") - self.cursor.execute("insert into t1 values (1)") - v = self.cursor.execute("delete from t1") - self.assertEqual(v, self.cursor) - - def test_retcursor_nodata(self): - """ - This represents a different code path than a delete that deleted something. - - The return value is SQL_NO_DATA and code after it was causing an error. We could use SQL_NO_DATA to step over - the code that errors out and drop down to the same SQLRowCount code. - """ - self.cursor.execute("create table t1(i int)") - # This is a different code path internally. 
- v = self.cursor.execute("delete from t1") - self.assertEqual(v, self.cursor) - - def test_retcursor_select(self): - self.cursor.execute("create table t1(i int)") - self.cursor.execute("insert into t1 values (1)") - v = self.cursor.execute("select * from t1") - self.assertEqual(v, self.cursor) - - # - # misc - # - - def table_with_spaces(self): - "Ensure we can select using [x z] syntax" - - try: - self.cursor.execute("create table [test one](int n)") - self.cursor.execute("insert into [test one] values(1)") - self.cursor.execute("select * from [test one]") - v = self.cursor.fetchone()[0] - self.assertEqual(v, 1) - finally: - self.cnxn.rollback() - - def test_lower_case(self): - "Ensure pyodbc.lowercase forces returned column names to lowercase." - - # Has to be set before creating the cursor, so we must recreate self.cursor. - - pyodbc.lowercase = True - self.cursor = self.cnxn.cursor() - - self.cursor.execute("create table t1(Abc int, dEf int)") - self.cursor.execute("select * from t1") - - names = [ t[0] for t in self.cursor.description ] - names.sort() - - self.assertEqual(names, [ "abc", "def" ]) - - # Put it back so other tests don't fail. - pyodbc.lowercase = False - - def test_row_description(self): - """ - Ensure Cursor.description is accessible as Row.cursor_description. - """ - self.cursor = self.cnxn.cursor() - self.cursor.execute("create table t1(a int, b char(3))") - self.cnxn.commit() - self.cursor.execute("insert into t1 values(1, 'abc')") - - row = self.cursor.execute("select * from t1").fetchone() - - self.assertEqual(self.cursor.description, row.cursor_description) - - - def test_temp_select(self): - # A project was failing to create temporary tables via select into. 
- self.cursor.execute("create table t1(s char(7))") - self.cursor.execute("insert into t1 values(?)", "testing") - v = self.cursor.execute("select * from t1").fetchone()[0] - self.assertEqual(type(v), unicode) - self.assertEqual(v, "testing") - - self.cursor.execute("select s into t2 from t1") - v = self.cursor.execute("select * from t1").fetchone()[0] - self.assertEqual(type(v), unicode) - self.assertEqual(v, "testing") - - - def test_money(self): - d = Decimal('123456.78') - self.cursor.execute("create table t1(i int identity(1,1), m money)") - self.cursor.execute("insert into t1(m) values (?)", d) - v = self.cursor.execute("select m from t1").fetchone()[0] - self.assertEqual(v, d) - - - def test_executemany(self): - self.cursor.execute("create table t1(a int, b varchar(10))") - - params = [ (i, str(i)) for i in range(1, 6) ] - - self.cursor.executemany("insert into t1(a, b) values (?,?)", params) - - count = self.cursor.execute("select count(*) from t1").fetchone()[0] - self.assertEqual(count, len(params)) - - self.cursor.execute("select a, b from t1 order by a") - rows = self.cursor.fetchall() - self.assertEqual(count, len(rows)) - - for param, row in zip(params, rows): - self.assertEqual(param[0], row[0]) - self.assertEqual(param[1], row[1]) - - - def test_executemany_one(self): - "Pass executemany a single sequence" - self.cursor.execute("create table t1(a int, b varchar(10))") - - params = [ (1, "test") ] - - self.cursor.executemany("insert into t1(a, b) values (?,?)", params) - - count = self.cursor.execute("select count(*) from t1").fetchone()[0] - self.assertEqual(count, len(params)) - - self.cursor.execute("select a, b from t1 order by a") - rows = self.cursor.fetchall() - self.assertEqual(count, len(rows)) - - for param, row in zip(params, rows): - self.assertEqual(param[0], row[0]) - self.assertEqual(param[1], row[1]) - - def test_executemany_dae_0(self): - """ - DAE for 0-length value - """ - self.cursor.execute("create table t1(a nvarchar(max))") - - 
self.cursor.fast_executemany = True - self.cursor.executemany("insert into t1(a) values(?)", [['']]) - - self.assertEqual(self.cursor.execute("select a from t1").fetchone()[0], '') - - self.cursor.fast_executemany = False - - def test_executemany_failure(self): - """ - Ensure that an exception is raised if one query in an executemany fails. - """ - self.cursor.execute("create table t1(a int, b varchar(10))") - - params = [ (1, 'good'), - ('error', 'not an int'), - (3, 'good') ] - - self.assertRaises(pyodbc.Error, self.cursor.executemany, "insert into t1(a, b) value (?, ?)", params) - - - def test_row_slicing(self): - self.cursor.execute("create table t1(a int, b int, c int, d int)"); - self.cursor.execute("insert into t1 values(1,2,3,4)") - - row = self.cursor.execute("select * from t1").fetchone() - - result = row[:] - self.assertTrue(result is row) - - result = row[:-1] - self.assertEqual(result, (1,2,3)) - - result = row[0:4] - self.assertTrue(result is row) - - - def test_row_repr(self): - self.cursor.execute("create table t1(a int, b int, c int, d int)"); - self.cursor.execute("insert into t1 values(1,2,3,4)") - - row = self.cursor.execute("select * from t1").fetchone() - - result = str(row) - self.assertEqual(result, "(1, 2, 3, 4)") - - result = str(row[:-1]) - self.assertEqual(result, "(1, 2, 3)") - - result = str(row[:1]) - self.assertEqual(result, "(1,)") - - - def test_concatenation(self): - v2 = '0123456789' * 30 - v3 = '9876543210' * 30 - - self.cursor.execute("create table t1(c1 int identity(1, 1), c2 varchar(300), c3 varchar(300))") - self.cursor.execute("insert into t1(c2, c3) values (?,?)", v2, v3) - - row = self.cursor.execute("select c2, c3, c2 + c3 as both from t1").fetchone() - - self.assertEqual(row.both, v2 + v3) - - def test_view_select(self): - # Reported in forum: Can't select from a view? I think I do this a lot, but another test never hurts. - - # Create a table (t1) with 3 rows and a view (t2) into it. 
- self.cursor.execute("create table t1(c1 int identity(1, 1), c2 varchar(50))") - for i in range(3): - self.cursor.execute("insert into t1(c2) values (?)", "string%s" % i) - self.cursor.execute("create view t2 as select * from t1") - - # Select from the view - self.cursor.execute("select * from t2") - rows = self.cursor.fetchall() - self.assertTrue(rows is not None) - self.assertTrue(len(rows) == 3) - - def test_autocommit(self): - self.assertEqual(self.cnxn.autocommit, False) - - othercnxn = pyodbc.connect(self.connection_string, autocommit=True) - self.assertEqual(othercnxn.autocommit, True) - - othercnxn.autocommit = False - self.assertEqual(othercnxn.autocommit, False) - - def test_cursorcommit(self): - "Ensure cursor.commit works" - othercnxn = pyodbc.connect(self.connection_string) - othercursor = othercnxn.cursor() - othercnxn = None - - othercursor.execute("create table t1(s varchar(20))") - othercursor.execute("insert into t1 values(?)", 'test') - othercursor.commit() - - value = self.cursor.execute("select s from t1").fetchone()[0] - self.assertEqual(value, 'test') - - - def test_unicode_results(self): - "Ensure unicode_results forces Unicode" - othercnxn = pyodbc.connect(self.connection_string, unicode_results=True) - othercursor = othercnxn.cursor() - - # ANSI data in an ANSI column ... - othercursor.execute("create table t1(s varchar(20))") - othercursor.execute("insert into t1 values(?)", 'test') - - # ... 
should be returned as Unicode - value = othercursor.execute("select s from t1").fetchone()[0] - self.assertEqual(value, u'test') - - - def test_sqlserver_callproc(self): - try: - self.cursor.execute("drop procedure pyodbctest") - self.cnxn.commit() - except: - pass - - self.cursor.execute("create table t1(s varchar(10))") - self.cursor.execute("insert into t1 values(?)", "testing") - - self.cursor.execute(""" - create procedure pyodbctest @var1 varchar(32) - as - begin - select s - from t1 - return - end - """) - self.cnxn.commit() - - # for row in self.cursor.procedureColumns('pyodbctest'): - # print row.procedure_name, row.column_name, row.column_type, row.type_name - - self.cursor.execute("exec pyodbctest 'hi'") - - # print self.cursor.description - # for row in self.cursor: - # print row.s - - def test_skip(self): - # Insert 1, 2, and 3. Fetch 1, skip 2, fetch 3. - - self.cursor.execute("create table t1(id int)"); - for i in range(1, 5): - self.cursor.execute("insert into t1 values(?)", i) - self.cursor.execute("select id from t1 order by id") - self.assertEqual(self.cursor.fetchone()[0], 1) - self.cursor.skip(2) - self.assertEqual(self.cursor.fetchone()[0], 4) - - def test_timeout(self): - self.assertEqual(self.cnxn.timeout, 0) # defaults to zero (off) - - self.cnxn.timeout = 30 - self.assertEqual(self.cnxn.timeout, 30) - - self.cnxn.timeout = 0 - self.assertEqual(self.cnxn.timeout, 0) - - def test_sets_execute(self): - # Only lists and tuples are allowed. - def f(): - self.cursor.execute("create table t1 (word varchar (100))") - words = set (['a']) - self.cursor.execute("insert into t1 (word) VALUES (?)", [words]) - - self.assertRaises(pyodbc.ProgrammingError, f) - - def test_sets_executemany(self): - # Only lists and tuples are allowed. 
- def f(): - self.cursor.execute("create table t1 (word varchar (100))") - words = set (['a']) - self.cursor.executemany("insert into t1 (word) values (?)", [words]) - - self.assertRaises(TypeError, f) - - def test_row_execute(self): - "Ensure we can use a Row object as a parameter to execute" - self.cursor.execute("create table t1(n int, s varchar(10))") - self.cursor.execute("insert into t1 values (1, 'a')") - row = self.cursor.execute("select n, s from t1").fetchone() - self.assertNotEqual(row, None) - - self.cursor.execute("create table t2(n int, s varchar(10))") - self.cursor.execute("insert into t2 values (?, ?)", row) - - def test_row_executemany(self): - "Ensure we can use a Row object as a parameter to executemany" - self.cursor.execute("create table t1(n int, s varchar(10))") - - for i in range(3): - self.cursor.execute("insert into t1 values (?, ?)", i, chr(ord('a')+i)) - - rows = self.cursor.execute("select n, s from t1").fetchall() - self.assertNotEqual(len(rows), 0) - - self.cursor.execute("create table t2(n int, s varchar(10))") - self.cursor.executemany("insert into t2 values (?, ?)", rows) - - def test_description(self): - "Ensure cursor.description is correct" - - self.cursor.execute("create table t1(n int, s varchar(8), d decimal(5,2))") - self.cursor.execute("insert into t1 values (1, 'abc', '1.23')") - self.cursor.execute("select * from t1") - - # (I'm not sure the precision of an int is constant across different versions, bits, so I'm hand checking the - # items I do know. 
- - # int - t = self.cursor.description[0] - self.assertEqual(t[0], 'n') - self.assertEqual(t[1], int) - self.assertEqual(t[5], 0) # scale - self.assertEqual(t[6], True) # nullable - - # varchar(8) - t = self.cursor.description[1] - self.assertEqual(t[0], 's') - self.assertEqual(t[1], str) - self.assertEqual(t[4], 8) # precision - self.assertEqual(t[5], 0) # scale - self.assertEqual(t[6], True) # nullable - - # decimal(5, 2) - t = self.cursor.description[2] - self.assertEqual(t[0], 'd') - self.assertEqual(t[1], Decimal) - self.assertEqual(t[4], 5) # precision - self.assertEqual(t[5], 2) # scale - self.assertEqual(t[6], True) # nullable - - def test_cursor_messages_with_print(self): - """ - Ensure the Cursor.messages attribute is handled correctly with a simple PRINT statement. - """ - # self.cursor is used in setUp, hence is not brand new at this point - brand_new_cursor = self.cnxn.cursor() - self.assertIsNone(brand_new_cursor.messages) - - # SQL Server PRINT statements are never more than 8000 characters - # https://docs.microsoft.com/en-us/sql/t-sql/language-elements/print-transact-sql#remarks - for msg in ('hello world', 'ABCDEFGHIJ' * 800): - self.cursor.execute("PRINT '{}'".format(msg)) - messages = self.cursor.messages - self.assertTrue(type(messages) is list) - self.assertEqual(len(messages), 1) - self.assertTrue(type(messages[0]) is tuple) - self.assertEqual(len(messages[0]), 2) - self.assertTrue(type(messages[0][0]) is unicode) - self.assertTrue(type(messages[0][1]) is unicode) - self.assertEqual('[01000] (0)', messages[0][0]) - self.assertTrue(messages[0][1].endswith(msg)) - - def test_cursor_messages_with_stored_proc(self): - """ - Complex scenario to test the Cursor.messages attribute. 
- """ - self.cursor.execute(""" - CREATE OR ALTER PROCEDURE test_cursor_messages AS - BEGIN - SET NOCOUNT ON; - PRINT 'Message 1a'; - PRINT 'Message 1b'; - SELECT N'Field 1a' AS F UNION ALL SELECT N'Field 1b'; - SELECT N'Field 2a' AS F UNION ALL SELECT N'Field 2b'; - PRINT 'Message 2a'; - PRINT 'Message 2b'; - END - """) - # result set 1 - self.cursor.execute("EXEC test_cursor_messages") - rows = [tuple(r) for r in self.cursor.fetchall()] # convert pyodbc.Row objects for ease of use - self.assertEqual(len(rows), 2) - self.assertSequenceEqual(rows, [('Field 1a', ), ('Field 1b', )]) - self.assertEqual(len(self.cursor.messages), 2) - self.assertTrue(self.cursor.messages[0][1].endswith('Message 1a')) - self.assertTrue(self.cursor.messages[1][1].endswith('Message 1b')) - # result set 2 - self.assertTrue(self.cursor.nextset()) - rows = [tuple(r) for r in self.cursor.fetchall()] # convert pyodbc.Row objects for ease of use - self.assertEqual(len(rows), 2) - self.assertSequenceEqual(rows, [('Field 2a', ), ('Field 2b', )]) - self.assertEqual(self.cursor.messages, []) - # result set 3 - self.assertTrue(self.cursor.nextset()) - with self.assertRaises(pyodbc.ProgrammingError): - self.cursor.fetchall() - self.assertEqual(len(self.cursor.messages), 2) - self.assertTrue(self.cursor.messages[0][1].endswith('Message 2a')) - self.assertTrue(self.cursor.messages[1][1].endswith('Message 2b')) - # result set 4 (which shouldn't exist) - self.assertFalse(self.cursor.nextset()) - with self.assertRaises(pyodbc.ProgrammingError): - self.cursor.fetchall() - self.assertEqual(self.cursor.messages, []) - - def test_none_param(self): - "Ensure None can be used for params other than the first" - # Some driver/db versions would fail if NULL was not the first parameter because SQLDescribeParam (only used - # with NULL) could not be used after the first call to SQLBindParameter. This means None always worked for the - # first column, but did not work for later columns. 
- # - # If SQLDescribeParam doesn't work, pyodbc would use VARCHAR which almost always worked. However, - # binary/varbinary won't allow an implicit conversion. - - self.cursor.execute("create table t1(n int, blob varbinary(max))") - self.cursor.execute("insert into t1 values (1, newid())") - row = self.cursor.execute("select * from t1").fetchone() - self.assertEqual(row.n, 1) - self.assertEqual(type(row.blob), bytearray) - - sql = "update t1 set n=?, blob=?" - try: - self.cursor.execute(sql, 2, None) - except pyodbc.DataError: - if self.driver_type_is('freetds'): - # FREETDS_KNOWN_ISSUE - # - # cnxn.getinfo(pyodbc.SQL_DESCRIBE_PARAMETER) returns False for FreeTDS, so - # pyodbc can't call SQLDescribeParam to get the correct parameter type. - # This can lead to errors being returned from SQL Server when sp_prepexec is called, - # e.g., "Implicit conversion from data type varchar to varbinary(max) is not allowed." - # - # So at least verify that the user can manually specify the parameter type - self.cursor.setinputsizes([(), (pyodbc.SQL_VARBINARY, None, None)]) - self.cursor.execute(sql, 2, None) - else: - raise - row = self.cursor.execute("select * from t1").fetchone() - self.assertEqual(row.n, 2) - self.assertEqual(row.blob, None) - - - def test_output_conversion(self): - def convert(value): - # `value` will be a string. We'll simply add an X at the beginning at the end. - return 'X' + value + 'X' - self.cnxn.add_output_converter(pyodbc.SQL_VARCHAR, convert) - self.cursor.execute("create table t1(n int, v varchar(10))") - self.cursor.execute("insert into t1 values (1, '123.45')") - value = self.cursor.execute("select v from t1").fetchone()[0] - self.assertEqual(value, 'X123.45X') - - # Now clear the conversions and try again. There should be no Xs this time. 
- self.cnxn.clear_output_converters() - value = self.cursor.execute("select v from t1").fetchone()[0] - self.assertEqual(value, '123.45') - - - def test_too_large(self): - """Ensure error raised if insert fails due to truncation""" - value = 'x' * 1000 - self.cursor.execute("create table t1(s varchar(800))") - def test(): - self.cursor.execute("insert into t1 values (?)", value) - # different versions of SQL Server generate different errors - self.assertRaises((pyodbc.DataError, pyodbc.ProgrammingError), test) - - def test_geometry_null_insert(self): - def convert(value): - return value - - self.cnxn.add_output_converter(-151, convert) # -151 is SQL Server's geometry - self.cursor.execute("create table t1(n int, v geometry)") - self.cursor.execute("insert into t1 values (?, ?)", 1, None) - value = self.cursor.execute("select v from t1").fetchone()[0] - self.assertEqual(value, None) - self.cnxn.clear_output_converters() - - def test_login_timeout(self): - # This can only test setting since there isn't a way to cause it to block on the server side. 
- cnxns = pyodbc.connect(self.connection_string, timeout=2) - - def test_row_equal(self): - self.cursor.execute("create table t1(n int, s varchar(20))") - self.cursor.execute("insert into t1 values (1, 'test')") - row1 = self.cursor.execute("select n, s from t1").fetchone() - row2 = self.cursor.execute("select n, s from t1").fetchone() - b = (row1 == row2) - self.assertEqual(b, True) - - def test_row_gtlt(self): - self.cursor.execute("create table t1(n int, s varchar(20))") - self.cursor.execute("insert into t1 values (1, 'test1')") - self.cursor.execute("insert into t1 values (1, 'test2')") - rows = self.cursor.execute("select n, s from t1 order by s").fetchall() - self.assertTrue(rows[0] < rows[1]) - self.assertTrue(rows[0] <= rows[1]) - self.assertTrue(rows[1] > rows[0]) - self.assertTrue(rows[1] >= rows[0]) - self.assertTrue(rows[0] != rows[1]) - - rows = list(rows) - rows.sort() # uses < - - def test_context_manager_success(self): - """ - Ensure a successful with statement causes a commit. - """ - self.cursor.execute("create table t1(n int)") - self.cnxn.commit() - - with pyodbc.connect(self.connection_string) as cnxn: - cursor = cnxn.cursor() - cursor.execute("insert into t1 values (1)") - - cnxn = None - cursor = None - - rows = self.cursor.execute("select n from t1").fetchall() - self.assertEqual(len(rows), 1) - self.assertEqual(rows[0][0], 1) - - - def test_context_manager_fail(self): - """ - Ensure an exception in a with statement causes a rollback. 
- """ - self.cursor.execute("create table t1(n int)") - self.cnxn.commit() - - try: - with pyodbc.connect(self.connection_string) as cnxn: - cursor = cnxn.cursor() - cursor.execute("insert into t1 values (1)") - raise Exception("Testing failure") - except Exception: - pass - - cnxn = None - cursor = None - - count = self.cursor.execute("select count(*) from t1").fetchone()[0] - self.assertEqual(count, 0) - - - def test_cursor_context_manager_success(self): - """ - Ensure a successful with statement using a cursor causes a commit. - """ - self.cursor.execute("create table t1(n int)") - self.cnxn.commit() - - with pyodbc.connect(self.connection_string).cursor() as cursor: - cursor.execute("insert into t1 values (1)") - - cursor = None - - rows = self.cursor.execute("select n from t1").fetchall() - self.assertEqual(len(rows), 1) - self.assertEqual(rows[0][0], 1) - - - def test_cursor_context_manager_fail(self): - """ - Ensure an exception in a with statement using a cursor causes a rollback. - """ - self.cursor.execute("create table t1(n int)") - self.cnxn.commit() - - try: - with pyodbc.connect(self.connection_string).cursor() as cursor: - cursor.execute("insert into t1 values (1)") - raise Exception("Testing failure") - except Exception: - pass - - cursor = None - - count = self.cursor.execute("select count(*) from t1").fetchone()[0] - self.assertEqual(count, 0) - - - def test_untyped_none(self): - # From issue 129 - value = self.cursor.execute("select ?", None).fetchone()[0] - self.assertEqual(value, None) - - def test_large_update_nodata(self): - self.cursor.execute('create table t1(a varbinary(max))') - hundredkb = bytearray('x'*100*1024) - self.cursor.execute('update t1 set a=? 
where 1=0', (hundredkb,)) - - def test_func_param(self): - self.cursor.execute(''' - create function func1 (@testparam varchar(4)) - returns @rettest table (param varchar(4)) - as - begin - insert @rettest - select @testparam - return - end - ''') - self.cnxn.commit() - value = self.cursor.execute("select * from func1(?)", 'test').fetchone()[0] - self.assertEqual(value, 'test') - - def test_no_fetch(self): - # Issue 89 with FreeTDS: Multiple selects (or catalog functions that issue selects) without fetches seem to - # confuse the driver. - self.cursor.execute('select 1') - self.cursor.execute('select 1') - self.cursor.execute('select 1') - - def test_drivers(self): - drivers = pyodbc.drivers() - self.assertEqual(list, type(drivers)) - self.assertTrue(len(drivers) > 0) - - m = re.search('DRIVER={?([^}]+?)}?;', self.connection_string, re.IGNORECASE) - if m: # issue #1000 - may be testing with DSN= connection - current = m.group(1) - self.assertTrue(current in drivers) - - def test_prepare_cleanup(self): - # When statement is prepared, it is kept in case the next execute uses the same statement. This must be - # removed when a non-execute statement is used that returns results, such as SQLTables. - - self.cursor.execute("select top 1 name from sysobjects where name = ?", "bogus") - self.cursor.fetchone() - - self.cursor.tables("bogus") - - self.cursor.execute("select top 1 name from sysobjects where name = ?", "bogus") - self.cursor.fetchone() - - def test_exc_integrity(self): - "Make sure an IntegretyError is raised" - # This is really making sure we are properly encoding and comparing the SQLSTATEs. 
- self.cursor.execute("create table t1(s1 varchar(10) primary key)") - self.cursor.execute("insert into t1 values ('one')") - self.assertRaises(pyodbc.IntegrityError, self.cursor.execute, "insert into t1 values ('one')") - - def test_emoticons_as_parameter(self): - # https://github.com/mkleehammer/pyodbc/issues/423 - # - # When sending a varchar parameter, pyodbc is supposed to set ColumnSize to the number - # of characters. Ensure it works even with 4-byte characters. - # - # http://www.fileformat.info/info/unicode/char/1f31c/index.htm - - v = "x \U0001F31C z" - - self.cursor.execute("create table t1(s varchar(100))") - self.cursor.execute("insert into t1 values (?)", v) - - result = self.cursor.execute("select s from t1").fetchone()[0] - - self.assertEqual(result, v) - - def test_emoticons_as_literal(self): - # similar to `test_emoticons_as_parameter`, above, except for Unicode literal - # - # http://www.fileformat.info/info/unicode/char/1f31c/index.htm - - v = "x \U0001F31C z" - - self.cursor.execute("create table t1(s varchar(100))") - self.cursor.execute("insert into t1 values (N'%s')" % v) - - result = self.cursor.execute("select s from t1").fetchone()[0] - - self.assertEqual(result, v) - - def _test_tvp(self, diff_schema): - # https://github.com/mkleehammer/pyodbc/issues/290 - # - # pyodbc supports queries with table valued parameters in sql server - # - - if self.handle_known_issues_for('freetds', print_reminder=True): - warn('FREETDS_KNOWN_ISSUE - test_tvp: test cancelled.') - return - - procname = 'SelectTVP' - typename = 'TestTVP' - - if diff_schema: - schemaname = 'myschema' - procname = schemaname + '.' + procname - typenameonly = typename - typename = schemaname + '.' + typename - - # (Don't use "if exists" since older SQL Servers don't support it.) 
- try: - self.cursor.execute("drop procedure " + procname) - except: - pass - try: - self.cursor.execute("drop type " + typename) - except: - pass - if diff_schema: - try: - self.cursor.execute("drop schema " + schemaname) - except: - pass - self.cursor.commit() - - if diff_schema: - self.cursor.execute("CREATE SCHEMA myschema") - self.cursor.commit() - - query = "CREATE TYPE %s AS TABLE("\ - "c01 VARCHAR(255),"\ - "c02 VARCHAR(MAX),"\ - "c03 VARBINARY(255),"\ - "c04 VARBINARY(MAX),"\ - "c05 BIT,"\ - "c06 DATE,"\ - "c07 TIME,"\ - "c08 DATETIME2(5),"\ - "c09 BIGINT,"\ - "c10 FLOAT,"\ - "c11 NUMERIC(38, 24),"\ - "c12 UNIQUEIDENTIFIER)" % typename - - self.cursor.execute(query) - self.cursor.commit() - self.cursor.execute("CREATE PROCEDURE %s @TVP %s READONLY AS SELECT * FROM @TVP;" % (procname, typename)) - self.cursor.commit() - - long_string = '' - long_bytearray = [] - for i in range(255): - long_string += chr((i % 95) + 32) - long_bytearray.append(i % 255) - - very_long_string = '' - very_long_bytearray = [] - for i in range(2000000): - very_long_string += chr((i % 95) + 32) - very_long_bytearray.append(i % 255) - - c01 = ['abc', '', long_string] - - c02 = ['abc', '', very_long_string] - - c03 = [bytearray([0xD1, 0xCE, 0xFA, 0xCE]), - bytearray([0x00, 0x01, 0x02, 0x03, 0x04]), - bytearray(long_bytearray)] - - c04 = [bytearray([0x0F, 0xF1, 0xCE, 0xCA, 0xFE]), - bytearray([0x00, 0x01, 0x02, 0x03, 0x04, 0x05]), - bytearray(very_long_bytearray)] - - c05 = [1, 0, 1] - - c06 = [date(1997, 8, 29), - date(1, 1, 1), - date(9999, 12, 31)] - - c07 = [time(9, 13, 39), - time(0, 0, 0), - time(23, 59, 59)] - - c08 = [datetime(2018, 11, 13, 13, 33, 26, 298420), - datetime(1, 1, 1, 0, 0, 0, 0), - datetime(9999, 12, 31, 23, 59, 59, 999990)] - - c09 = [1234567, -9223372036854775808, 9223372036854775807] - - c10 = [3.14, -1.79E+308, 1.79E+308] - - c11 = [Decimal('31234567890123.141243449787580175325274'), - Decimal( '0.000000000000000000000001'), - 
Decimal('99999999999999.999999999999999999999999')] - - c12 = ['4FE34A93-E574-04CC-200A-353F0D1770B1', - '33F7504C-2BAC-1B83-01D1-7434A7BA6A17', - 'FFFFFFFF-FFFF-FFFF-FFFF-FFFFFFFFFFFF'] - - param_array = [] - - for i in range (3): - param_array.append([c01[i], c02[i], c03[i], c04[i], c05[i], c06[i], c07[i], c08[i], c09[i], c10[i], c11[i], c12[i]]) - - success = True - - try: - p1 = [param_array] - if diff_schema: - p1 = [ [ typenameonly, schemaname ] + param_array ] - result_array = self.cursor.execute("exec %s ?" % procname, p1).fetchall() - except Exception as ex: - print("Failed to execute SelectTVP") - print("Exception: [" + type(ex).__name__ + "]" , ex.args) - - success = False - else: - for r in range(len(result_array)): - for c in range(len(result_array[r])): - if(result_array[r][c] != param_array[r][c]): - print("Mismatch at row " + str(r+1) + ", column " + str(c+1) + "; expected:", param_array[r][c] , " received:", result_array[r][c]) - success = False - - try: - p1 = [[]] - if diff_schema: - p1 = [ [ typenameonly, schemaname ] + [] ] - result_array = self.cursor.execute("exec %s ?" % procname, p1).fetchall() - self.assertEqual(result_array, []) - except Exception as ex: - print("Failed to execute SelectTVP") - print("Exception: [" + type(ex).__name__ + "]", ex.args) - success = False - - self.assertEqual(success, True) - - def test_columns(self): - self.cursor.execute( - """ - create table t1(n int, d datetime, c nvarchar(100)) - """) - - self.cursor.columns(table=u't1') - names = {row.column_name for row in self.cursor.fetchall()} - assert names == {'n', 'd', 'c'}, 'names=%r' % names - - self.cursor.columns(table=u't1', column=u'c') - row = self.cursor.fetchone() - assert row.column_name == 'c' - - # Same tests but with str instead of unicode. 
- self.cursor.columns(table='t1') - names = {row.column_name for row in self.cursor.fetchall()} - assert names == {'n', 'd', 'c'}, 'names=%r' % names - - self.cursor.columns(table='t1', column='c') - row = self.cursor.fetchone() - assert row.column_name == 'c' - - def test_tvp(self): - self._test_tvp(False) - - def test_tvp_diffschema(self): - self._test_tvp(True) - -def main(): - from optparse import OptionParser - parser = OptionParser(usage=usage) - parser.add_option("-v", "--verbose", action="count", help="Increment test verbosity (can be used multiple times)") - parser.add_option("-d", "--debug", action="store_true", default=False, help="Print debugging items") - parser.add_option("-t", "--test", help="Run only the named test") - - (options, args) = parser.parse_args() - - if len(args) > 1: - parser.error('Only one argument is allowed. Do you need quotes around the connection string?') - - if not args: - connection_string = load_setup_connection_string('sqlservertests') - - if not connection_string: - parser.print_help() - raise SystemExit() - else: - connection_string = args[0] - - if options.verbose: - cnxn = pyodbc.connect(connection_string) - print_library_info(cnxn) - cnxn.close() - - suite = load_tests(SqlServerTestCase, options.test, connection_string) - - testRunner = unittest.TextTestRunner(verbosity=options.verbose) - result = testRunner.run(suite) - - return result - - -if __name__ == '__main__': - - # Add the build directory to the path so we're testing the latest build, not the installed version. 
- - add_to_path() - - import pyodbc - sys.exit(0 if main().wasSuccessful() else 1) diff --git a/tests2/test.py b/tests2/test.py deleted file mode 100755 index 04a053b7..00000000 --- a/tests2/test.py +++ /dev/null @@ -1,41 +0,0 @@ -#!/usr/bin/env python - -from testutils import * - -add_to_path() -import pyodbc - -def main(): - from optparse import OptionParser - parser = OptionParser() - parser.add_option("-v", "--verbose", action="count", help="Increment test verbosity (can be used multiple times)") - parser.add_option("-d", "--debug", action="store_true", default=False, help="Print debugging items") - - (options, args) = parser.parse_args() - - if len(args) > 1: - parser.error('Only one argument is allowed. Do you need quotes around the connection string?') - - if not args: - connection_string = load_setup_connection_string('test') - if not connection_string: - print('no connection string') - parser.print_help() - raise SystemExit() - else: - connection_string = args[0] - - cnxn = pyodbc.connect(connection_string) - - if options.verbose: - print_library_info(cnxn) - - cursor = cnxn.cursor() - cursor.execute("select 'å' as uk, 'b' as jp") - row = cursor.fetchone() - print(row) - -if __name__ == '__main__': - main() - - diff --git a/tests2/test.xls b/tests2/test.xls deleted file mode 100644 index f5c5662a..00000000 Binary files a/tests2/test.xls and /dev/null differ diff --git a/tests2/testbase.py b/tests2/testbase.py deleted file mode 100755 index 6c57b9fe..00000000 --- a/tests2/testbase.py +++ /dev/null @@ -1,25 +0,0 @@ - -import unittest - -_TESTSTR = '0123456789-abcdefghijklmnopqrstuvwxyz-' - -def _generate_test_string(length): - """ - Returns a string of `length` characters, constructed by repeating _TESTSTR as necessary. - - To enhance performance, there are 3 ways data is read, based on the length of the value, so most data types are - tested with 3 lengths. This function helps us generate the test data. 
- - We use a recognizable data set instead of a single character to make it less likely that "overlap" errors will - be hidden and to help us manually identify where a break occurs. - """ - if length <= len(_TESTSTR): - return _TESTSTR[:length] - - c = (length + len(_TESTSTR)-1) / len(_TESTSTR) - v = _TESTSTR * c - return v[:length] - -class TestBase(unittest.TestCase): - - diff --git a/tests2/testutils.py b/tests2/testutils.py deleted file mode 100755 index 4ecb20ec..00000000 --- a/tests2/testutils.py +++ /dev/null @@ -1,119 +0,0 @@ -from __future__ import print_function - -import os, sys, platform -from os.path import join, dirname, abspath, basename -import unittest -from distutils.util import get_platform - -def add_to_path(): - """ - Prepends the build directory to the path so that newly built pyodbc libraries are used, allowing it to be tested - without installing it. - """ - # Put the build directory into the Python path so we pick up the version we just built. - # - # To make this cross platform, we'll search the directories until we find the .pyd file. - - import imp - - library_exts = [ t[0] for t in imp.get_suffixes() if t[-1] == imp.C_EXTENSION ] - library_names = [ 'pyodbc%s' % ext for ext in library_exts ] - - # Only go into directories that match our version number. - - dir_suffix = '%s-%s.%s' % (get_platform(), sys.version_info[0], sys.version_info[1]) - - build = join(dirname(dirname(abspath(__file__))), 'build') - - for root, dirs, files in os.walk(build): - for d in dirs[:]: - if not d.endswith(dir_suffix): - dirs.remove(d) - - for name in library_names: - if name in files: - sys.path.insert(0, root) - print('Library:', join(root, name)) - return - - print('Did not find the pyodbc library in the build directory. 
Will use an installed version.') - - -def print_library_info(cnxn): - import pyodbc - print('python: %s' % sys.version) - print('pyodbc: %s %s' % (pyodbc.version, os.path.abspath(pyodbc.__file__))) - print('odbc: %s' % cnxn.getinfo(pyodbc.SQL_ODBC_VER)) - print('driver: %s %s' % (cnxn.getinfo(pyodbc.SQL_DRIVER_NAME), cnxn.getinfo(pyodbc.SQL_DRIVER_VER))) - print(' supports ODBC version %s' % cnxn.getinfo(pyodbc.SQL_DRIVER_ODBC_VER)) - print('os: %s' % platform.system()) - print('unicode: Py_Unicode=%s SQLWCHAR=%s' % (pyodbc.UNICODE_SIZE, pyodbc.SQLWCHAR_SIZE)) - - cursor = cnxn.cursor() - for typename in ['VARCHAR', 'WVARCHAR', 'BINARY']: - t = getattr(pyodbc, 'SQL_' + typename) - try: - cursor.getTypeInfo(t) - except pyodbc.Error as e: - print('Max %s = (not supported)' % (typename, )) - else: - row = cursor.fetchone() - print('Max %s = %s' % (typename, row and row[2] or '(not supported)')) - - if platform.system() == 'Windows': - print(' %s' % ' '.join([s for s in platform.win32_ver() if s])) - - -def load_tests(testclass, name, *args): - """ - Returns a TestSuite for tests in `testclass`. - - name - Optional test name if you only want to run 1 test. If not provided all tests in `testclass` will be loaded. - - args - Arguments for the test class constructor. These will be passed after the test method name. - """ - if name: - if not name.startswith('test_'): - name = 'test_%s' % name - names = [ name ] - - else: - names = [ method for method in dir(testclass) if method.startswith('test_') ] - - return unittest.TestSuite([ testclass(name, *args) for name in names ]) - - -def load_setup_connection_string(section): - """ - Attempts to read the default connection string from the setup.cfg file. - - If the file does not exist or if it exists but does not contain the connection string, None is returned. If the - file exists but cannot be parsed, an exception is raised. 
- """ - from os.path import exists, join, dirname, splitext, basename - from ConfigParser import SafeConfigParser - - FILENAME = 'setup.cfg' - KEY = 'connection-string' - - path = dirname(abspath(__file__)) - while True: - fqn = join(path, 'tmp', FILENAME) - if exists(fqn): - break - parent = dirname(path) - print('{} --> {}'.format(path, parent)) - if parent == path: - return None - path = parent - - try: - p = SafeConfigParser() - p.read(fqn) - except: - raise SystemExit('Unable to parse %s: %s' % (path, sys.exc_info()[1])) - - if p.has_option(section, KEY): - return p.get(section, KEY) diff --git a/tests3/__init__.py b/tests3/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/tests3/dbapi20.py b/tests3/dbapi20.py deleted file mode 100644 index 94567db8..00000000 --- a/tests3/dbapi20.py +++ /dev/null @@ -1,850 +0,0 @@ -#!/usr/bin/env python -''' Python DB API 2.0 driver compliance unit test suite. - - This software is Public Domain and may be used without restrictions. - - "Now we have booze and barflies entering the discussion, plus rumours of - DBAs on drugs... and I won't tell you what flashes through my mind each - time I read the subject line with 'Anal Compliance' in it. All around - this is turning out to be a thoroughly unwholesome unit test." - - -- Ian Bicking -''' - -__rcs_id__ = '$Id: dbapi20.py,v 1.10 2003/10/09 03:14:14 zenzen Exp $' -__version__ = '$Revision: 1.10 $'[11:-2] -__author__ = 'Stuart Bishop ' - -import unittest -import time - -# $Log: dbapi20.py,v $ -# Revision 1.10 2003/10/09 03:14:14 zenzen -# Add test for DB API 2.0 optional extension, where database exceptions -# are exposed as attributes on the Connection object. -# -# Revision 1.9 2003/08/13 01:16:36 zenzen -# Minor tweak from Stefan Fleiter -# -# Revision 1.8 2003/04/10 00:13:25 zenzen -# Changes, as per suggestions by M.-A. 
Lemburg -# - Add a table prefix, to ensure namespace collisions can always be avoided -# -# Revision 1.7 2003/02/26 23:33:37 zenzen -# Break out DDL into helper functions, as per request by David Rushby -# -# Revision 1.6 2003/02/21 03:04:33 zenzen -# Stuff from Henrik Ekelund: -# added test_None -# added test_nextset & hooks -# -# Revision 1.5 2003/02/17 22:08:43 zenzen -# Implement suggestions and code from Henrik Eklund - test that cursor.arraysize -# defaults to 1 & generic cursor.callproc test added -# -# Revision 1.4 2003/02/15 00:16:33 zenzen -# Changes, as per suggestions and bug reports by M.-A. Lemburg, -# Matthew T. Kromer, Federico Di Gregorio and Daniel Dittmar -# - Class renamed -# - Now a subclass of TestCase, to avoid requiring the driver stub -# to use multiple inheritance -# - Reversed the polarity of buggy test in test_description -# - Test exception hierarchy correctly -# - self.populate is now self._populate(), so if a driver stub -# overrides self.ddl1 this change propagates -# - VARCHAR columns now have a width, which will hopefully make the -# DDL even more portible (this will be reversed if it causes more problems) -# - cursor.rowcount being checked after various execute and fetchXXX methods -# - Check for fetchall and fetchmany returning empty lists after results -# are exhausted (already checking for empty lists if select retrieved -# nothing -# - Fix bugs in test_setoutputsize_basic and test_setinputsizes -# - -class DatabaseAPI20Test(unittest.TestCase): - ''' Test a database self.driver for DB API 2.0 compatibility. - This implementation tests Gadfly, but the TestCase - is structured so that other self.drivers can subclass this - test case to ensure compiliance with the DB-API. It is - expected that this TestCase may be expanded in the future - if ambiguities or edge conditions are discovered. - - The 'Optional Extensions' are not yet being tested. 
- - self.drivers should subclass this test, overriding setUp, tearDown, - self.driver, connect_args and connect_kw_args. Class specification - should be as follows: - - import dbapi20 - class mytest(dbapi20.DatabaseAPI20Test): - [...] - - Don't 'import DatabaseAPI20Test from dbapi20', or you will - confuse the unit tester - just 'import dbapi20'. - ''' - - # The self.driver module. This should be the module where the 'connect' - # method is to be found - driver = None - connect_args = () # List of arguments to pass to connect - connect_kw_args = {} # Keyword arguments for connect - table_prefix = 'dbapi20test_' # If you need to specify a prefix for tables - - ddl1 = 'create table %sbooze (name varchar(20))' % table_prefix - ddl2 = 'create table %sbarflys (name varchar(20))' % table_prefix - xddl1 = 'drop table %sbooze' % table_prefix - xddl2 = 'drop table %sbarflys' % table_prefix - - lowerfunc = 'lower' # Name of stored procedure to convert string->lowercase - - # Some drivers may need to override these helpers, for example adding - # a 'commit' after the execute. - def executeDDL1(self,cursor): - cursor.execute(self.ddl1) - - def executeDDL2(self,cursor): - cursor.execute(self.ddl2) - - def setUp(self): - ''' self.drivers should override this method to perform required setup - if any is necessary, such as creating the database. - ''' - pass - - def tearDown(self): - ''' self.drivers should override this method to perform required cleanup - if any is necessary, such as deleting the test database. - The default drops the tables that may be created. - ''' - con = self._connect() - try: - cur = con.cursor() - for i, ddl in enumerate((self.xddl1,self.xddl2)): - try: - cur.execute(ddl) - con.commit() - except self.driver.Error: - # Assume table didn't exist. Other tests will check if - # execute is busted. 
- pass - finally: - con.close() - - def _connect(self): - try: - return self.driver.connect( - *self.connect_args,**self.connect_kw_args - ) - except AttributeError: - self.fail("No connect method found in self.driver module") - - def test_connect(self): - con = self._connect() - con.close() - - def test_apilevel(self): - try: - # Must exist - apilevel = self.driver.apilevel - # Must equal 2.0 - self.assertEqual(apilevel,'2.0') - except AttributeError: - self.fail("Driver doesn't define apilevel") - - def test_threadsafety(self): - try: - # Must exist - threadsafety = self.driver.threadsafety - # Must be a valid value - self.assertTrue(threadsafety in (0,1,2,3)) - except AttributeError: - self.fail("Driver doesn't define threadsafety") - - def test_paramstyle(self): - try: - # Must exist - paramstyle = self.driver.paramstyle - # Must be a valid value - self.assertTrue(paramstyle in ( - 'qmark','numeric','named','format','pyformat' - )) - except AttributeError: - self.fail("Driver doesn't define paramstyle") - - def test_Exceptions(self): - # Make sure required exceptions exist, and are in the - # defined hierarchy. 
- self.assertTrue(issubclass(self.driver.Warning,StandardError)) - self.assertTrue(issubclass(self.driver.Error,StandardError)) - self.assertTrue( - issubclass(self.driver.InterfaceError,self.driver.Error) - ) - self.assertTrue( - issubclass(self.driver.DatabaseError,self.driver.Error) - ) - self.assertTrue( - issubclass(self.driver.OperationalError,self.driver.Error) - ) - self.assertTrue( - issubclass(self.driver.IntegrityError,self.driver.Error) - ) - self.assertTrue( - issubclass(self.driver.InternalError,self.driver.Error) - ) - self.assertTrue( - issubclass(self.driver.ProgrammingError,self.driver.Error) - ) - self.assertTrue( - issubclass(self.driver.NotSupportedError,self.driver.Error) - ) - - def test_ExceptionsAsConnectionAttributes(self): - # OPTIONAL EXTENSION - # Test for the optional DB API 2.0 extension, where the exceptions - # are exposed as attributes on the Connection object - # I figure this optional extension will be implemented by any - # driver author who is using this test suite, so it is enabled - # by default. 
- con = self._connect() - drv = self.driver - self.assertTrue(con.Warning is drv.Warning) - self.assertTrue(con.Error is drv.Error) - self.assertTrue(con.InterfaceError is drv.InterfaceError) - self.assertTrue(con.DatabaseError is drv.DatabaseError) - self.assertTrue(con.OperationalError is drv.OperationalError) - self.assertTrue(con.IntegrityError is drv.IntegrityError) - self.assertTrue(con.InternalError is drv.InternalError) - self.assertTrue(con.ProgrammingError is drv.ProgrammingError) - self.assertTrue(con.NotSupportedError is drv.NotSupportedError) - - - def test_commit(self): - con = self._connect() - try: - # Commit must work, even if it doesn't do anything - con.commit() - finally: - con.close() - - def test_rollback(self): - con = self._connect() - # If rollback is defined, it should either work or throw - # the documented exception - if hasattr(con,'rollback'): - try: - con.rollback() - except self.driver.NotSupportedError: - pass - - def test_cursor(self): - con = self._connect() - try: - cur = con.cursor() - finally: - con.close() - - def test_cursor_isolation(self): - con = self._connect() - try: - # Make sure cursors created from the same connection have - # the documented transaction isolation level - cur1 = con.cursor() - cur2 = con.cursor() - self.executeDDL1(cur1) - cur1.execute("insert into %sbooze values ('Victoria Bitter')" % ( - self.table_prefix - )) - cur2.execute("select name from %sbooze" % self.table_prefix) - booze = cur2.fetchall() - self.assertEqual(len(booze),1) - self.assertEqual(len(booze[0]),1) - self.assertEqual(booze[0][0],'Victoria Bitter') - finally: - con.close() - - def test_description(self): - con = self._connect() - try: - cur = con.cursor() - self.executeDDL1(cur) - self.assertEqual(cur.description,None, - 'cursor.description should be none after executing a ' - 'statement that can return no rows (such as DDL)' - ) - cur.execute('select name from %sbooze' % self.table_prefix) - self.assertEqual(len(cur.description),1, - 
'cursor.description describes too many columns' - ) - self.assertEqual(len(cur.description[0]),7, - 'cursor.description[x] tuples must have 7 elements' - ) - self.assertEqual(cur.description[0][0].lower(),'name', - 'cursor.description[x][0] must return column name' - ) - self.assertEqual(cur.description[0][1],self.driver.STRING, - 'cursor.description[x][1] must return column type. Got %r' - % cur.description[0][1] - ) - - # Make sure self.description gets reset - self.executeDDL2(cur) - self.assertEqual(cur.description,None, - 'cursor.description not being set to None when executing ' - 'no-result statements (eg. DDL)' - ) - finally: - con.close() - - def test_rowcount(self): - con = self._connect() - try: - cur = con.cursor() - self.executeDDL1(cur) - self.assertEqual(cur.rowcount,-1, - 'cursor.rowcount should be -1 after executing no-result ' - 'statements' - ) - cur.execute("insert into %sbooze values ('Victoria Bitter')" % ( - self.table_prefix - )) - self.assertTrue(cur.rowcount in (-1,1), - 'cursor.rowcount should == number or rows inserted, or ' - 'set to -1 after executing an insert statement' - ) - cur.execute("select name from %sbooze" % self.table_prefix) - self.assertTrue(cur.rowcount in (-1,1), - 'cursor.rowcount should == number of rows returned, or ' - 'set to -1 after executing a select statement' - ) - self.executeDDL2(cur) - self.assertEqual(cur.rowcount,-1, - 'cursor.rowcount not being reset to -1 after executing ' - 'no-result statements' - ) - finally: - con.close() - - lower_func = 'lower' - def test_callproc(self): - con = self._connect() - try: - cur = con.cursor() - if self.lower_func and hasattr(cur,'callproc'): - r = cur.callproc(self.lower_func,('FOO',)) - self.assertEqual(len(r),1) - self.assertEqual(r[0],'FOO') - r = cur.fetchall() - self.assertEqual(len(r),1,'callproc produced no result set') - self.assertEqual(len(r[0]),1, - 'callproc produced invalid result set' - ) - self.assertEqual(r[0][0],'foo', - 'callproc produced invalid 
results' - ) - finally: - con.close() - - def test_close(self): - con = self._connect() - try: - cur = con.cursor() - finally: - con.close() - - # cursor.execute should raise an Error if called after connection - # closed - self.assertRaises(self.driver.Error,self.executeDDL1,cur) - - # connection.commit should raise an Error if called after connection' - # closed.' - self.assertRaises(self.driver.Error,con.commit) - - # connection.close should raise an Error if called more than once - self.assertRaises(self.driver.Error,con.close) - - def test_execute(self): - con = self._connect() - try: - cur = con.cursor() - self._paraminsert(cur) - finally: - con.close() - - def _paraminsert(self,cur): - self.executeDDL1(cur) - cur.execute("insert into %sbooze values ('Victoria Bitter')" % ( - self.table_prefix - )) - self.assertTrue(cur.rowcount in (-1,1)) - - if self.driver.paramstyle == 'qmark': - cur.execute( - 'insert into %sbooze values (?)' % self.table_prefix, - ("Cooper's",) - ) - elif self.driver.paramstyle == 'numeric': - cur.execute( - 'insert into %sbooze values (:1)' % self.table_prefix, - ("Cooper's",) - ) - elif self.driver.paramstyle == 'named': - cur.execute( - 'insert into %sbooze values (:beer)' % self.table_prefix, - {'beer':"Cooper's"} - ) - elif self.driver.paramstyle == 'format': - cur.execute( - 'insert into %sbooze values (%%s)' % self.table_prefix, - ("Cooper's",) - ) - elif self.driver.paramstyle == 'pyformat': - cur.execute( - 'insert into %sbooze values (%%(beer)s)' % self.table_prefix, - {'beer':"Cooper's"} - ) - else: - self.fail('Invalid paramstyle') - self.assertTrue(cur.rowcount in (-1,1)) - - cur.execute('select name from %sbooze' % self.table_prefix) - res = cur.fetchall() - self.assertEqual(len(res),2,'cursor.fetchall returned too few rows') - beers = [res[0][0],res[1][0]] - beers.sort() - self.assertEqual(beers[0],"Cooper's", - 'cursor.fetchall retrieved incorrect data, or data inserted ' - 'incorrectly' - ) - 
self.assertEqual(beers[1],"Victoria Bitter", - 'cursor.fetchall retrieved incorrect data, or data inserted ' - 'incorrectly' - ) - - def test_executemany(self): - con = self._connect() - try: - cur = con.cursor() - self.executeDDL1(cur) - largs = [ ("Cooper's",) , ("Boag's",) ] - margs = [ {'beer': "Cooper's"}, {'beer': "Boag's"} ] - if self.driver.paramstyle == 'qmark': - cur.executemany( - 'insert into %sbooze values (?)' % self.table_prefix, - largs - ) - elif self.driver.paramstyle == 'numeric': - cur.executemany( - 'insert into %sbooze values (:1)' % self.table_prefix, - largs - ) - elif self.driver.paramstyle == 'named': - cur.executemany( - 'insert into %sbooze values (:beer)' % self.table_prefix, - margs - ) - elif self.driver.paramstyle == 'format': - cur.executemany( - 'insert into %sbooze values (%%s)' % self.table_prefix, - largs - ) - elif self.driver.paramstyle == 'pyformat': - cur.executemany( - 'insert into %sbooze values (%%(beer)s)' % ( - self.table_prefix - ), - margs - ) - else: - self.fail('Unknown paramstyle') - self.assertTrue(cur.rowcount in (-1,2), - 'insert using cursor.executemany set cursor.rowcount to ' - 'incorrect value %r' % cur.rowcount - ) - cur.execute('select name from %sbooze' % self.table_prefix) - res = cur.fetchall() - self.assertEqual(len(res),2, - 'cursor.fetchall retrieved incorrect number of rows' - ) - beers = [res[0][0],res[1][0]] - beers.sort() - self.assertEqual(beers[0],"Boag's",'incorrect data retrieved') - self.assertEqual(beers[1],"Cooper's",'incorrect data retrieved') - finally: - con.close() - - def test_fetchone(self): - con = self._connect() - try: - cur = con.cursor() - - # cursor.fetchone should raise an Error if called before - # executing a select-type query - self.assertRaises(self.driver.Error,cur.fetchone) - - # cursor.fetchone should raise an Error if called after - # executing a query that cannot return rows - self.executeDDL1(cur) - self.assertRaises(self.driver.Error,cur.fetchone) - - 
cur.execute('select name from %sbooze' % self.table_prefix) - self.assertEqual(cur.fetchone(),None, - 'cursor.fetchone should return None if a query retrieves ' - 'no rows' - ) - self.assertTrue(cur.rowcount in (-1,0)) - - # cursor.fetchone should raise an Error if called after - # executing a query that cannot return rows - cur.execute("insert into %sbooze values ('Victoria Bitter')" % ( - self.table_prefix - )) - self.assertRaises(self.driver.Error,cur.fetchone) - - cur.execute('select name from %sbooze' % self.table_prefix) - r = cur.fetchone() - self.assertEqual(len(r),1, - 'cursor.fetchone should have retrieved a single row' - ) - self.assertEqual(r[0],'Victoria Bitter', - 'cursor.fetchone retrieved incorrect data' - ) - self.assertEqual(cur.fetchone(),None, - 'cursor.fetchone should return None if no more rows available' - ) - self.assertTrue(cur.rowcount in (-1,1)) - finally: - con.close() - - samples = [ - 'Carlton Cold', - 'Carlton Draft', - 'Mountain Goat', - 'Redback', - 'Victoria Bitter', - 'XXXX' - ] - - def _populate(self): - ''' Return a list of sql commands to setup the DB for the fetch - tests. - ''' - populate = [ - "insert into %sbooze values ('%s')" % (self.table_prefix,s) - for s in self.samples - ] - return populate - - def test_fetchmany(self): - con = self._connect() - try: - cur = con.cursor() - - # cursor.fetchmany should raise an Error if called without - #issuing a query - self.assertRaises(self.driver.Error,cur.fetchmany,4) - - self.executeDDL1(cur) - for sql in self._populate(): - cur.execute(sql) - - cur.execute('select name from %sbooze' % self.table_prefix) - r = cur.fetchmany() - self.assertEqual(len(r),1, - 'cursor.fetchmany retrieved incorrect number of rows, ' - 'default of arraysize is one.' 
- ) - cur.arraysize=10 - r = cur.fetchmany(3) # Should get 3 rows - self.assertEqual(len(r),3, - 'cursor.fetchmany retrieved incorrect number of rows' - ) - r = cur.fetchmany(4) # Should get 2 more - self.assertEqual(len(r),2, - 'cursor.fetchmany retrieved incorrect number of rows' - ) - r = cur.fetchmany(4) # Should be an empty sequence - self.assertEqual(len(r),0, - 'cursor.fetchmany should return an empty sequence after ' - 'results are exhausted' - ) - self.assertTrue(cur.rowcount in (-1,6)) - - # Same as above, using cursor.arraysize - cur.arraysize=4 - cur.execute('select name from %sbooze' % self.table_prefix) - r = cur.fetchmany() # Should get 4 rows - self.assertEqual(len(r),4, - 'cursor.arraysize not being honoured by fetchmany' - ) - r = cur.fetchmany() # Should get 2 more - self.assertEqual(len(r),2) - r = cur.fetchmany() # Should be an empty sequence - self.assertEqual(len(r),0) - self.assertTrue(cur.rowcount in (-1,6)) - - cur.arraysize=6 - cur.execute('select name from %sbooze' % self.table_prefix) - rows = cur.fetchmany() # Should get all rows - self.assertTrue(cur.rowcount in (-1,6)) - self.assertEqual(len(rows),6) - self.assertEqual(len(rows),6) - rows = [r[0] for r in rows] - rows.sort() - - # Make sure we get the right data back out - for i in range(0,6): - self.assertEqual(rows[i],self.samples[i], - 'incorrect data retrieved by cursor.fetchmany' - ) - - rows = cur.fetchmany() # Should return an empty list - self.assertEqual(len(rows),0, - 'cursor.fetchmany should return an empty sequence if ' - 'called after the whole result set has been fetched' - ) - self.assertTrue(cur.rowcount in (-1,6)) - - self.executeDDL2(cur) - cur.execute('select name from %sbarflys' % self.table_prefix) - r = cur.fetchmany() # Should get empty sequence - self.assertEqual(len(r),0, - 'cursor.fetchmany should return an empty sequence if ' - 'query retrieved no rows' - ) - self.assertTrue(cur.rowcount in (-1,0)) - - finally: - con.close() - - def test_fetchall(self): - 
con = self._connect() - try: - cur = con.cursor() - # cursor.fetchall should raise an Error if called - # without executing a query that may return rows (such - # as a select) - self.assertRaises(self.driver.Error, cur.fetchall) - - self.executeDDL1(cur) - for sql in self._populate(): - cur.execute(sql) - - # cursor.fetchall should raise an Error if called - # after executing a a statement that cannot return rows - self.assertRaises(self.driver.Error,cur.fetchall) - - cur.execute('select name from %sbooze' % self.table_prefix) - rows = cur.fetchall() - self.assertTrue(cur.rowcount in (-1,len(self.samples))) - self.assertEqual(len(rows),len(self.samples), - 'cursor.fetchall did not retrieve all rows' - ) - rows = [r[0] for r in rows] - rows.sort() - for i in range(0,len(self.samples)): - self.assertEqual(rows[i],self.samples[i], - 'cursor.fetchall retrieved incorrect rows' - ) - rows = cur.fetchall() - self.assertEqual( - len(rows),0, - 'cursor.fetchall should return an empty list if called ' - 'after the whole result set has been fetched' - ) - self.assertTrue(cur.rowcount in (-1,len(self.samples))) - - self.executeDDL2(cur) - cur.execute('select name from %sbarflys' % self.table_prefix) - rows = cur.fetchall() - self.assertTrue(cur.rowcount in (-1,0)) - self.assertEqual(len(rows),0, - 'cursor.fetchall should return an empty list if ' - 'a select query returns no rows' - ) - - finally: - con.close() - - def test_mixedfetch(self): - con = self._connect() - try: - cur = con.cursor() - self.executeDDL1(cur) - for sql in self._populate(): - cur.execute(sql) - - cur.execute('select name from %sbooze' % self.table_prefix) - rows1 = cur.fetchone() - rows23 = cur.fetchmany(2) - rows4 = cur.fetchone() - rows56 = cur.fetchall() - self.assertTrue(cur.rowcount in (-1,6)) - self.assertEqual(len(rows23),2, - 'fetchmany returned incorrect number of rows' - ) - self.assertEqual(len(rows56),2, - 'fetchall returned incorrect number of rows' - ) - - rows = [rows1[0]] - 
rows.extend([rows23[0][0],rows23[1][0]]) - rows.append(rows4[0]) - rows.extend([rows56[0][0],rows56[1][0]]) - rows.sort() - for i in range(0,len(self.samples)): - self.assertEqual(rows[i],self.samples[i], - 'incorrect data retrieved or inserted' - ) - finally: - con.close() - - def help_nextset_setUp(self,cur): - ''' Should create a procedure called deleteme - that returns two result sets, first the - number of rows in booze then "name from booze" - ''' - raise NotImplementedError,'Helper not implemented' - #sql=""" - # create procedure deleteme as - # begin - # select count(*) from booze - # select name from booze - # end - #""" - #cur.execute(sql) - - def help_nextset_tearDown(self,cur): - 'If cleaning up is needed after nextSetTest' - raise NotImplementedError,'Helper not implemented' - #cur.execute("drop procedure deleteme") - - def test_nextset(self): - con = self._connect() - try: - cur = con.cursor() - if not hasattr(cur,'nextset'): - return - - try: - self.executeDDL1(cur) - sql=self._populate() - for sql in self._populate(): - cur.execute(sql) - - self.help_nextset_setUp(cur) - - cur.callproc('deleteme') - numberofrows=cur.fetchone() - assert numberofrows[0]== len(self.samples) - assert cur.nextset() - names=cur.fetchall() - assert len(names) == len(self.samples) - s=cur.nextset() - assert s == None,'No more return sets, should return None' - finally: - self.help_nextset_tearDown(cur) - - finally: - con.close() - - def test_nextset(self): - raise NotImplementedError,'Drivers need to override this test' - - def test_arraysize(self): - # Not much here - rest of the tests for this are in test_fetchmany - con = self._connect() - try: - cur = con.cursor() - self.assertTrue(hasattr(cur,'arraysize'), - 'cursor.arraysize must be defined' - ) - finally: - con.close() - - def test_setinputsizes(self): - con = self._connect() - try: - cur = con.cursor() - cur.setinputsizes( (25,) ) - self._paraminsert(cur) # Make sure cursor still works - finally: - con.close() - - 
def test_setoutputsize_basic(self): - # Basic test is to make sure setoutputsize doesn't blow up - con = self._connect() - try: - cur = con.cursor() - cur.setoutputsize(1000) - cur.setoutputsize(2000,0) - self._paraminsert(cur) # Make sure the cursor still works - finally: - con.close() - - def test_setoutputsize(self): - # Real test for setoutputsize is driver dependent - raise NotImplementedError,'Driver need to override this test' - - def test_None(self): - con = self._connect() - try: - cur = con.cursor() - self.executeDDL1(cur) - cur.execute('insert into %sbooze values (NULL)' % self.table_prefix) - cur.execute('select name from %sbooze' % self.table_prefix) - r = cur.fetchall() - self.assertEqual(len(r),1) - self.assertEqual(len(r[0]),1) - self.assertEqual(r[0][0],None,'NULL value not returned as None') - finally: - con.close() - - def test_Date(self): - d1 = self.driver.Date(2002,12,25) - d2 = self.driver.DateFromTicks(time.mktime((2002,12,25,0,0,0,0,0,0))) - # Can we assume this? API doesn't specify, but it seems implied - # self.assertEqual(str(d1),str(d2)) - - def test_Time(self): - t1 = self.driver.Time(13,45,30) - t2 = self.driver.TimeFromTicks(time.mktime((2001,1,1,13,45,30,0,0,0))) - # Can we assume this? API doesn't specify, but it seems implied - # self.assertEqual(str(t1),str(t2)) - - def test_Timestamp(self): - t1 = self.driver.Timestamp(2002,12,25,13,45,30) - t2 = self.driver.TimestampFromTicks( - time.mktime((2002,12,25,13,45,30,0,0,0)) - ) - # Can we assume this? API doesn't specify, but it seems implied - # self.assertEqual(str(t1),str(t2)) - - def test_Binary(self): - b = self.driver.Binary('Something') - b = self.driver.Binary('') - - def test_STRING(self): - self.assertTrue(hasattr(self.driver,'STRING'), - 'module.STRING must be defined' - ) - - def test_BINARY(self): - self.assertTrue(hasattr(self.driver,'BINARY'), - 'module.BINARY must be defined.' 
- ) - - def test_NUMBER(self): - self.assertTrue(hasattr(self.driver,'NUMBER'), - 'module.NUMBER must be defined.' - ) - - def test_DATETIME(self): - self.assertTrue(hasattr(self.driver,'DATETIME'), - 'module.DATETIME must be defined.' - ) - - def test_ROWID(self): - self.assertTrue(hasattr(self.driver,'ROWID'), - 'module.ROWID must be defined.' - ) - diff --git a/tests3/dbapi_SQLParamData_memory__test.py b/tests3/dbapi_SQLParamData_memory__test.py deleted file mode 100644 index 0220c9bb..00000000 --- a/tests3/dbapi_SQLParamData_memory__test.py +++ /dev/null @@ -1,95 +0,0 @@ -""" -This tests ensures that there is no memory leakage -after using SQLParamData that returns -1. - -One scenario where SQLParamData function will be used is when there is a parameterized -INSERT INTO query with at least one parameter's length is too big. -Note that In my case, 'too big' means pGetLen(pInfo->pObject) was more than 4000. - -In order to execute the INSERT INTO query, SQLExecute is used. -SQLExecute will return SQL_NEED_DATA (SQL_NEED_DATA = 99), -then SQLParamData will be used to create a SQL parameter and will return SQL_NEED_DATA. -This call will create the pObject (PyObject) that should be freed. -After that SQLPutData will be used in a loop to save the data in this SQL parameter. -Then SQLParamData is called again, and if there is an error (-1), the data of newly -created SQL Parameter should be freed. - -This test should be tested against a table that has no space at all or no space in the -transaction log in order to get -1 value on the second call to SQLParamData. -The name of the table is stored in `TABLE_NAME`, change it to be your table's name. 
-""" -import gc -import os -import unittest - -import math -import psutil - -from tests3.testutils import add_to_path, load_setup_connection_string - -add_to_path() -import pyodbc - -KB = 1024 -MB = KB * 1024 - -ACCEPTABLE_MEMORY_DIFF = 500 * KB - -TABLE_NAME = "FullTable" - -CONNECTION_STRING = None - -CONNECTION_STRING_ERROR_MESSAGE = ( - r"Please create tmp\setup.cfg file or set a valid value to CONNECTION_STRING." -) - - -def current_total_memory_usage(): - """ - :return: Current total memory usage in bytes. - """ - process = psutil.Process(os.getpid()) - return process.memory_info().rss - - -class MemoryLeakSQLParamDataTestCase(unittest.TestCase): - driver = pyodbc - - @classmethod - def setUpClass(cls): - filename = os.path.splitext(os.path.basename(__file__))[0] - cls.connection_string = ( - load_setup_connection_string(filename) or CONNECTION_STRING - ) - - if not cls.connection_string: - return ValueError(CONNECTION_STRING_ERROR_MESSAGE) - - def test_memory_leak(self): - query = "INSERT INTO {table_name} VALUES (?)".format(table_name=TABLE_NAME) - - with pyodbc.connect(self.connection_string) as conn: - cursor = conn.cursor() - - current_memory_usage = current_total_memory_usage() - - try: - cur = cursor.execute(query, "a" * 10 * MB) - except self.driver.ProgrammingError as e: - self.assertEqual("42000", e.args[0]) - self.assertIn("SQLParamData", e.args[1]) - finally: - cursor.close() - - after_excpetion_memory_usage = current_total_memory_usage() - - diff = math.fabs(after_excpetion_memory_usage - current_memory_usage) - self.assertLess(diff, ACCEPTABLE_MEMORY_DIFF) - - -def main(): - unittest.main() - - -if __name__ == "__main__": - main() diff --git a/tests3/dbapi_SQLParamData_memory__test__requirements.txt b/tests3/dbapi_SQLParamData_memory__test__requirements.txt deleted file mode 100644 index 0b574b52..00000000 --- a/tests3/dbapi_SQLParamData_memory__test__requirements.txt +++ /dev/null @@ -1 +0,0 @@ -psutil \ No newline at end of file diff --git 
a/tests3/dbapitests.py b/tests3/dbapitests.py deleted file mode 100644 index 1d38b5e0..00000000 --- a/tests3/dbapitests.py +++ /dev/null @@ -1,47 +0,0 @@ -import sys -import unittest -from testutils import * -import dbapi20 - -def main(): - add_to_path() - import pyodbc - - from argparse import ArgumentParser - parser = ArgumentParser(usage="%(prog)s [options] connection_string") - parser.add_argument("-v", "--verbose", action="count", help="increment test verbosity (can be used multiple times)") - parser.add_argument("-d", "--debug", action="store_true", default=False, help="print debugging items") - parser.add_argument("conn_str", nargs="*", help="database connection string") - - args = parser.parse_args() - - if len(args.conn_str) > 1: - parser.error('Only one argument is allowed. Do you need quotes around the connection string?') - - if not args: - connection_string = load_setup_connection_string('dbapitests') - - if not connection_string: - parser.print_help() - raise SystemExit() - else: - connection_string = args.conn_str[0] - - class test_pyodbc(dbapi20.DatabaseAPI20Test): - driver = pyodbc - connect_args = [ connection_string ] - connect_kw_args = {} - - def test_nextset(self): pass - def test_setoutputsize(self): pass - def test_ExceptionsAsConnectionAttributes(self): pass - - suite = unittest.makeSuite(test_pyodbc, 'test') - testRunner = unittest.TextTestRunner(verbosity=(args.verbose > 1) and 9 or 0) - result = testRunner.run(suite) - - return result - - -if __name__ == '__main__': - sys.exit(0 if main().wasSuccessful() else 1) diff --git a/tests3/empty.accdb b/tests3/empty.accdb deleted file mode 100644 index 95cc8ae5..00000000 Binary files a/tests3/empty.accdb and /dev/null differ diff --git a/tests3/empty.mdb b/tests3/empty.mdb deleted file mode 100644 index dac96e8c..00000000 Binary files a/tests3/empty.mdb and /dev/null differ diff --git a/tests3/issue802.py b/tests3/issue802.py deleted file mode 100644 index a5af13d5..00000000 --- 
a/tests3/issue802.py +++ /dev/null @@ -1,101 +0,0 @@ -""" -This tests ensures that there is no memory leakage -when params.cpp:ExecuteMulti function does conversion of Unicode to Bytes. - -In ExecuteMulti function after DoExecute label - -SQLExecute returns - -One scenario where SQLParamData function will be used is when there is a varchar(max), -a parameter with an unknown size in the INSERT INTO query. -In this case, a unicode string is being added to a varchar(max) field. - -In order to execute the INSERT INTO query, SQLExecute is used. SQLExecute will return -SQL_NEED_DATA (SQL_NEED_DATA = 99). Then SQLParamData will be used to create a SQL -parameter and will return SQL_NEED_DATA too. When PyUnicode_Check(pInfo->cell) is true, -a conversion of Unicode to Bytes is required before it can be used by SQLPutData. -During this conversion a new PyObject, called bytes, is created and assigned to objCell. -This object never gets Py_XDECREF, and the data will stay stuck in the memory without a -reference. - -This memory leak is only visible when using varchar(max) because varchar(max) required -additional allocation of memory that correspond to the size of the input while -varchar(100) for example will not case another SQL_NEED_DATA status. - -To see how to reproduce the memory leak, -look at https://github.com/mkleehammer/pyodbc/issues/802 -""" -import os -import unittest - -import psutil - -from tests3.testutils import add_to_path, load_setup_connection_string - -add_to_path() -import pyodbc - -KB = 1024 -MB = KB * 1024 - -CONNECTION_STRING = None - -CONNECTION_STRING_ERROR_MESSAGE = ( - r"Please create tmp\setup.cfg file or set a valid value to CONNECTION_STRING." 
-) - -process = psutil.Process() - - -def memory(): - return process.memory_info().vms - - -class SQLPutDataUnicodeToBytesMemoryLeakTestCase(unittest.TestCase): - driver = pyodbc - - @classmethod - def setUpClass(cls): - filename = os.path.splitext(os.path.basename(__file__))[0] - cls.connection_string = ( - load_setup_connection_string(filename) or CONNECTION_STRING - ) - - if not cls.connection_string: - return ValueError(CONNECTION_STRING_ERROR_MESSAGE) - - def test__varchar_max__inserting_many_rows__same_memory_usage(self): - varchar_limit = "max" - num_rows = 50_000 - data = [(i, f"col{i:06}", 3.14159265 * (i + 1)) for i in range(num_rows)] - table_name = "pd_test" - col_names = ["id", "txt_col", "float_col"] - ins_sql = f"INSERT INTO {table_name} ({','.join(col_names)}) VALUES ({','.join('?' * len(col_names))})" - - with pyodbc.connect(self.connection_string, autocommit=True) as cnxn: - # First time adds memory, not related to the test. - self.action(cnxn, data, ins_sql, table_name, varchar_limit) - for iteration in range(3): - start_memory = memory() - self.action(cnxn, data, ins_sql, table_name, varchar_limit) - end_memory = memory() - memory_diff = end_memory - start_memory - self.assertLess(memory_diff, 100 * KB) - - def action(self, cnxn, data, ins_sql, table_name, varchar_limit): - crsr = cnxn.cursor() - crsr.execute(f"DROP TABLE IF EXISTS {table_name}") - crsr.execute( - f"CREATE TABLE {table_name} (id int, txt_col varchar({varchar_limit}), float_col float(53))" - ) - crsr.fast_executemany = True - crsr.executemany(ins_sql, data) - crsr.close() - - -def main(): - unittest.main() - - -if __name__ == "__main__": - main() diff --git a/tests3/issue998.py b/tests3/issue998.py deleted file mode 100644 index 24e58e2a..00000000 --- a/tests3/issue998.py +++ /dev/null @@ -1,78 +0,0 @@ -#!/usr/bin/env python3 -""" -Verify that no warning is emitted for `PyUnicode_FromUnicode(NULL, size)`. - -See https://github.com/mkleehammer/pyodbc/issues/998. 
-See also https://bugs.python.org/issue36346. -""" - -import io -import os -import sys -import unittest - -# pylint: disable-next=import-error -from tests3.testutils import add_to_path, load_setup_connection_string - -add_to_path() -import pyodbc # pylint: disable=wrong-import-position - -KB = 1024 -MB = KB * 1024 - -CONNECTION_STRING = None - -CONNECTION_STRING_ERROR_MESSAGE = ( - "Please create tmp/setup.cfg file or " - "set a valid value to CONNECTION_STRING." -) -NO_ERROR = None - - -class SQLPutDataUnicodeToBytesMemoryLeakTestCase(unittest.TestCase): - """Test case for issue998 bug fix.""" - - driver = pyodbc - - @classmethod - def setUpClass(cls): - """Set the connection string.""" - - filename = os.path.splitext(os.path.basename(__file__))[0] - cls.connection_string = ( - load_setup_connection_string(filename) or CONNECTION_STRING - ) - - if cls.connection_string: - return NO_ERROR - return ValueError(CONNECTION_STRING_ERROR_MESSAGE) - - def test_use_correct_unicode_factory_function(self): - """Verify that the obsolete function call has been replaced.""" - - # Create a results set. - with pyodbc.connect(self.connection_string, autocommit=True) as cnxn: - cursor = cnxn.cursor() - cursor.execute("SELECT 1 AS a, 2 AS b") - rows = cursor.fetchall() - - # Redirect stderr so we can detect the warning. - sys.stderr = redirected_stderr = io.StringIO() - - # Convert the results object to a string. - self.assertGreater(len(str(rows)), 0) - - # Restore stderr to the original stream. - sys.stderr = sys.__stderr__ - - # If the bug has been fixed, nothing will have been written to stderr. 
- self.assertEqual(len(redirected_stderr.getvalue()), 0) - - -def main(): - """Top-level driver for the test.""" - unittest.main() - - -if __name__ == "__main__": - main() diff --git a/tests3/mysqltests.py b/tests3/mysqltests.py deleted file mode 100644 index 131d68f1..00000000 --- a/tests3/mysqltests.py +++ /dev/null @@ -1,812 +0,0 @@ -#!/usr/bin/env python3 - -usage = """\ -%(prog)s [options] connection_string - -Unit tests for MySQL. To use, pass a connection string as the parameter. -The tests will create and drop tables t1 and t2 as necessary. - -These tests use the pyodbc library from the build directory, not the version installed in your -Python directories. You must run `python setup.py build` before running these tests. - -You can also put the connection string into a tmp/setup.cfg file like so: - - [mysqltests] - connection-string=DRIVER=MySQL ODBC 8.0 ANSI Driver;charset=utf8mb4;SERVER=localhost;DATABASE=pyodbc;UID=root;PWD=rootpw - -Note: Use the "ANSI" (not the "Unicode") driver and include charset=utf8mb4 in the connection string so the high-Unicode tests won't fail. -""" - -import sys, os, re -import unittest -from decimal import Decimal -from datetime import datetime, date, time -from os.path import join, getsize, dirname, abspath, basename - -if __name__ != '__main__': - import pyodbc - -import testutils - - -_TESTSTR = '0123456789-abcdefghijklmnopqrstuvwxyz-' - -def _generate_test_string(length): - """ - Returns a string of composed of `seed` to make a string `length` characters long. - - To enhance performance, there are 3 ways data is read, based on the length of the value, so most data types are - tested with 3 lengths. This function helps us generate the test data. - - We use a recognizable data set instead of a single character to make it less likely that "overlap" errors will - be hidden and to help us manually identify where a break occurs. 
- """ - if length <= len(_TESTSTR): - return _TESTSTR[:length] - - c = (length + len(_TESTSTR)-1) // len(_TESTSTR) - v = _TESTSTR * c - return v[:length] - -class MySqlTestCase(unittest.TestCase): - - INTEGERS = [ -1, 0, 1, 0x7FFFFFFF ] - BIGINTS = INTEGERS + [ 0xFFFFFFFF, 0x123456789 ] - - SMALL_FENCEPOST_SIZES = [ 0, 1, 255, 256, 510, 511, 512, 1023, 1024, 2047, 2048, 4000 ] - LARGE_FENCEPOST_SIZES = [ 4095, 4096, 4097, 10 * 1024, 20 * 1024 ] - - STR_FENCEPOSTS = [ _generate_test_string(size) for size in SMALL_FENCEPOST_SIZES ] - BLOB_FENCEPOSTS = STR_FENCEPOSTS + [ _generate_test_string(size) for size in LARGE_FENCEPOST_SIZES ] - - def __init__(self, method_name, connection_string=None): - unittest.TestCase.__init__(self, method_name) - if connection_string is not None: - self.connection_string = connection_string - else: - # if the connection string cannot be provided directly here, it can be - # provided in an environment variable - self.connection_string = os.environ['PYODBC_CONN_STR'] - - def setUp(self): - self.cnxn = pyodbc.connect(self.connection_string) - self.cursor = self.cnxn.cursor() - - # As of libmyodbc5w 5.3 SQLGetTypeInfo returns absurdly small sizes - # leading to slow writes. Override them: - self.cnxn.maxwrite = 1024 * 1024 * 1024 - - # My MySQL configuration (and I think the default) sends *everything* - # in UTF-8. The pyodbc default is to send Unicode as UTF-16 and to - # decode WCHAR via UTF-16. Change them both to UTF-8. 
- self.cnxn.setdecoding(pyodbc.SQL_CHAR, encoding='utf-8') - self.cnxn.setdecoding(pyodbc.SQL_WCHAR, encoding='utf-8') - self.cnxn.setencoding(encoding='utf-8') - - for i in range(3): - try: - self.cursor.execute("drop table t%d" % i) - self.cnxn.commit() - except: - pass - - for i in range(3): - try: - self.cursor.execute("drop procedure proc%d" % i) - self.cnxn.commit() - except: - pass - - self.cnxn.rollback() - - def tearDown(self): - try: - self.cursor.close() - self.cnxn.close() - except: - # If we've already closed the cursor or connection, exceptions are thrown. - pass - - def test_multiple_bindings(self): - "More than one bind and select on a cursor" - self.cursor.execute("create table t1(n int)") - self.cursor.execute("insert into t1 values (?)", 1) - self.cursor.execute("insert into t1 values (?)", 2) - self.cursor.execute("insert into t1 values (?)", 3) - for i in range(3): - self.cursor.execute("select n from t1 where n < ?", 10) - self.cursor.execute("select n from t1 where n < 3") - - - def test_different_bindings(self): - self.cursor.execute("create table t1(n int)") - self.cursor.execute("create table t2(d datetime)") - self.cursor.execute("insert into t1 values (?)", 1) - self.cursor.execute("insert into t2 values (?)", datetime.now()) - - def test_drivers(self): - p = pyodbc.drivers() - self.assertTrue(isinstance(p, list)) - - def test_datasources(self): - p = pyodbc.dataSources() - self.assertTrue(isinstance(p, dict)) - - def test_getinfo_string(self): - value = self.cnxn.getinfo(pyodbc.SQL_CATALOG_NAME_SEPARATOR) - self.assertTrue(isinstance(value, str)) - - def test_getinfo_bool(self): - value = self.cnxn.getinfo(pyodbc.SQL_ACCESSIBLE_TABLES) - self.assertTrue(isinstance(value, bool)) - - def test_getinfo_int(self): - value = self.cnxn.getinfo(pyodbc.SQL_DEFAULT_TXN_ISOLATION) - self.assertTrue(isinstance(value, int)) - - def test_getinfo_smallint(self): - value = self.cnxn.getinfo(pyodbc.SQL_CONCAT_NULL_BEHAVIOR) - 
self.assertTrue(isinstance(value, int)) - - def _test_strtype(self, sqltype, value, colsize=None): - """ - The implementation for string and binary tests. - """ - assert colsize is None or (value is None or colsize >= len(value)) - - if colsize: - sql = "create table t1(s %s(%s))" % (sqltype, colsize) - else: - sql = "create table t1(s %s)" % sqltype - - try: - self.cursor.execute(sql) - except: - print('>>>>', sql) - self.cursor.execute("insert into t1 values(?)", value) - v = self.cursor.execute("select * from t1").fetchone()[0] - - # Removing this check for now until I get the charset working properly. - # If we use latin1, results are 'str' instead of 'unicode', which would be - # correct. Setting charset to ucs-2 causes a crash in SQLGetTypeInfo(SQL_DATETIME). - # self.assertEqual(type(v), type(value)) - - if value is not None: - self.assertEqual(len(v), len(value)) - - self.assertEqual(v, value) - - # - # varchar - # - - def test_varchar_null(self): - self._test_strtype('varchar', None, 100) - - # Generate a test for each fencepost size: test_varchar_0, etc. 
- def _maketest(value): - def t(self): - self._test_strtype('varchar', value, max(1, len(value))) - return t - for value in STR_FENCEPOSTS: - locals()['test_varchar_%s' % len(value)] = _maketest(value) - - def test_varchar_many(self): - self.cursor.execute("create table t1(c1 varchar(300), c2 varchar(300), c3 varchar(300))") - - v1 = 'ABCDEFGHIJ' * 30 - v2 = '0123456789' * 30 - v3 = '9876543210' * 30 - - self.cursor.execute("insert into t1(c1, c2, c3) values (?,?,?)", v1, v2, v3); - row = self.cursor.execute("select c1, c2, c3 from t1").fetchone() - - self.assertEqual(v1, row.c1) - self.assertEqual(v2, row.c2) - self.assertEqual(v3, row.c3) - - def test_varchar_upperlatin(self): - self._test_strtype('varchar', u'á', colsize=3) - - def test_utf16(self): - self.cursor.execute("create table t1(c1 varchar(100) character set utf16, c2 varchar(100))") - self.cursor.execute("insert into t1 values ('test', 'test')") - value = "test" - row = self.cursor.execute("select c1,c2 from t1").fetchone() - for v in row: - self.assertEqual(type(v), str) - self.assertEqual(v, value) - - # - # binary - # - - def test_null_binary(self): - self._test_strtype('varbinary', None, 100) - - def test_large_null_binary(self): - # Bug 1575064 - self._test_strtype('varbinary', None, 4000) - - # Generate a test for each fencepost size: test_binary_0, etc. - def _maketest(value): - def t(self): - self._test_strtype('varbinary', bytes(value, 'utf-8'), max(1, len(value))) - return t - for value in STR_FENCEPOSTS: - locals()['test_binary_%s' % len(value)] = _maketest(value) - - # - # blob - # - - def test_blob_null(self): - self._test_strtype('blob', None) - - # Generate a test for each fencepost size: test_blob_0, etc. 
- def _maketest(value): - def t(self): - self._test_strtype('blob', bytes(value, 'utf-8')) - return t - for value in BLOB_FENCEPOSTS: - locals()['test_blob_%s' % len(value)] = _maketest(value) - - def test_blob_upperlatin(self): - self._test_strtype('blob', bytes('á', 'utf-8')) - - # - # text - # - - def test_null_text(self): - self._test_strtype('text', None) - - # Generate a test for each fencepost size: test_text_0, etc. - def _maketest(value): - def t(self): - self._test_strtype('text', value) - return t - for value in STR_FENCEPOSTS: - locals()['test_text_%s' % len(value)] = _maketest(value) - - def test_text_upperlatin(self): - self._test_strtype('text', 'á') - - # - # unicode - # - - def test_unicode_query(self): - self.cursor.execute(u"select 1") - - # - # bit - # - - # The MySQL driver maps BIT columns to the ODBC bit data type, but they aren't behaving quite like a Boolean value - # (which is what the ODBC bit data type really represents). The MySQL BOOL data type is just an alias for a small - # integer, so pyodbc can't recognize it and map it back to True/False. - # - # You can use both BIT and BOOL and they will act as you expect if you treat them as integers. You can write 0 and - # 1 to them and they will work. 
- - # def test_bit(self): - # value = True - # self.cursor.execute("create table t1(b bit)") - # self.cursor.execute("insert into t1 values (?)", value) - # v = self.cursor.execute("select b from t1").fetchone()[0] - # self.assertEqual(type(v), bool) - # self.assertEqual(v, value) - # - # def test_bit_string_true(self): - # self.cursor.execute("create table t1(b bit)") - # self.cursor.execute("insert into t1 values (?)", "xyzzy") - # v = self.cursor.execute("select b from t1").fetchone()[0] - # self.assertEqual(type(v), bool) - # self.assertEqual(v, True) - # - # def test_bit_string_false(self): - # self.cursor.execute("create table t1(b bit)") - # self.cursor.execute("insert into t1 values (?)", "") - # v = self.cursor.execute("select b from t1").fetchone()[0] - # self.assertEqual(type(v), bool) - # self.assertEqual(v, False) - - # - # decimal - # - - def test_small_decimal(self): - # value = Decimal('1234567890987654321') - value = Decimal('100010') # (I use this because the ODBC docs tell us how the bytes should look in the C struct) - self.cursor.execute("create table t1(d numeric(19))") - self.cursor.execute("insert into t1 values(?)", value) - v = self.cursor.execute("select * from t1").fetchone()[0] - self.assertEqual(type(v), Decimal) - self.assertEqual(v, value) - - - def test_small_decimal_scale(self): - # The same as small_decimal, except with a different scale. This value exactly matches the ODBC documentation - # example in the C Data Types appendix. 
- value = '1000.10' - value = Decimal(value) - self.cursor.execute("create table t1(d numeric(20,6))") - self.cursor.execute("insert into t1 values(?)", value) - v = self.cursor.execute("select * from t1").fetchone()[0] - self.assertEqual(type(v), Decimal) - self.assertEqual(v, value) - - - def test_negative_decimal_scale(self): - value = Decimal('-10.0010') - self.cursor.execute("create table t1(d numeric(19,4))") - self.cursor.execute("insert into t1 values(?)", value) - v = self.cursor.execute("select * from t1").fetchone()[0] - self.assertEqual(type(v), Decimal) - self.assertEqual(v, value) - - def _test_inttype(self, datatype, n): - self.cursor.execute('create table t1(n %s)' % datatype) - self.cursor.execute('insert into t1 values (?)', n) - result = self.cursor.execute("select n from t1").fetchone()[0] - self.assertEqual(result, n) - - def _maketest(datatype, value): - def t(self): - self._test_inttype(datatype, value) - return t - - for value in INTEGERS: - name = str(abs(value)) - if value < 0: - name = 'neg_' + name - locals()['test_int_%s' % name] = _maketest('int', value) - - for value in BIGINTS: - name = str(abs(value)) - if value < 0: - name = 'neg_' + name - locals()['test_bigint_%s' % name] = _maketest('bigint', value) - - def test_subquery_params(self): - """Ensure parameter markers work in a subquery""" - self.cursor.execute("create table t1(id integer, s varchar(20))") - self.cursor.execute("insert into t1 values (?,?)", 1, 'test') - row = self.cursor.execute(""" - select x.id - from ( - select id - from t1 - where s = ? - and id between ? and ? 
- ) x - """, 'test', 1, 10).fetchone() - self.assertNotEqual(row, None) - self.assertEqual(row[0], 1) - - def _exec(self): - self.cursor.execute(self.sql) - - def test_close_cnxn(self): - """Make sure using a Cursor after closing its connection doesn't crash.""" - - self.cursor.execute("create table t1(id integer, s varchar(20))") - self.cursor.execute("insert into t1 values (?,?)", 1, 'test') - self.cursor.execute("select * from t1") - - self.cnxn.close() - - # Now that the connection is closed, we expect an exception. (If the code attempts to use - # the HSTMT, we'll get an access violation instead.) - self.sql = "select * from t1" - self.assertRaises(pyodbc.ProgrammingError, self._exec) - - def test_empty_string(self): - self.cursor.execute("create table t1(s varchar(20))") - self.cursor.execute("insert into t1 values(?)", "") - - def test_fixed_str(self): - value = "testing" - self.cursor.execute("create table t1(s char(7))") - self.cursor.execute("insert into t1 values(?)", "testing") - v = self.cursor.execute("select * from t1").fetchone()[0] - self.assertEqual(v, value) - - def test_negative_row_index(self): - self.cursor.execute("create table t1(s varchar(20))") - self.cursor.execute("insert into t1 values(?)", "1") - row = self.cursor.execute("select * from t1").fetchone() - self.assertEqual(row[0], "1") - self.assertEqual(row[-1], "1") - - def test_version(self): - self.assertEqual(3, len(pyodbc.version.split('.'))) # 1.3.1 etc. 
- - # - # date, time, datetime - # - - def test_datetime(self): - value = datetime(2007, 1, 15, 3, 4, 5) - - self.cursor.execute("create table t1(dt datetime)") - self.cursor.execute("insert into t1 values (?)", value) - - result = self.cursor.execute("select dt from t1").fetchone()[0] - self.assertEqual(value, result) - - def test_date(self): - value = date(2001, 1, 1) - - self.cursor.execute("create table t1(dt date)") - self.cursor.execute("insert into t1 values (?)", value) - - result = self.cursor.execute("select dt from t1").fetchone()[0] - self.assertEqual(type(result), type(value)) - self.assertEqual(result, value) - - # - # ints and floats - # - - def test_int(self): - value = 1234 - self.cursor.execute("create table t1(n int)") - self.cursor.execute("insert into t1 values (?)", value) - result = self.cursor.execute("select n from t1").fetchone()[0] - self.assertEqual(result, value) - - def test_negative_int(self): - value = -1 - self.cursor.execute("create table t1(n int)") - self.cursor.execute("insert into t1 values (?)", value) - result = self.cursor.execute("select n from t1").fetchone()[0] - self.assertEqual(result, value) - - def test_bigint(self): - - # This fails on 64-bit Fedora with 5.1. - # Should return 0x0123456789 - # Does return 0x0000000000 - # - # Top 4 bytes are returned as 0x00 00 00 00. If the input is high enough, they are returned as 0xFF FF FF FF. 
- input = 0x123456789 - self.cursor.execute("create table t1(d bigint)") - self.cursor.execute("insert into t1 values (?)", input) - result = self.cursor.execute("select d from t1").fetchone()[0] - self.assertEqual(result, input) - - def test_float(self): - value = 1234.5 - self.cursor.execute("create table t1(n float)") - self.cursor.execute("insert into t1 values (?)", value) - result = self.cursor.execute("select n from t1").fetchone()[0] - self.assertEqual(result, value) - - def test_negative_float(self): - value = -200 - self.cursor.execute("create table t1(n float)") - self.cursor.execute("insert into t1 values (?)", value) - result = self.cursor.execute("select n from t1").fetchone()[0] - self.assertEqual(value, result) - - - def test_date(self): - value = date.today() - - self.cursor.execute("create table t1(d date)") - self.cursor.execute("insert into t1 values (?)", value) - - result = self.cursor.execute("select d from t1").fetchone()[0] - self.assertEqual(value, result) - - - def test_time(self): - value = datetime.now().time() - - # We aren't yet writing values using the new extended time type so the value written to the database is only - # down to the second. - value = value.replace(microsecond=0) - - self.cursor.execute("create table t1(t time)") - self.cursor.execute("insert into t1 values (?)", value) - - result = self.cursor.execute("select t from t1").fetchone()[0] - self.assertEqual(value, result) - - # - # misc - # - - def test_rowcount_delete(self): - self.assertEqual(self.cursor.rowcount, -1) - self.cursor.execute("create table t1(i int)") - count = 4 - for i in range(count): - self.cursor.execute("insert into t1 values (?)", i) - self.cursor.execute("delete from t1") - self.assertEqual(self.cursor.rowcount, count) - - def test_rowcount_nodata(self): - """ - This represents a different code path than a delete that deleted something. - - The return value is SQL_NO_DATA and code after it was causing an error. 
We could use SQL_NO_DATA to step over - the code that errors out and drop down to the same SQLRowCount code. On the other hand, we could hardcode a - zero return value. - """ - self.cursor.execute("create table t1(i int)") - # This is a different code path internally. - self.cursor.execute("delete from t1") - self.assertEqual(self.cursor.rowcount, 0) - - def test_rowcount_select(self): - """ - Ensure Cursor.rowcount is set properly after a select statement. - - pyodbc calls SQLRowCount after each execute and sets Cursor.rowcount. Databases can return the actual rowcount - or they can return -1 if it would help performance. MySQL seems to always return the correct rowcount. - """ - self.cursor.execute("create table t1(i int)") - count = 4 - for i in range(count): - self.cursor.execute("insert into t1 values (?)", i) - self.cursor.execute("select * from t1") - self.assertEqual(self.cursor.rowcount, count) - - rows = self.cursor.fetchall() - self.assertEqual(len(rows), count) - self.assertEqual(self.cursor.rowcount, count) - - def test_rowcount_reset(self): - "Ensure rowcount is reset to -1" - - # The Python DB API says that rowcount should be set to -1 and most ODBC drivers let us know there are no - # records. MySQL always returns 0, however. Without parsing the SQL (which we are not going to do), I'm not - # sure how we can tell the difference and set the value to -1. For now, I'll have this test check for 0. - - self.cursor.execute("create table t1(i int)") - count = 4 - for i in range(count): - self.cursor.execute("insert into t1 values (?)", i) - self.assertEqual(self.cursor.rowcount, 1) - - self.cursor.execute("create table t2(i int)") - self.assertEqual(self.cursor.rowcount, 0) - - def test_lower_case(self): - "Ensure pyodbc.lowercase forces returned column names to lowercase." - - # Has to be set before creating the cursor, so we must recreate self.cursor. 
- - pyodbc.lowercase = True - self.cursor = self.cnxn.cursor() - - self.cursor.execute("create table t1(Abc int, dEf int)") - self.cursor.execute("select * from t1") - - names = [ t[0] for t in self.cursor.description ] - names.sort() - - self.assertEqual(names, [ "abc", "def" ]) - - # Put it back so other tests don't fail. - pyodbc.lowercase = False - - def test_row_description(self): - """ - Ensure Cursor.description is accessible as Row.cursor_description. - """ - self.cursor = self.cnxn.cursor() - self.cursor.execute("create table t1(a int, b char(3))") - self.cnxn.commit() - self.cursor.execute("insert into t1 values(1, 'abc')") - - row = self.cursor.execute("select * from t1").fetchone() - self.assertEqual(self.cursor.description, row.cursor_description) - - def test_executemany(self): - self.cursor.execute("create table t1(a int, b varchar(10))") - - params = [(i, str(i)) for i in range(1, 6)] - - self.cursor.executemany("insert into t1(a, b) values (?,?)", params) - - count = self.cursor.execute("select count(*) from t1").fetchone()[0] - self.assertEqual(count, len(params)) - - self.cursor.execute("select a, b from t1 order by a") - rows = self.cursor.fetchall() - self.assertEqual(count, len(rows)) - - for param, row in zip(params, rows): - self.assertEqual(param[0], row[0]) - self.assertEqual(param[1], row[1]) - - def test_fast_executemany(self): - driver_name = self.cnxn.getinfo(pyodbc.SQL_DRIVER_NAME) - if driver_name.lower().endswith('a.dll') or driver_name.lower().endswith('a.so'): - # skip this test for the ANSI driver - # on Windows, it crashes CPython - # on Linux, it simply fails - return - - self.cursor.fast_executemany = True - - self.cursor.execute("create table t1(a int, b varchar(10))") - - params = [(i, str(i)) for i in range(1, 6)] - - self.cursor.executemany("insert into t1(a, b) values (?,?)", params) - - count = self.cursor.execute("select count(*) from t1").fetchone()[0] - self.assertEqual(count, len(params)) - - 
self.cursor.execute("select a, b from t1 order by a") - rows = self.cursor.fetchall() - self.assertEqual(count, len(rows)) - - for param, row in zip(params, rows): - self.assertEqual(param[0], row[0]) - self.assertEqual(param[1], row[1]) - - def test_executemany_one(self): - "Pass executemany a single sequence" - self.cursor.execute("create table t1(a int, b varchar(10))") - - params = [ (1, "test") ] - - self.cursor.executemany("insert into t1(a, b) values (?,?)", params) - - count = self.cursor.execute("select count(*) from t1").fetchone()[0] - self.assertEqual(count, len(params)) - - self.cursor.execute("select a, b from t1 order by a") - rows = self.cursor.fetchall() - self.assertEqual(count, len(rows)) - - for param, row in zip(params, rows): - self.assertEqual(param[0], row[0]) - self.assertEqual(param[1], row[1]) - - - # REVIEW: The following fails. Research. - - # def test_executemany_failure(self): - # """ - # Ensure that an exception is raised if one query in an executemany fails. 
- # """ - # self.cursor.execute("create table t1(a int, b varchar(10))") - # - # params = [ (1, 'good'), - # ('error', 'not an int'), - # (3, 'good') ] - # - # self.assertRaises(pyodbc.Error, self.cursor.executemany, "insert into t1(a, b) value (?, ?)", params) - - - def test_row_slicing(self): - self.cursor.execute("create table t1(a int, b int, c int, d int)"); - self.cursor.execute("insert into t1 values(1,2,3,4)") - - row = self.cursor.execute("select * from t1").fetchone() - - result = row[:] - self.assertTrue(result is row) - - result = row[:-1] - self.assertEqual(result, (1,2,3)) - - result = row[0:4] - self.assertTrue(result is row) - - - def test_row_repr(self): - self.cursor.execute("create table t1(a int, b int, c int, d int)"); - self.cursor.execute("insert into t1 values(1,2,3,4)") - - row = self.cursor.execute("select * from t1").fetchone() - - result = str(row) - self.assertEqual(result, "(1, 2, 3, 4)") - - result = str(row[:-1]) - self.assertEqual(result, "(1, 2, 3)") - - result = str(row[:1]) - self.assertEqual(result, "(1,)") - - - def test_autocommit(self): - self.assertEqual(self.cnxn.autocommit, False) - - othercnxn = pyodbc.connect(self.connection_string, autocommit=True) - self.assertEqual(othercnxn.autocommit, True) - - othercnxn.autocommit = False - self.assertEqual(othercnxn.autocommit, False) - - def test_emoticons_as_parameter(self): - # https://github.com/mkleehammer/pyodbc/issues/423 - # - # When sending a varchar parameter, pyodbc is supposed to set ColumnSize to the number - # of characters. Ensure it works even with 4-byte characters. 
- # - # http://www.fileformat.info/info/unicode/char/1f31c/index.htm - - v = "x \U0001F31C z" - - self.cursor.execute("CREATE TABLE t1(s varchar(100)) DEFAULT CHARSET=utf8mb4") - self.cursor.execute("insert into t1 values (?)", v) - - result = self.cursor.execute("select s from t1").fetchone()[0] - - self.assertEqual(result, v) - - def test_emoticons_as_literal(self): - # https://github.com/mkleehammer/pyodbc/issues/630 - - v = "x \U0001F31C z" - - self.cursor.execute("CREATE TABLE t1(s varchar(100)) DEFAULT CHARSET=utf8mb4") - self.cursor.execute("insert into t1 values ('%s')" % v) - - result = self.cursor.execute("select s from t1").fetchone()[0] - - self.assertEqual(result, v) - -def main(): - from argparse import ArgumentParser - parser = ArgumentParser(usage=usage) - parser.add_argument("-v", "--verbose", action="count", default=0, help="increment test verbosity (can be used multiple times)") - parser.add_argument("-d", "--debug", action="store_true", default=False, help="print debugging items") - parser.add_argument("-t", "--test", help="run only the named test") - parser.add_argument("--mysql", nargs='*', help="connection string(s) for MySQL") - # typically, the connection string is provided as the only parameter, so handle this case - parser.add_argument('conn_str', nargs='*', help="connection string for MySQL") - args = parser.parse_args() - - if len(args.conn_str) > 1: - parser.error('Only one argument is allowed. 
Do you need quotes around the connection string?') - - if args.mysql is not None: - connection_strings = args.mysql - elif len(args.conn_str) == 1 and args.conn_str[0]: - connection_strings = [args.conn_str[0]] - else: - config_conn_string = testutils.load_setup_connection_string('mysqltests') - if config_conn_string is None: - parser.print_help() - return True # no connection string, therefore nothing to do - else: - connection_strings = [config_conn_string] - - if args.verbose: - cnxn = pyodbc.connect(connection_strings[0]) - testutils.print_library_info(cnxn) - cnxn.close() - - overall_result = True - for connection_string in connection_strings: - print(f'Running tests with connection string: {connection_string}') - suite = testutils.load_tests(MySqlTestCase, args.test, connection_string) - testRunner = unittest.TextTestRunner(verbosity=args.verbose) - result = testRunner.run(suite) - if not result.wasSuccessful(): - overall_result = False - - return overall_result - - -if __name__ == '__main__': - - # add the build directory to the Python path so we're testing the latest - # build, not the pip-installed version - testutils.add_to_path() - - # only after setting the Python path, import the pyodbc module - import pyodbc - - # run the tests - sys.exit(0 if main() else 1) diff --git a/tests3/pgtests.py b/tests3/pgtests.py deleted file mode 100755 index 63331c29..00000000 --- a/tests3/pgtests.py +++ /dev/null @@ -1,795 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- - -usage = """\ -%(prog)s [options] connection_string - -Unit tests for PostgreSQL. To use, pass a connection string as the parameter. -The tests will create and drop tables t1 and t2 as necessary. - -These run using the version from the 'build' directory, not the version -installed into the Python directories. You must run python setup.py build -before running the tests. 
- -You can also put the connection string into a tmp/setup.cfg file like so: - - [pgtests] - connection-string=DSN=PostgreSQL35W - -Note: Be sure to use the "Unicode" (not the "ANSI") version of the PostgreSQL ODBC driver. -""" - -import os -import sys -import uuid -import unittest -from decimal import Decimal - -if __name__ != '__main__': - import pyodbc - -import testutils - - -_TESTSTR = '0123456789-abcdefghijklmnopqrstuvwxyz-' - - -def _generate_test_string(length): - """ - Returns a string of composed of `seed` to make a string `length` characters long. - - To enhance performance, there are 3 ways data is read, based on the length of the value, so - most data types are tested with 3 lengths. This function helps us generate the test data. - - We use a recognizable data set instead of a single character to make it less likely that - "overlap" errors will be hidden and to help us manually identify where a break occurs. - """ - if length <= len(_TESTSTR): - return _TESTSTR[:length] - - c = int((length + len(_TESTSTR) - 1) / len(_TESTSTR)) - v = _TESTSTR * c - return v[:length] - - -class PGTestCase(unittest.TestCase): - - INTEGERS = [ -1, 0, 1, 0x7FFFFFFF ] - BIGINTS = INTEGERS + [ 0xFFFFFFFF, 0x123456789 ] - - SMALL_READ = 100 - LARGE_READ = 4000 - - SMALL_STRING = _generate_test_string(SMALL_READ) - LARGE_STRING = _generate_test_string(LARGE_READ) - SMALL_BYTES = bytes(SMALL_STRING, 'utf-8') - LARGE_BYTES = bytes(LARGE_STRING, 'utf-8') - - def __init__(self, method_name, connection_string=None, ansi=False): - unittest.TestCase.__init__(self, method_name) - if connection_string is not None: - self.connection_string = connection_string - else: - # if the connection string cannot be provided directly here, it can be - # provided in an environment variable - self.connection_string = os.environ['PYODBC_CONN_STR'] - self.ansi = ansi - - def setUp(self): - self.cnxn = pyodbc.connect(self.connection_string, ansi=self.ansi) - self.cursor = self.cnxn.cursor() - - # I've 
set my test database to use UTF-8 which seems most popular. - # - # disabled re: issue #1004 - # - #self.cnxn.setdecoding(pyodbc.SQL_WCHAR, encoding='utf-8') - #self.cnxn.setencoding(encoding='utf-8') - - # As of psql 9.5.04 SQLGetTypeInfo returns absurdly small sizes leading - # to slow writes. Override them: - self.cnxn.maxwrite = 1024 * 1024 * 1024 - - for i in range(3): - try: - self.cursor.execute("drop table t%d" % i) - self.cnxn.commit() - except: - pass - - self.cnxn.rollback() - - - def tearDown(self): - try: - self.cursor.close() - self.cnxn.close() - except: - # If we've already closed the cursor or connection, exceptions are thrown. - pass - - def _simpletest(datatype, inval): - # A simple test that can be used for any data type where the Python - # type we write is also what we expect to receive. - def _t(self): - self.cursor.execute('create table t1(inval %s)' % datatype) - self.cursor.execute('insert into t1 values (?)', inval) - outval = self.cursor.execute("select inval from t1").fetchone()[0] - self.assertEqual(outval, inval) - return _t - - def test_drivers(self): - p = pyodbc.drivers() - self.assertTrue(isinstance(p, list)) - - def test_datasources(self): - p = pyodbc.dataSources() - self.assertTrue(isinstance(p, dict)) - - # def test_gettypeinfo(self): - # self.cursor.getTypeInfo(pyodbc.SQL_VARCHAR) - # cols = [t[0] for t in self.cursor.description] - # print('cols:', cols) - # for row in self.cursor: - # for col,val in zip(cols, row): - # print(' ', col, val) - - def test_getinfo_string(self): - value = self.cnxn.getinfo(pyodbc.SQL_CATALOG_NAME_SEPARATOR) - self.assertTrue(isinstance(value, str)) - - def test_getinfo_bool(self): - value = self.cnxn.getinfo(pyodbc.SQL_ACCESSIBLE_TABLES) - self.assertTrue(isinstance(value, bool)) - - def test_getinfo_int(self): - value = self.cnxn.getinfo(pyodbc.SQL_DEFAULT_TXN_ISOLATION) - self.assertTrue(isinstance(value, int)) - - def test_getinfo_smallint(self): - value = 
self.cnxn.getinfo(pyodbc.SQL_CONCAT_NULL_BEHAVIOR) - self.assertTrue(isinstance(value, int)) - - - def test_negative_float(self): - value = -200 - self.cursor.execute("create table t1(n float)") - self.cursor.execute("insert into t1 values (?)", value) - result = self.cursor.execute("select n from t1").fetchone()[0] - self.assertEqual(value, result) - - - def _test_strtype(self, sqltype, value, colsize=None, resulttype=None): - """ - The implementation for string, Unicode, and binary tests. - """ - assert colsize is None or (value is None or colsize >= len(value)) - - if colsize: - sql = "create table t1(s %s(%s))" % (sqltype, colsize) - else: - sql = "create table t1(s %s)" % sqltype - - self.cursor.execute(sql) - self.cursor.execute("insert into t1 values(?)", value) - - result = self.cursor.execute("select * from t1").fetchone()[0] - - if resulttype and type(value) is not resulttype: - value = resulttype(value) - - self.assertEqual(result, value) - - def test_maxwrite(self): - # If we write more than `maxwrite` bytes, pyodbc will switch from - # binding the data all at once to providing it at execute time with - # SQLPutData. The default maxwrite is 1GB so this is rarely needed in - # PostgreSQL but I need to test the functionality somewhere. - self.cnxn.maxwrite = 300 - self._test_strtype('varchar', _generate_test_string(400)) - - # - # VARCHAR - # - - def test_empty_varchar(self): - self._test_strtype('varchar', '', self.SMALL_READ) - - def test_null_varchar(self): - self._test_strtype('varchar', None, self.SMALL_READ) - - def test_large_null_varchar(self): - # There should not be a difference, but why not find out? 
- self._test_strtype('varchar', None, self.LARGE_READ) - - def test_small_varchar(self): - self._test_strtype('varchar', self.SMALL_STRING, self.SMALL_READ) - - def test_large_varchar(self): - self._test_strtype('varchar', self.LARGE_STRING, self.LARGE_READ) - - def test_varchar_many(self): - self.cursor.execute("create table t1(c1 varchar(300), c2 varchar(300), c3 varchar(300))") - - v1 = 'ABCDEFGHIJ' * 30 - v2 = '0123456789' * 30 - v3 = '9876543210' * 30 - - self.cursor.execute("insert into t1(c1, c2, c3) values (?,?,?)", v1, v2, v3) - row = self.cursor.execute("select c1, c2, c3 from t1").fetchone() - - self.assertEqual(v1, row.c1) - self.assertEqual(v2, row.c2) - self.assertEqual(v3, row.c3) - - def test_chinese(self): - v = '我的' - self.cursor.execute("SELECT N'我的' AS name") - row = self.cursor.fetchone() - self.assertEqual(row[0], v) - - self.cursor.execute("SELECT N'我的' AS name") - rows = self.cursor.fetchall() - self.assertEqual(rows[0][0], v) - - # - # bytea - # - - def test_null_bytea(self): - self._test_strtype('bytea', None) - def test_small_bytea(self): - self._test_strtype('bytea', self.SMALL_BYTES) - def test_large_bytea(self): - self._test_strtype('bytea', self.LARGE_BYTES) - - # Now test with bytearray - def test_large_bytea_array(self): - self._test_strtype('bytea', bytearray(self.LARGE_BYTES), resulttype=bytes) - - for value in INTEGERS: - name = str(value).replace('.', '_').replace('-', 'neg_') - locals()['test_int_%s' % name] = _simpletest('int', value) - - for value in BIGINTS: - name = str(value).replace('.', '_').replace('-', 'neg_') - locals()['test_bigint_%s' % name] = _simpletest('bigint', value) - - for value in [-1234.56, -1, 0, 1, 1234.56, 123456789.21]: - name = str(value).replace('.', '_').replace('-', 'neg_') - locals()['test_money_%s' % name] = _simpletest('money', value) - - for value in "-1234.56 -1 0 1 1234.56 123456789.21".split(): - name = value.replace('.', '_').replace('-', 'neg_') - locals()['test_decimal_%s' % name] = 
_simpletest('decimal(20,6)', Decimal(value)) - - for value in "-1234.56 -1 0 1 1234.56 123456789.21".split(): - name = value.replace('.', '_').replace('-', 'neg_') - locals()['test_numeric_%s' % name] = _simpletest('numeric(20,6)', Decimal(value)) - - def test_large_decimal(self): - # Version 4.0.35 had a buffer overflow here. - self.cursor.execute("SELECT 991113333311111333331111133333111113333311111333337711133333111113333311111333331111133333881113333321341235123512351123.1231245123512341241234::decimal AS n") - self.cursor.fetchone() - - def test_small_decimal(self): - value = Decimal('100010') # (I use this because the ODBC docs tell us how the bytes should look in the C struct) - self.cursor.execute("create table t1(d numeric(19))") - self.cursor.execute("insert into t1 values(?)", value) - v = self.cursor.execute("select * from t1").fetchone()[0] - self.assertEqual(type(v), Decimal) - self.assertEqual(v, value) - - - def test_small_decimal_scale(self): - # The same as small_decimal, except with a different scale. This value exactly matches the ODBC documentation - # example in the C Data Types appendix. - value = '1000.10' - value = Decimal(value) - self.cursor.execute("create table t1(d numeric(20,6))") - self.cursor.execute("insert into t1 values(?)", value) - v = self.cursor.execute("select * from t1").fetchone()[0] - self.assertEqual(type(v), Decimal) - self.assertEqual(v, value) - - - def test_negative_decimal_scale(self): - value = Decimal('-10.0010') - self.cursor.execute("create table t1(d numeric(19,4))") - self.cursor.execute("insert into t1 values(?)", value) - v = self.cursor.execute("select * from t1").fetchone()[0] - self.assertEqual(type(v), Decimal) - self.assertEqual(v, value) - - def test_nonnative_uuid(self): - # The default is False meaning we should return a string. Note that - # SQL Server seems to always return uppercase. 
- value = uuid.uuid4() - self.cursor.execute("create table t1(n uuid)") - self.cursor.execute("insert into t1 values (?)", value) - - pyodbc.native_uuid = False - result = self.cursor.execute("select n from t1").fetchval() - self.assertEqual(type(result), str) - self.assertEqual(result, str(value).upper()) - - def test_native_uuid(self): - # When true, we should return a uuid.UUID object. - value = uuid.uuid4() - self.cursor.execute("create table t1(n uuid)") - self.cursor.execute("insert into t1 values (?)", value) - - pyodbc.native_uuid = True - result = self.cursor.execute("select n from t1").fetchval() - self.assertIsInstance(result, uuid.UUID) - self.assertEqual(value, result) - - def _exec(self): - self.cursor.execute(self.sql) - - def test_close_cnxn(self): - """Make sure using a Cursor after closing its connection doesn't crash.""" - - self.cursor.execute("create table t1(id integer, s varchar(20))") - self.cursor.execute("insert into t1 values (?,?)", 1, 'test') - self.cursor.execute("select * from t1") - - self.cnxn.close() - - # Now that the connection is closed, we expect an exception. (If the code attempts to use - # the HSTMT, we'll get an access violation instead.) 
- self.sql = "select * from t1" - self.assertRaises(pyodbc.ProgrammingError, self._exec) - - def test_empty_string(self): - self.cursor.execute("create table t1(s varchar(20))") - self.cursor.execute("insert into t1 values(?)", "") - - def test_fixed_str(self): - value = "testing" - self.cursor.execute("create table t1(s char(7))") - self.cursor.execute("insert into t1 values(?)", "testing") - v = self.cursor.execute("select * from t1").fetchone()[0] - self.assertEqual(type(v), str) - self.assertEqual(len(v), len(value)) # If we alloc'd wrong, the test below might work because of an embedded NULL - self.assertEqual(v, value) - - def test_fetchval(self): - expected = "test" - self.cursor.execute("create table t1(s varchar(20))") - self.cursor.execute("insert into t1 values(?)", expected) - result = self.cursor.execute("select * from t1").fetchval() - self.assertEqual(result, expected) - - def test_negative_row_index(self): - self.cursor.execute("create table t1(s varchar(20))") - self.cursor.execute("insert into t1 values(?)", "1") - row = self.cursor.execute("select * from t1").fetchone() - self.assertEqual(row[0], "1") - self.assertEqual(row[-1], "1") - - def test_version(self): - self.assertEqual(3, len(pyodbc.version.split('.'))) # 1.3.1 etc. - - def test_rowcount_delete(self): - self.assertEqual(self.cursor.rowcount, -1) - self.cursor.execute("create table t1(i int)") - count = 4 - for i in range(count): - self.cursor.execute("insert into t1 values (?)", i) - self.cursor.execute("delete from t1") - self.assertEqual(self.cursor.rowcount, count) - - def test_rowcount_nodata(self): - """ - This represents a different code path than a delete that deleted something. - - The return value is SQL_NO_DATA and code after it was causing an error. We could use SQL_NO_DATA to step over - the code that errors out and drop down to the same SQLRowCount code. On the other hand, we could hardcode a - zero return value. 
- """ - self.cursor.execute("create table t1(i int)") - # This is a different code path internally. - self.cursor.execute("delete from t1") - self.assertEqual(self.cursor.rowcount, 0) - - def test_rowcount_select(self): - self.cursor.execute("create table t1(i int)") - count = 4 - for i in range(count): - self.cursor.execute("insert into t1 values (?)", i) - self.cursor.execute("select * from t1") - self.assertEqual(self.cursor.rowcount, 4) - - # PostgreSQL driver fails here? - # def test_rowcount_reset(self): - # "Ensure rowcount is reset to -1" - # - # self.cursor.execute("create table t1(i int)") - # count = 4 - # for i in range(count): - # self.cursor.execute("insert into t1 values (?)", i) - # self.assertEqual(self.cursor.rowcount, 1) - # - # self.cursor.execute("create table t2(i int)") - # self.assertEqual(self.cursor.rowcount, -1) - - def test_lower_case(self): - "Ensure pyodbc.lowercase forces returned column names to lowercase." - - # Has to be set before creating the cursor, so we must recreate self.cursor. - - pyodbc.lowercase = True - self.cursor = self.cnxn.cursor() - - self.cursor.execute("create table t1(Abc int, dEf int)") - self.cursor.execute("select * from t1") - - names = [ t[0] for t in self.cursor.description ] - names.sort() - - self.assertEqual(names, [ "abc", "def" ]) - - # Put it back so other tests don't fail. - pyodbc.lowercase = False - - def test_row_description(self): - """ - Ensure Cursor.description is accessible as Row.cursor_description. 
- """ - self.cursor = self.cnxn.cursor() - self.cursor.execute("create table t1(a int, b char(3))") - self.cnxn.commit() - self.cursor.execute("insert into t1 values(1, 'abc')") - - row = self.cursor.execute("select * from t1").fetchone() - self.assertEqual(self.cursor.description, row.cursor_description) - - - def test_executemany(self): - self.cursor.execute("create table t1(a int, b varchar(10))") - - params = [ (i, str(i)) for i in range(1, 6) ] - - self.cursor.executemany("insert into t1(a, b) values (?,?)", params) - - # REVIEW: Without the cast, we get the following error: - # [07006] [unixODBC]Received an unsupported type from Postgres.;\nERROR: table "t2" does not exist (14) - - count = self.cursor.execute("select cast(count(*) as int) from t1").fetchone()[0] - self.assertEqual(count, len(params)) - - self.cursor.execute("select a, b from t1 order by a") - rows = self.cursor.fetchall() - self.assertEqual(count, len(rows)) - - for param, row in zip(params, rows): - self.assertEqual(param[0], row[0]) - self.assertEqual(param[1], row[1]) - - def test_fast_executemany(self): - - self.fast_executemany = True - - self.cursor.execute("create table t1(a int, b varchar(10))") - - params = [(i, str(i)) for i in range(1, 6)] - - self.cursor.executemany("insert into t1(a, b) values (?,?)", params) - - # REVIEW: Without the cast, we get the following error: [07006] [unixODBC]Received an - # unsupported type from Postgres.;\nERROR: table "t2" does not exist (14) - - count = self.cursor.execute("select cast(count(*) as int) from t1").fetchone()[0] - self.assertEqual(count, len(params)) - - self.cursor.execute("select a, b from t1 order by a") - rows = self.cursor.fetchall() - self.assertEqual(count, len(rows)) - - for param, row in zip(params, rows): - self.assertEqual(param[0], row[0]) - self.assertEqual(param[1], row[1]) - - def test_executemany_failure(self): - """ - Ensure that an exception is raised if one query in an executemany fails. 
- """ - self.cursor.execute("create table t1(a int, b varchar(10))") - - params = [ (1, 'good'), - ('error', 'not an int'), - (3, 'good') ] - - self.assertRaises(pyodbc.Error, self.cursor.executemany, "insert into t1(a, b) value (?, ?)", params) - - - def test_row_slicing(self): - self.cursor.execute("create table t1(a int, b int, c int, d int)") - self.cursor.execute("insert into t1 values(1,2,3,4)") - - row = self.cursor.execute("select * from t1").fetchone() - - result = row[:] - self.assertTrue(result is row) - - result = row[:-1] - self.assertEqual(result, (1,2,3)) - - result = row[0:4] - self.assertTrue(result is row) - - def test_cnxn_execute_error(self): - """ - Make sure that Connection.execute (not Cursor) errors are not "eaten". - - GitHub issue #74 - """ - self.cursor.execute("create table t1(a int primary key)") - self.cursor.execute("insert into t1 values (1)") - self.assertRaises(pyodbc.Error, self.cnxn.execute, "insert into t1 values (1)") - - def test_row_repr(self): - self.cursor.execute("create table t1(a int, b int, c int, d text)") - self.cursor.execute("insert into t1 values(1,2,3,'four')") - - row = self.cursor.execute("select * from t1").fetchone() - - result = str(row) - self.assertEqual(result, "(1, 2, 3, 'four')") - - result = str(row[:-1]) - self.assertEqual(result, "(1, 2, 3)") - - result = str(row[:1]) - self.assertEqual(result, "(1,)") - - - def test_autocommit(self): - self.assertEqual(self.cnxn.autocommit, False) - othercnxn = pyodbc.connect(self.connection_string, autocommit=True) - self.assertEqual(othercnxn.autocommit, True) - othercnxn.autocommit = False - self.assertEqual(othercnxn.autocommit, False) - - def test_exc_integrity(self): - "Make sure an IntegretyError is raised" - # This is really making sure we are properly encoding and comparing the SQLSTATEs. 
- self.cursor.execute("create table t1(s1 varchar(10) primary key)") - self.cursor.execute("insert into t1 values ('one')") - self.assertRaises(pyodbc.IntegrityError, self.cursor.execute, "insert into t1 values ('one')") - - - def test_cnxn_set_attr_before(self): - # I don't have a getattr right now since I don't have a table telling me what kind of - # value to expect. For now just make sure it doesn't crash. - # From the unixODBC sqlext.h header file. - SQL_ATTR_PACKET_SIZE = 112 - othercnxn = pyodbc.connect(self.connection_string, attrs_before={ SQL_ATTR_PACKET_SIZE : 1024 * 32 }) - - def test_cnxn_set_attr(self): - # I don't have a getattr right now since I don't have a table telling me what kind of - # value to expect. For now just make sure it doesn't crash. - # From the unixODBC sqlext.h header file. - SQL_ATTR_ACCESS_MODE = 101 - SQL_MODE_READ_ONLY = 1 - self.cnxn.set_attr(SQL_ATTR_ACCESS_MODE, SQL_MODE_READ_ONLY) - - - def test_columns(self): - driver_version = tuple( - int(x) for x in self.cnxn.getinfo(pyodbc.SQL_DRIVER_VER).split(".") - ) - - def _get_column_size(row): - # the driver changed the name of the returned columns in version 13.02. - # see https://odbc.postgresql.org/docs/release.html, release 13.02.0000, change 6. - return row.column_size if driver_version >= (13, 2, 0) else row.precision - - # When using aiohttp, `await cursor.primaryKeys('t1')` was raising the error - # - # Error: TypeError: argument 2 must be str, not None - # - # I'm not sure why, but PyArg_ParseTupleAndKeywords fails if you use "|s" for an - # optional string keyword when calling indirectly. 
- - self.cursor.execute("create table t1(a int, b varchar(3), xΏz varchar(4))") - - self.cursor.columns('t1') - results = {row.column_name: row for row in self.cursor} - row = results['a'] - assert row.type_name == 'int4', row.type_name - row = results['b'] - assert row.type_name == 'varchar' - assert _get_column_size(row) == 3, _get_column_size(row) - row = results['xΏz'] - assert row.type_name == 'varchar' - assert _get_column_size(row) == 4, _get_column_size(row) - - # Now do the same, but specifically pass in None to one of the keywords. Old versions - # were parsing arguments incorrectly and would raise an error. (This crops up when - # calling indirectly like columns(*args, **kwargs) which aiodbc does.) - - self.cursor.columns('t1', schema=None, catalog=None) - results = {row.column_name: row for row in self.cursor} - row = results['a'] - assert row.type_name == 'int4', row.type_name - row = results['b'] - assert row.type_name == 'varchar' - assert _get_column_size(row) == 3 - - def test_cancel(self): - # I'm not sure how to reliably cause a hang to cancel, so for now we'll settle with - # making sure SQLCancel is called correctly. - self.cursor.execute("select 1") - self.cursor.cancel() - - def test_emoticons_as_parameter(self): - # https://github.com/mkleehammer/pyodbc/issues/423 - # - # When sending a varchar parameter, pyodbc is supposed to set ColumnSize to the number - # of characters. Ensure it works even with 4-byte characters. 
- # - # http://www.fileformat.info/info/unicode/char/1f31c/index.htm - - v = "x \U0001F31C z" - - self.cursor.execute("CREATE TABLE t1(s varchar(100))") - self.cursor.execute("insert into t1 values (?)", v) - - result = self.cursor.execute("select s from t1").fetchone()[0] - - self.assertEqual(result, v) - - def test_emoticons_as_literal(self): - # https://github.com/mkleehammer/pyodbc/issues/630 - - v = "x \U0001F31C z" - - self.cursor.execute("CREATE TABLE t1(s varchar(100))") - self.cursor.execute("insert into t1 values ('%s')" % v) - - result = self.cursor.execute("select s from t1").fetchone()[0] - - self.assertEqual(result, v) - - def test_cursor_messages(self): - """ - Test the Cursor.messages attribute. - """ - # self.cursor is used in setUp, hence is not brand new at this point - brand_new_cursor = self.cnxn.cursor() - self.assertIsNone(brand_new_cursor.messages) - - # using INFO message level because they are always sent to the client regardless of - # client_min_messages: https://www.postgresql.org/docs/11/runtime-config-client.html - for msg in ('hello world', 'ABCDEFGHIJ' * 800): - self.cursor.execute(""" - CREATE OR REPLACE PROCEDURE test_cursor_messages() - LANGUAGE plpgsql - AS $$ - BEGIN - RAISE INFO '{}' USING ERRCODE = '01000'; - END; - $$; - """.format(msg)) - self.cursor.execute("CALL test_cursor_messages();") - messages = self.cursor.messages - self.assertTrue(type(messages) is list) - self.assertTrue(len(messages) > 0) - self.assertTrue(all(type(m) is tuple for m in messages)) - self.assertTrue(all(len(m) == 2 for m in messages)) - self.assertTrue(all(type(m[0]) is str for m in messages)) - self.assertTrue(all(type(m[1]) is str for m in messages)) - self.assertTrue(all(m[0] == '[01000] (-1)' for m in messages)) - self.assertTrue(''.join(m[1] for m in messages).endswith(msg)) - - def test_output_conversion(self): - # Note the use of SQL_WVARCHAR, not SQL_VARCHAR. 
- - def convert(value): - # The value is the raw bytes (as a bytes object) read from the - # database. We'll simply add an X at the beginning at the end. - return 'X' + value.decode('latin1') + 'X' - - self.cursor.execute("create table t1(n int, v varchar(10))") - self.cursor.execute("insert into t1 values (1, '123.45')") - - self.cnxn.add_output_converter(pyodbc.SQL_WVARCHAR, convert) - value = self.cursor.execute("select v from t1").fetchone()[0] - self.assertEqual(value, 'X123.45X') - - # Clear all conversions and try again. There should be no Xs this time. - self.cnxn.clear_output_converters() - value = self.cursor.execute("select v from t1").fetchone()[0] - self.assertEqual(value, '123.45') - - # Same but clear using remove_output_converter. - self.cnxn.add_output_converter(pyodbc.SQL_WVARCHAR, convert) - value = self.cursor.execute("select v from t1").fetchone()[0] - self.assertEqual(value, 'X123.45X') - - self.cnxn.remove_output_converter(pyodbc.SQL_WVARCHAR) - value = self.cursor.execute("select v from t1").fetchone()[0] - self.assertEqual(value, '123.45') - - # And lastly, clear by passing None for the converter. 
- self.cnxn.add_output_converter(pyodbc.SQL_WVARCHAR, convert) - value = self.cursor.execute("select v from t1").fetchone()[0] - self.assertEqual(value, 'X123.45X') - - self.cnxn.add_output_converter(pyodbc.SQL_WVARCHAR, None) - value = self.cursor.execute("select v from t1").fetchone()[0] - self.assertEqual(value, '123.45') - -def main(): - from argparse import ArgumentParser - parser = ArgumentParser(usage=usage) - parser.add_argument("-v", "--verbose", action="count", default=0, help="increment test verbosity (can be used multiple times)") - parser.add_argument("-d", "--debug", action="store_true", default=False, help="print debugging items") - parser.add_argument("-t", "--test", help="run only the named test") - parser.add_argument("-a", "--ansi", action="store_true", default=False, help="ANSI only") - parser.add_argument("--postgresql", nargs='*', help="connection string(s) for PostgreSQL") - # typically, the connection string is provided as the only parameter, so handle this case - parser.add_argument('conn_str', nargs='*', help="connection string for PostgreSQL") - args = parser.parse_args() - - if len(args.conn_str) > 1: - parser.error('Only one argument is allowed. 
Do you need quotes around the connection string?') - - if args.postgresql is not None: - connection_strings = args.postgresql - elif len(args.conn_str) == 1 and args.conn_str[0]: - connection_strings = [args.conn_str[0]] - else: - config_conn_string = testutils.load_setup_connection_string('pgtests') - if config_conn_string is None: - parser.print_help() - return True # no connection string, therefore nothing to do - else: - connection_strings = [config_conn_string] - - if args.verbose: - cnxn = pyodbc.connect(connection_strings[0], ansi=args.ansi) - testutils.print_library_info(cnxn) - cnxn.close() - - overall_result = True - for connection_string in connection_strings: - print(f'Running tests with connection string: {connection_string}') - - if args.test: - # Run a single test - if not args.test.startswith('test_'): - args.test = 'test_%s' % (args.test) - - suite = unittest.TestSuite([ - PGTestCase(method_name=args.test, connection_string=connection_string, ansi=args.ansi) - ]) - else: - # Run all tests in the class - methods = [ m for m in dir(PGTestCase) if m.startswith('test_') ] - methods.sort() - suite = unittest.TestSuite([ - PGTestCase( method_name=m, connection_string=connection_string, ansi=args.ansi) for m in methods - ]) - - testRunner = unittest.TextTestRunner(verbosity=args.verbose) - result = testRunner.run(suite) - if not result.wasSuccessful(): - overall_result = False - - return overall_result - - -if __name__ == '__main__': - - # add the build directory to the Python path so we're testing the latest - # build, not the pip-installed version - testutils.add_to_path() - - # only after setting the Python path, import the pyodbc module - import pyodbc - - # run the tests - sys.exit(0 if main() else 1) diff --git a/tests3/run_tests.py b/tests3/run_tests.py deleted file mode 100644 index a6526233..00000000 --- a/tests3/run_tests.py +++ /dev/null @@ -1,165 +0,0 @@ -#!/usr/bin/python -import configparser -import os -import sys -from typing import List, 
Optional, Tuple - -import testutils - -import pyodbc -import pytest - - -def option_transform(optionstr: str) -> str: - # the default ConfigParser() behavior is to lowercase key values, - # override this by simply returning the original key value - return optionstr - - -def generate_connection_string(attrs: List[Tuple[str, str]]) -> str: - attrs_str_list = [] - for key, value in attrs: - # escape/bookend values that include special characters - # ref: https://learn.microsoft.com/en-us/openspecs/sql_server_protocols/ms-odbcstr/348b0b4d-358a-41fb-9753-6351425809cb - if any(c in value for c in ';} '): - value = '{{{}}}'.format(value.replace('}', '}}')) - - attrs_str_list.append(f'{key}={value}') - - conn_str = ';'.join(attrs_str_list) - return conn_str - - -def read_db_config() -> Tuple[List[str], List[str], List[str]]: - sqlserver = [] - postgresql = [] - mysql = [] - - # get the filename of the database configuration file - pyodbc_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) - default_cfg_file = os.path.join(pyodbc_dir, 'tmp', 'database.cfg') - cfg_file = os.getenv('PYODBC_DATABASE_CFG', default_cfg_file) - - if os.path.exists(cfg_file): - print(f'Using database configuration file: {cfg_file}') - - # read the contents of the config file - config = configparser.ConfigParser() - config.optionxform = option_transform # prevents keys from being lowercased - config.read(cfg_file) - - # generate the connection strings - for section in config.sections(): - section_lower = section.lower() - if section_lower.startswith('sqlserver'): - conn_string = generate_connection_string(config.items(section)) - sqlserver.append(conn_string) - elif section_lower.startswith('postgres'): - conn_string = generate_connection_string(config.items(section)) - postgresql.append(conn_string) - elif section_lower.startswith('mysql'): - conn_string = generate_connection_string(config.items(section)) - mysql.append(conn_string) - else: - print(f'Database configuration file not 
found: {cfg_file}') - - return sqlserver, postgresql, mysql - - -def main(sqlserver: Optional[List[str]] = None, - postgresql: Optional[List[str]] = None, - mysql: Optional[List[str]] = None, - verbose: int = 0, - quiet: int = 0, - k_expression: Optional[str] = None) -> bool: - - # read from the config file if no connection strings provided - if not (sqlserver or postgresql or mysql): - sqlserver, postgresql, mysql = read_db_config() - - if not (sqlserver or postgresql or mysql): - print('No tests have been run because no database connection info was provided') - return False - - tests_dir = os.path.dirname(os.path.abspath(__file__)) - - databases = { - 'SQL Server': { - 'conn_strs': sqlserver or [], - 'discovery_patterns': [ - # FUTURE: point to dir specific to SQL Server - os.path.join(tests_dir, 'sqlserver'), - os.path.join(tests_dir, 'sqlservertests.py'), - ], - }, - 'PostgreSQL': { - 'conn_strs': postgresql or [], - 'discovery_patterns': [ - # FUTURE: point to dir specific to PostgreSQL - os.path.join(tests_dir, 'postgresql'), - os.path.join(tests_dir, 'pgtests.py'), - ], - }, - 'MySQL': { - 'conn_strs': mysql or [], - 'discovery_patterns': [ - # FUTURE: point to dir specific to MySQL - os.path.join(tests_dir, 'mysql'), - os.path.join(tests_dir, 'mysqltests.py'), - ], - }, - } - - overall_result = True - for db_name, db_attrs in databases.items(): - - for db_conn_str in db_attrs['conn_strs']: - print(f'Running tests against {db_name} with connection string: {db_conn_str}') - - if verbose > 0: - cnxn = pyodbc.connect(db_conn_str) - testutils.print_library_info(cnxn) - cnxn.close() - - # it doesn't seem to be easy to pass test parameters into the test - # discovery process, so the connection string will have to be passed - # to the test cases via an environment variable - os.environ['PYODBC_CONN_STR'] = db_conn_str - - # construct arguments for pytest - pytest_args = [] - - if verbose > 0: - pytest_args.extend(['-v'] * verbose) - elif quiet > 0: - 
pytest_args.extend(['-q'] * quiet) - - if k_expression: - pytest_args.extend(['-k', k_expression]) - - pytest_args.extend(db_attrs['discovery_patterns']) - - # run the tests - retcode = pytest.main(args=pytest_args) - if retcode == pytest.ExitCode.NO_TESTS_COLLECTED: - print('No tests collected during discovery') - overall_result = False - elif retcode != pytest.ExitCode.OK: - overall_result = False - - return overall_result - - -if __name__ == '__main__': - from argparse import ArgumentParser - parser = ArgumentParser() - parser.add_argument("--sqlserver", action="append", help="connection string for SQL Server") - parser.add_argument("--postgresql", action="append", help="connection string for PostgreSQL") - parser.add_argument("--mysql", action="append", help="connection string for MySQL") - parser.add_argument("-k", dest="k_expression", help="run tests whose names match the expression") - qv_group = parser.add_mutually_exclusive_group() - qv_group.add_argument("-q", "--quiet", action="count", default=0, help="decrease test verbosity (can be used multiple times)") - qv_group.add_argument("-v", "--verbose", action="count", default=0, help="increment test verbosity (can be used multiple times)") - # TODO: gather any remaining args and include in call to pytest??? i.e. known_args, other_args = parser.parse_known_args() - args = parser.parse_args() - - # run the tests - passed = main(**vars(args)) - sys.exit(0 if passed else 1) diff --git a/tests3/sqlservertests.py b/tests3/sqlservertests.py deleted file mode 100755 index 360beb47..00000000 --- a/tests3/sqlservertests.py +++ /dev/null @@ -1,1995 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -usage = """\ -%(prog)s [options] connection_string - -Unit tests for SQL Server. To use, pass a connection string as the parameter. -The tests will create and drop tables t1 and t2 as necessary. - -These run using the version from the 'build' directory, not the version -installed into the Python directories. 
You must run python setup.py build -before running the tests. - -You can also put the connection string into a tmp/setup.cfg file like so: - - [sqlservertests] - connection-string=DRIVER={SQL Server};SERVER=localhost;UID=uid;PWD=pwd;DATABASE=db - -The connection string above will use the 2000/2005 driver, even if SQL Server 2008 -is installed: - - 2000: DRIVER={SQL Server} - 2005: DRIVER={SQL Server} - 2008: DRIVER={SQL Server Native Client 10.0} - -If using FreeTDS ODBC, be sure to use version 1.1.23 or newer. -""" - -import sys, os, re, uuid -import unittest -from decimal import Decimal -from datetime import datetime, date, time -from os.path import join, getsize, dirname, abspath -from warnings import warn - -if __name__ != '__main__': - import pyodbc - -import testutils - - -# Some tests have fallback code for known driver issues. -# Change this value to False to bypass the fallback code, e.g., to see -# if a newer version of the driver has fixed the underlying issue. -# -handle_known_issues = True - -_TESTSTR = '0123456789-abcdefghijklmnopqrstuvwxyz-' - -def _generate_test_string(length): - """ - Returns a string of `length` characters, constructed by repeating _TESTSTR as necessary. - - To enhance performance, there are 3 ways data is read, based on the length of the value, so most data types are - tested with 3 lengths. This function helps us generate the test data. - - We use a recognizable data set instead of a single character to make it less likely that "overlap" errors will - be hidden and to help us manually identify where a break occurs. 
- """ - if length <= len(_TESTSTR): - return _TESTSTR[:length] - - c = int((length + len(_TESTSTR)-1) / len(_TESTSTR)) - v = _TESTSTR * c - return v[:length] - -class SqlServerTestCase(unittest.TestCase): - - SMALL_FENCEPOST_SIZES = [ 0, 1, 255, 256, 510, 511, 512, 1023, 1024, 2047, 2048, 4000 ] - LARGE_FENCEPOST_SIZES = [ 4095, 4096, 4097, 10 * 1024, 20 * 1024 ] - - STR_FENCEPOSTS = [_generate_test_string(size) for size in SMALL_FENCEPOST_SIZES] - LARGE_STR_FENCEPOSTS = STR_FENCEPOSTS + [_generate_test_string(size) for size in LARGE_FENCEPOST_SIZES] - - BYTE_FENCEPOSTS = [ bytes(s, 'ascii') for s in STR_FENCEPOSTS ] - IMAGE_FENCEPOSTS = BYTE_FENCEPOSTS + [ bytes(_generate_test_string(size), 'ascii') for size in LARGE_FENCEPOST_SIZES ] - - def __init__(self, method_name, connection_string=None): - unittest.TestCase.__init__(self, method_name) - if connection_string is not None: - self.connection_string = connection_string - else: - # if the connection string cannot be provided directly here, it can be - # provided in an environment variable - self.connection_string = os.environ['PYODBC_CONN_STR'] - - def driver_type_is(self, type_name): - recognized_types = { - 'msodbcsql': '(Microsoft) ODBC Driver xx for SQL Server', - 'freetds': 'FreeTDS ODBC', - } - if not type_name in recognized_types.keys(): - raise KeyError('"{0}" is not a recognized driver type: {1}'.format(type_name, list(recognized_types.keys()))) - driver_name = self.cnxn.getinfo(pyodbc.SQL_DRIVER_NAME).lower() - if type_name == 'msodbcsql': - return ('msodbcsql' in driver_name) or ('sqlncli' in driver_name) or ('sqlsrv32.dll' == driver_name) - elif type_name == 'freetds': - return ('tdsodbc' in driver_name) - - def handle_known_issues_for(self, type_name, print_reminder=False, failure_crashes_python=False): - """ - Checks driver `type_name` and "killswitch" variable `handle_known_issues` to see if - known issue handling should be bypassed. 
Optionally prints a reminder message to - help identify tests that previously had issues but may have been fixed by a newer - version of the driver. - - Usage examples: - - # 1. print reminder at beginning of test (before any errors can occur) - # - def test_some_feature(self): - self.handle_known_issues_for('freetds', print_reminder=True) - # (continue with test code) - - # 2. conditional execution of fallback code - # - try: - # (some test code) - except pyodbc.DataError: - if self.handle_known_issues_for('freetds'): - # FREETDS_KNOWN_ISSUE - # - # (fallback code to work around exception) - else: - raise - """ - if self.driver_type_is(type_name): - if handle_known_issues or failure_crashes_python: - return True - else: - if print_reminder: - print("Known issue handling is disabled. Does this test still fail?") - return False - - def get_sqlserver_version(self): - """ - Returns the major version: 8-->2000, 9-->2005, 10-->2008 - """ - self.cursor.execute("exec master..xp_msver 'ProductVersion'") - row = self.cursor.fetchone() - return int(row.Character_Value.split('.', 1)[0]) - - def setUp(self): - self.cnxn = pyodbc.connect(self.connection_string) - self.cursor = self.cnxn.cursor() - - # I (Kleehammer) have been using a latin1 collation. If you have a - # different collation, you'll need to update this. If someone knows of - # a good way for this to be dynamic, please update. (I suppose we - # could maintain a map from collation to encoding?) 
- self.cnxn.setdecoding(pyodbc.SQL_CHAR, 'latin1') - - for i in range(3): - try: - self.cursor.execute("drop table t%d" % i) - self.cnxn.commit() - except: - pass - - for i in range(3): - try: - self.cursor.execute("drop procedure proc%d" % i) - self.cnxn.commit() - except: - pass - - try: - self.cursor.execute('drop function func1') - self.cnxn.commit() - except: - pass - - self.cnxn.rollback() - - def tearDown(self): - try: - self.cursor.close() - self.cnxn.close() - except: - # If we've already closed the cursor or connection, exceptions are thrown. - pass - - def _simpletest(datatype, value): - # A simple test that can be used for any data type where the Python - # type we write is also what we expect to receive. - def _t(self): - self.cursor.execute('create table t1(value %s)' % datatype) - self.cursor.execute('insert into t1 values (?)', value) - result = self.cursor.execute("select value from t1").fetchone()[0] - self.assertEqual(result, value) - return _t - - def test_multiple_bindings(self): - "More than one bind and select on a cursor" - self.cursor.execute("create table t1(n int)") - self.cursor.execute("insert into t1 values (?)", 1) - self.cursor.execute("insert into t1 values (?)", 2) - self.cursor.execute("insert into t1 values (?)", 3) - for i in range(3): - self.cursor.execute("select n from t1 where n < ?", 10) - self.cursor.execute("select n from t1 where n < 3") - - - def test_different_bindings(self): - self.cursor.execute("create table t1(n int)") - self.cursor.execute("create table t2(d datetime)") - self.cursor.execute("insert into t1 values (?)", 1) - self.cursor.execute("insert into t2 values (?)", datetime.now()) - - def test_drivers(self): - p = pyodbc.drivers() - self.assertTrue(isinstance(p, list)) - - def test_datasources(self): - p = pyodbc.dataSources() - self.assertTrue(isinstance(p, dict)) - - def test_getinfo_string(self): - value = self.cnxn.getinfo(pyodbc.SQL_CATALOG_NAME_SEPARATOR) - self.assertTrue(isinstance(value, str)) - - 
def test_getinfo_bool(self): - value = self.cnxn.getinfo(pyodbc.SQL_ACCESSIBLE_TABLES) - self.assertTrue(isinstance(value, bool)) - - def test_getinfo_int(self): - value = self.cnxn.getinfo(pyodbc.SQL_DEFAULT_TXN_ISOLATION) - self.assertTrue(isinstance(value, (int, int))) - - def test_getinfo_smallint(self): - value = self.cnxn.getinfo(pyodbc.SQL_CONCAT_NULL_BEHAVIOR) - self.assertTrue(isinstance(value, int)) - - def test_noscan(self): - self.assertEqual(self.cursor.noscan, False) - self.cursor.noscan = True - self.assertEqual(self.cursor.noscan, True) - - def test_nonnative_uuid(self): - # The default is False meaning we should return a string. Note that - # SQL Server seems to always return uppercase. - value = uuid.uuid4() - self.cursor.execute("create table t1(n uniqueidentifier)") - self.cursor.execute("insert into t1 values (?)", value) - - pyodbc.native_uuid = False - result = self.cursor.execute("select n from t1").fetchval() - self.assertEqual(type(result), str) - self.assertEqual(result, str(value).upper()) - - def test_native_uuid(self): - # When true, we should return a uuid.UUID object. 
- value = uuid.uuid4() - self.cursor.execute("create table t1(n uniqueidentifier)") - self.cursor.execute("insert into t1 values (?)", value) - - pyodbc.native_uuid = True - result = self.cursor.execute("select n from t1").fetchval() - self.assertIsInstance(result, uuid.UUID) - self.assertEqual(value, result) - - def test_nextset(self): - self.cursor.execute("create table t1(i int)") - for i in range(4): - self.cursor.execute("insert into t1(i) values(?)", i) - - self.cursor.execute("select i from t1 where i < 2 order by i; select i from t1 where i >= 2 order by i") - - for i, row in enumerate(self.cursor): - self.assertEqual(i, row.i) - - self.assertEqual(self.cursor.nextset(), True) - - for i, row in enumerate(self.cursor): - self.assertEqual(i + 2, row.i) - - def test_nextset_with_raiserror(self): - self.handle_known_issues_for('freetds', print_reminder=True) - self.cursor.execute("select i = 1; RAISERROR('c', 16, 1);") - row = next(self.cursor) - self.assertEqual(1, row.i) - if self.handle_known_issues_for('freetds'): - warn('FREETDS_KNOWN_ISSUE - test_nextset_with_raiserror: test cancelled.') - # AssertionError: ProgrammingError not raised by nextset - # https://github.com/FreeTDS/freetds/issues/230 - return # for now - self.assertRaises(pyodbc.ProgrammingError, self.cursor.nextset) - - def test_fixed_unicode(self): - value = "t\xebsting" - self.cursor.execute("create table t1(s nchar(7))") - self.cursor.execute("insert into t1 values(?)", "t\xebsting") - v = self.cursor.execute("select * from t1").fetchone()[0] - self.assertEqual(type(v), str) - self.assertEqual(len(v), len(value)) # If we alloc'd wrong, the test below might work because of an embedded NULL - self.assertEqual(v, value) - - - def _test_strtype(self, sqltype, value, resulttype=None, colsize=None): - """ - The implementation for string, Unicode, and binary tests. 
- """ - assert ( - value is None - or - colsize == -1 or colsize is None or colsize >= len(value) - ), colsize - - if colsize == -1: - sql = "create table t1(s %s(max))" % sqltype - elif colsize: - sql = "create table t1(s %s(%s))" % (sqltype, colsize) - else: - sql = "create table t1(s %s)" % sqltype - self.cursor.execute(sql) - - if resulttype is None: - resulttype = type(value) - - sql = "insert into t1 values(?)" - try: - self.cursor.execute(sql, value) - except pyodbc.DataError: - if self.handle_known_issues_for('freetds'): - # FREETDS_KNOWN_ISSUE - # - # cnxn.getinfo(pyodbc.SQL_DESCRIBE_PARAMETER) returns False for FreeTDS, so - # pyodbc can't call SQLDescribeParam to get the correct parameter type. - # This can lead to errors being returned from SQL Server when sp_prepexec is called, - # e.g., "Implicit conversion from data type varchar to varbinary is not allowed." - # for test_binary_null - # - # So at least verify that the user can manually specify the parameter type - if sqltype == 'varbinary': - sql_param_type = pyodbc.SQL_VARBINARY - # (add elif blocks for other cases as required) - self.cursor.setinputsizes([(sql_param_type, colsize, 0)]) - self.cursor.execute(sql, value) - else: - raise - v = self.cursor.execute("select * from t1").fetchone()[0] - self.assertEqual(type(v), resulttype) - - if value is not None: - self.assertEqual(len(v), len(value)) - - # To allow buffer --> db --> bytearray tests, always convert the input to the expected result type before - # comparing. - if type(value) is not resulttype: - value = resulttype(value) - - self.assertEqual(v, value) - - - def _test_strliketype(self, sqltype, value, resulttype=None, colsize=None): - """ - The implementation for text, image, ntext, and binary. - - These types do not support comparison operators. 
- """ - assert colsize is None or isinstance(colsize, int), colsize - assert colsize is None or (value is None or colsize >= len(value)) - - if colsize: - sql = "create table t1(s %s(%s))" % (sqltype, colsize) - else: - sql = "create table t1(s %s)" % sqltype - - if resulttype is None: - resulttype = type(value) - - self.cursor.execute(sql) - self.cursor.execute("insert into t1 values(?)", value) - result = self.cursor.execute("select * from t1").fetchone()[0] - - self.assertEqual(type(result), resulttype) - - # To allow buffer --> db --> bytearray tests, always convert the input to the expected result type before - # comparing. - if type(value) is not resulttype: - value = resulttype(value) - - self.assertEqual(result, value) - - - # - # varchar - # - - def test_varchar_null(self): - self._test_strtype('varchar', None, colsize=100) - - # Generate a test for each fencepost size: test_varchar_0, etc. - def _maketest(value): - def t(self): - self._test_strtype('varchar', value, colsize=len(value)) - return t - for value in STR_FENCEPOSTS: - locals()['test_varchar_%s' % len(value)] = _maketest(value) - - # Generate a test for each fencepost size: test_varchar_0, etc. 
- def _maketest(value): - def t(self): - self._test_strtype('varchar', value, colsize=-1) - return t - for value in LARGE_STR_FENCEPOSTS: - locals()['test_varchar_max_%s' % len(value)] = _maketest(value) - - def test_varchar_many(self): - self.cursor.execute("create table t1(c1 varchar(300), c2 varchar(300), c3 varchar(300))") - - v1 = 'ABCDEFGHIJ' * 30 - v2 = '0123456789' * 30 - v3 = '9876543210' * 30 - - self.cursor.execute("insert into t1(c1, c2, c3) values (?,?,?)", v1, v2, v3); - row = self.cursor.execute("select c1, c2, c3, len(c1) as l1, len(c2) as l2, len(c3) as l3 from t1").fetchone() - - self.assertEqual(v1, row.c1) - self.assertEqual(v2, row.c2) - self.assertEqual(v3, row.c3) - - # - # nvarchar - # - - def test_unicode_null(self): - self._test_strtype('nvarchar', None, colsize=100) - - # Generate a test for each fencepost size: test_unicode_0, etc. - def _maketest(value): - def t(self): - self._test_strtype('nvarchar', value, colsize=len(value)) - return t - for value in STR_FENCEPOSTS: - locals()['test_unicode_%s' % len(value)] = _maketest(value) - - def _maketest(value): - def t(self): - self._test_strtype('nvarchar', value, colsize=-1) - return t - for value in LARGE_STR_FENCEPOSTS: - locals()['test_unicode_max_%s' % len(value)] = _maketest(value) - - def test_unicode_longmax(self): - # Issue 188: Segfault when fetching NVARCHAR(MAX) data over 511 bytes - - ver = self.get_sqlserver_version() - if ver < 9: # 2005+ - return # so pass / ignore - self.cursor.execute("select cast(replicate(N'x', 512) as nvarchar(max))") - - # From issue #206 - def _maketest(value): - def t(self): - self._test_strtype('nvarchar', value, colsize=len(value)) - return t - locals()['test_chinese_param'] = _maketest('我的') - - def test_chinese(self): - v = '我的' - self.cursor.execute(u"SELECT N'我的' AS [Name]") - row = self.cursor.fetchone() - self.assertEqual(row[0], v) - - self.cursor.execute(u"SELECT N'我的' AS [Name]") - rows = self.cursor.fetchall() - 
self.assertEqual(rows[0][0], v) - - def test_fast_executemany_to_local_temp_table(self): - if self.handle_known_issues_for('freetds', print_reminder=True, failure_crashes_python=True): - warn('FREETDS_KNOWN_ISSUE - test_fast_executemany_to_local_temp_table: test cancelled.') - return - v = 'Ώπα' - self.cursor.execute("CREATE TABLE #issue295 (id INT IDENTITY PRIMARY KEY, txt NVARCHAR(50))") - sql = "INSERT INTO #issue295 (txt) VALUES (?)" - params = [(v,)] - self.cursor.setinputsizes([(pyodbc.SQL_WVARCHAR, 50, 0)]) - self.cursor.fast_executemany = True - self.cursor.executemany(sql, params) - self.assertEqual(self.cursor.execute("SELECT txt FROM #issue295").fetchval(), v) - - def test_fast_executemany_to_datetime2(self): - if self.handle_known_issues_for('freetds', print_reminder=True, failure_crashes_python=True): - warn('FREETDS_KNOWN_ISSUE - test_fast_executemany_to_datetime2: test cancelled.') - return - v = datetime(2019, 3, 12, 10, 0, 0, 123456) - self.cursor.execute("CREATE TABLE ##issue540 (dt2 DATETIME2(2))") - sql = "INSERT INTO ##issue540 (dt2) VALUES (?)" - params = [(v,)] - self.cursor.fast_executemany = True - self.cursor.executemany(sql, params) - self.assertEqual(self.cursor.execute("SELECT CAST(dt2 AS VARCHAR) FROM ##issue540").fetchval(), '2019-03-12 10:00:00.12') - - def test_fast_executemany_high_unicode(self): - if self.handle_known_issues_for('freetds', print_reminder=True, failure_crashes_python=True): - warn('FREETDS_KNOWN_ISSUE - test_fast_executemany_high_unicode: test cancelled.') - return - v = "🎥" - self.cursor.fast_executemany = True - self.cursor.execute("CREATE TABLE t1 (col1 nvarchar(max) null)") - self.cursor.executemany("INSERT INTO t1 (col1) VALUES (?)", [[v,]]) - self.assertEqual(self.cursor.execute("SELECT * FROM t1").fetchone()[0], v) - - # - # binary - # - - def test_binary_null(self): - self.handle_known_issues_for('freetds', print_reminder=True) - self._test_strtype('varbinary', None, colsize=100) - - # bytearray - - def 
_maketest(value): - def t(self): - self._test_strtype('varbinary', bytearray(value), colsize=len(value), resulttype=bytes) - return t - for value in BYTE_FENCEPOSTS: - locals()['test_binary_bytearray_%s' % len(value)] = _maketest(value) - - # bytes - - def _maketest(value): - def t(self): - self._test_strtype('varbinary', bytes(value), colsize=len(value)) - return t - for value in BYTE_FENCEPOSTS: - locals()['test_binary_bytes_%s' % len(value)] = _maketest(value) - - # - # image - # - - def test_image_null(self): - self._test_strliketype('image', None) - - # bytearray - - def _maketest(value): - def t(self): - self._test_strliketype('image', bytearray(value), resulttype=bytes) - return t - for value in IMAGE_FENCEPOSTS: - locals()['test_image_bytearray_%s' % len(value)] = _maketest(value) - - # bytes - - def _maketest(value): - def t(self): - self._test_strliketype('image', bytes(value)) - return t - for value in IMAGE_FENCEPOSTS: - locals()['test_image_bytes_%s' % len(value)] = _maketest(value) - - # - # text - # - - def test_null_text(self): - self._test_strliketype('text', None) - - def _maketest(value): - def t(self): - self._test_strliketype('text', value) - return t - for value in STR_FENCEPOSTS: - locals()['test_text_%s' % len(value)] = _maketest(value) - - # - # bit - # - - def test_bit(self): - value = True - self.cursor.execute("create table t1(b bit)") - self.cursor.execute("insert into t1 values (?)", value) - v = self.cursor.execute("select b from t1").fetchone()[0] - self.assertEqual(type(v), bool) - self.assertEqual(v, value) - - # - # decimal - # - - def _decimal(self, precision, scale, negative): - # From test provided by planders (thanks!) in Issue 91 - - self.cursor.execute("create table t1(d decimal(%s, %s))" % (precision, scale)) - - # Construct a decimal that uses the maximum precision and scale. - decStr = '9' * (precision - scale) - if scale: - decStr = decStr + "." 
+ '9' * scale - if negative: - decStr = "-" + decStr - - value = Decimal(decStr) - - self.cursor.execute("insert into t1 values(?)", value) - - v = self.cursor.execute("select d from t1").fetchone()[0] - self.assertEqual(v, value) - - def _maketest(p, s, n): - def t(self): - self._decimal(p, s, n) - return t - for (p, s, n) in [ (1, 0, False), - (1, 0, True), - (6, 0, False), - (6, 2, False), - (6, 4, True), - (6, 6, True), - (38, 0, False), - (38, 10, False), - (38, 38, False), - (38, 0, True), - (38, 10, True), - (38, 38, True) ]: - locals()['test_decimal_%s_%s_%s' % (p, s, n and 'n' or 'p')] = _maketest(p, s, n) - - - def test_decimal_e(self): - """Ensure exponential notation decimals are properly handled""" - value = Decimal((0, (1, 2, 3), 5)) # prints as 1.23E+7 - self.cursor.execute("create table t1(d decimal(10, 2))") - self.cursor.execute("insert into t1 values (?)", value) - result = self.cursor.execute("select * from t1").fetchone()[0] - self.assertEqual(result, value) - - def test_subquery_params(self): - """Ensure parameter markers work in a subquery""" - self.cursor.execute("create table t1(id integer, s varchar(20))") - self.cursor.execute("insert into t1 values (?,?)", 1, 'test') - row = self.cursor.execute(""" - select x.id - from ( - select id - from t1 - where s = ? - and id between ? and ? - ) x - """, 'test', 1, 10).fetchone() - self.assertNotEqual(row, None) - self.assertEqual(row[0], 1) - - def _exec(self): - self.cursor.execute(self.sql) - - def test_close_cnxn(self): - """Make sure using a Cursor after closing its connection doesn't crash.""" - - self.cursor.execute("create table t1(id integer, s varchar(20))") - self.cursor.execute("insert into t1 values (?,?)", 1, 'test') - self.cursor.execute("select * from t1") - - self.cnxn.close() - - # Now that the connection is closed, we expect an exception. (If the code attempts to use - # the HSTMT, we'll get an access violation instead.) 
- self.sql = "select * from t1" - self.assertRaises(pyodbc.ProgrammingError, self._exec) - - def test_empty_string(self): - self.cursor.execute("create table t1(s varchar(20))") - self.cursor.execute("insert into t1 values(?)", "") - - def test_empty_string_encoding(self): - self.cnxn.setdecoding(pyodbc.SQL_CHAR, encoding='shift_jis') - value = "" - self.cursor.execute("create table t1(s varchar(20))") - self.cursor.execute("insert into t1 values(?)", value) - v = self.cursor.execute("select * from t1").fetchone()[0] - self.assertEqual(v, value) - - def test_fixed_str(self): - value = "testing" - self.cursor.execute("create table t1(s char(7))") - self.cursor.execute("insert into t1 values(?)", value) - v = self.cursor.execute("select * from t1").fetchone()[0] - self.assertEqual(type(v), str) - self.assertEqual(len(v), len(value)) # If we alloc'd wrong, the test below might work because of an embedded NULL - self.assertEqual(v, value) - - def test_empty_unicode(self): - self.cursor.execute("create table t1(s nvarchar(20))") - self.cursor.execute("insert into t1 values(?)", "") - - def test_empty_unicode_encoding(self): - self.cnxn.setdecoding(pyodbc.SQL_CHAR, encoding='shift_jis') - value = "" - self.cursor.execute("create table t1(s nvarchar(20))") - self.cursor.execute("insert into t1 values(?)", value) - v = self.cursor.execute("select * from t1").fetchone()[0] - self.assertEqual(v, value) - - def test_negative_row_index(self): - self.cursor.execute("create table t1(s varchar(20))") - self.cursor.execute("insert into t1 values(?)", "1") - row = self.cursor.execute("select * from t1").fetchone() - self.assertEqual(row[0], "1") - self.assertEqual(row[-1], "1") - - def test_version(self): - self.assertEqual(3, len(pyodbc.version.split('.'))) # 1.3.1 etc. 
- - # - # date, time, datetime - # - - def test_datetime(self): - value = datetime(2007, 1, 15, 3, 4, 5) - - self.cursor.execute("create table t1(dt datetime)") - self.cursor.execute("insert into t1 values (?)", value) - - result = self.cursor.execute("select dt from t1").fetchone()[0] - self.assertEqual(type(result), datetime) - self.assertEqual(value, result) - - def test_datetime_fraction(self): - # SQL Server supports milliseconds, but Python's datetime supports nanoseconds, so the most granular datetime - # supported is xxx000. - - value = datetime(2007, 1, 15, 3, 4, 5, 123000) - - self.cursor.execute("create table t1(dt datetime)") - self.cursor.execute("insert into t1 values (?)", value) - - result = self.cursor.execute("select dt from t1").fetchone()[0] - self.assertEqual(type(result), datetime) - self.assertEqual(value, result) - - def test_datetime_fraction_rounded(self): - # SQL Server supports milliseconds, but Python's datetime supports nanoseconds. pyodbc rounds down to what the - # database supports. 
- - full = datetime(2007, 1, 15, 3, 4, 5, 123456) - rounded = datetime(2007, 1, 15, 3, 4, 5, 123000) - - self.cursor.execute("create table t1(dt datetime)") - self.cursor.execute("insert into t1 values (?)", full) - - result = self.cursor.execute("select dt from t1").fetchone()[0] - self.assertEqual(type(result), datetime) - self.assertEqual(rounded, result) - - def test_date(self): - ver = self.get_sqlserver_version() - if ver < 10: # 2008 only - return # so pass / ignore - - value = date.today() - - self.cursor.execute("create table t1(d date)") - self.cursor.execute("insert into t1 values (?)", value) - - result = self.cursor.execute("select d from t1").fetchone()[0] - self.assertEqual(type(result), date) - self.assertEqual(value, result) - - def test_time(self): - ver = self.get_sqlserver_version() - if ver < 10: # 2008 only - return # so pass / ignore - - value = datetime.now().time() - - # We aren't yet writing values using the new extended time type so the value written to the database is only - # down to the second. 
- value = value.replace(microsecond=0) - - self.cursor.execute("create table t1(t time)") - self.cursor.execute("insert into t1 values (?)", value) - - result = self.cursor.execute("select t from t1").fetchone()[0] - self.assertEqual(type(result), time) - self.assertEqual(value, result) - - def test_datetime2(self): - value = datetime(2007, 1, 15, 3, 4, 5) - - self.cursor.execute("create table t1(dt datetime2)") - self.cursor.execute("insert into t1 values (?)", value) - - result = self.cursor.execute("select dt from t1").fetchone()[0] - self.assertEqual(type(result), datetime) - self.assertEqual(value, result) - - # - # ints and floats - # - - def test_int(self): - value = 1234 - self.cursor.execute("create table t1(n int)") - self.cursor.execute("insert into t1 values (?)", value) - result = self.cursor.execute("select n from t1").fetchone()[0] - self.assertEqual(result, value) - - def test_negative_int(self): - value = -1 - self.cursor.execute("create table t1(n int)") - self.cursor.execute("insert into t1 values (?)", value) - result = self.cursor.execute("select n from t1").fetchone()[0] - self.assertEqual(result, value) - - def test_bigint(self): - input = 3000000000 - self.cursor.execute("create table t1(d bigint)") - self.cursor.execute("insert into t1 values (?)", input) - result = self.cursor.execute("select d from t1").fetchone()[0] - self.assertEqual(result, input) - - def test_overflow_int(self): - # python allows integers of any size, bigger than an 8 byte int can contain - input = 9999999999999999999999999999999999999 - self.cursor.execute("create table t1(d bigint)") - self.cnxn.commit() - self.assertRaises(OverflowError, self.cursor.execute, "insert into t1 values (?)", input) - result = self.cursor.execute("select * from t1").fetchall() - self.assertEqual(result, []) - - def test_float(self): - value = 1234.567 - self.cursor.execute("create table t1(n float)") - self.cursor.execute("insert into t1 values (?)", value) - result = 
self.cursor.execute("select n from t1").fetchone()[0] - self.assertEqual(result, value) - - def test_denorm_float(self): - value = 0.00012345 - self.cursor.execute("create table t1(n float)") - self.cursor.execute("insert into t1 values (?)", value) - result = self.cursor.execute("select n from t1").fetchone()[0] - self.assertEqual(result, value) - - def test_negative_float(self): - value = -200 - self.cursor.execute("create table t1(n float)") - self.cursor.execute("insert into t1 values (?)", value) - result = self.cursor.execute("select n from t1").fetchone()[0] - self.assertEqual(value, result) - - def test_non_numeric_float(self): - self.cursor.execute("create table t1(d float)") - self.cnxn.commit() - for input in (float('+Infinity'), float('-Infinity'), float('NaN')): - self.assertRaises(pyodbc.ProgrammingError, self.cursor.execute, "insert into t1 values (?)", input) - result = self.cursor.execute("select * from t1").fetchall() - self.assertEqual(result, []) - - # - # stored procedures - # - - # def test_callproc(self): - # "callproc with a simple input-only stored procedure" - # pass - - def test_sp_results(self): - self.cursor.execute( - """ - Create procedure proc1 - AS - select top 10 name, id, xtype, refdate - from sysobjects - """) - rows = self.cursor.execute("exec proc1").fetchall() - self.assertEqual(type(rows), list) - self.assertEqual(len(rows), 10) # there has to be at least 10 items in sysobjects - self.assertEqual(type(rows[0].refdate), datetime) - - - def test_sp_results_from_temp(self): - - # Note: I've used "set nocount on" so that we don't get the number of rows deleted from #tmptable. - # If you don't do this, you'd need to call nextset() once to skip it. 
- - self.cursor.execute( - """ - Create procedure proc1 - AS - set nocount on - select top 10 name, id, xtype, refdate - into #tmptable - from sysobjects - - select * from #tmptable - """) - self.cursor.execute("exec proc1") - self.assertTrue(self.cursor.description is not None) - self.assertTrue(len(self.cursor.description) == 4) - - rows = self.cursor.fetchall() - self.assertEqual(type(rows), list) - self.assertEqual(len(rows), 10) # there has to be at least 10 items in sysobjects - self.assertEqual(type(rows[0].refdate), datetime) - - - def test_sp_results_from_vartbl(self): - self.cursor.execute( - """ - Create procedure proc1 - AS - set nocount on - declare @tmptbl table(name varchar(100), id int, xtype varchar(4), refdate datetime) - - insert into @tmptbl - select top 10 name, id, xtype, refdate - from sysobjects - - select * from @tmptbl - """) - self.cursor.execute("exec proc1") - rows = self.cursor.fetchall() - self.assertEqual(type(rows), list) - self.assertEqual(len(rows), 10) # there has to be at least 10 items in sysobjects - self.assertEqual(type(rows[0].refdate), datetime) - - def test_sp_with_dates(self): - # Reported in the forums that passing two datetimes to a stored procedure doesn't work. - self.cursor.execute( - """ - if exists (select * from dbo.sysobjects where id = object_id(N'[test_sp]') and OBJECTPROPERTY(id, N'IsProcedure') = 1) - drop procedure [dbo].[test_sp] - """) - self.cursor.execute( - """ - create procedure test_sp(@d1 datetime, @d2 datetime) - AS - declare @d as int - set @d = datediff(year, @d1, @d2) - select @d - """) - self.cursor.execute("exec test_sp ?, ?", datetime.now(), datetime.now()) - rows = self.cursor.fetchall() - self.assertTrue(rows is not None) - self.assertTrue(rows[0][0] == 0) # 0 years apart - - def test_sp_with_none(self): - # Reported in the forums that passing None caused an error. 
- self.cursor.execute( - """ - if exists (select * from dbo.sysobjects where id = object_id(N'[test_sp]') and OBJECTPROPERTY(id, N'IsProcedure') = 1) - drop procedure [dbo].[test_sp] - """) - self.cursor.execute( - """ - create procedure test_sp(@x varchar(20)) - AS - declare @y varchar(20) - set @y = @x - select @y - """) - self.cursor.execute("exec test_sp ?", None) - rows = self.cursor.fetchall() - self.assertTrue(rows is not None) - self.assertTrue(rows[0][0] == None) # 0 years apart - - - # - # rowcount - # - - def test_rowcount_delete(self): - self.assertEqual(self.cursor.rowcount, -1) - self.cursor.execute("create table t1(i int)") - count = 4 - for i in range(count): - self.cursor.execute("insert into t1 values (?)", i) - self.cursor.execute("delete from t1") - self.assertEqual(self.cursor.rowcount, count) - - def test_rowcount_nodata(self): - """ - This represents a different code path than a delete that deleted something. - - The return value is SQL_NO_DATA and code after it was causing an error. We could use SQL_NO_DATA to step over - the code that errors out and drop down to the same SQLRowCount code. On the other hand, we could hardcode a - zero return value. - """ - self.cursor.execute("create table t1(i int)") - # This is a different code path internally. - self.cursor.execute("delete from t1") - self.assertEqual(self.cursor.rowcount, 0) - - def test_rowcount_select(self): - """ - Ensure Cursor.rowcount is set properly after a select statement. - - pyodbc calls SQLRowCount after each execute and sets Cursor.rowcount, but SQL Server 2005 returns -1 after a - select statement, so we'll test for that behavior. This is valid behavior according to the DB API - specification, but people don't seem to like it. 
- """ - self.cursor.execute("create table t1(i int)") - count = 4 - for i in range(count): - self.cursor.execute("insert into t1 values (?)", i) - self.cursor.execute("select * from t1") - self.assertEqual(self.cursor.rowcount, -1) - - rows = self.cursor.fetchall() - self.assertEqual(len(rows), count) - self.assertEqual(self.cursor.rowcount, -1) - - def test_rowcount_reset(self): - "Ensure rowcount is reset after DDL" - - ddl_rowcount = 0 if self.driver_type_is('freetds') else -1 - - self.cursor.execute("create table t1(i int)") - count = 4 - for i in range(count): - self.cursor.execute("insert into t1 values (?)", i) - self.assertEqual(self.cursor.rowcount, 1) - - self.cursor.execute("create table t2(i int)") - self.assertEqual(self.cursor.rowcount, ddl_rowcount) - - # - # always return Cursor - # - - # In the 2.0.x branch, Cursor.execute sometimes returned the cursor and sometimes the rowcount. This proved very - # confusing when things went wrong and added very little value even when things went right since users could always - # use: cursor.execute("...").rowcount - - def test_retcursor_delete(self): - self.cursor.execute("create table t1(i int)") - self.cursor.execute("insert into t1 values (1)") - v = self.cursor.execute("delete from t1") - self.assertEqual(v, self.cursor) - - def test_retcursor_nodata(self): - """ - This represents a different code path than a delete that deleted something. - - The return value is SQL_NO_DATA and code after it was causing an error. We could use SQL_NO_DATA to step over - the code that errors out and drop down to the same SQLRowCount code. - """ - self.cursor.execute("create table t1(i int)") - # This is a different code path internally. 
- v = self.cursor.execute("delete from t1") - self.assertEqual(v, self.cursor) - - def test_retcursor_select(self): - self.cursor.execute("create table t1(i int)") - self.cursor.execute("insert into t1 values (1)") - v = self.cursor.execute("select * from t1") - self.assertEqual(v, self.cursor) - - # - # misc - # - - def table_with_spaces(self): - "Ensure we can select using [x z] syntax" - - try: - self.cursor.execute("create table [test one](int n)") - self.cursor.execute("insert into [test one] values(1)") - self.cursor.execute("select * from [test one]") - v = self.cursor.fetchone()[0] - self.assertEqual(v, 1) - finally: - self.cnxn.rollback() - - def test_lower_case(self): - "Ensure pyodbc.lowercase forces returned column names to lowercase." - - # Has to be set before creating the cursor, so we must recreate self.cursor. - - pyodbc.lowercase = True - self.cursor = self.cnxn.cursor() - - self.cursor.execute("create table t1(Abc int, dEf int)") - self.cursor.execute("select * from t1") - - names = [ t[0] for t in self.cursor.description ] - names.sort() - - self.assertEqual(names, [ "abc", "def" ]) - - # Put it back so other tests don't fail. - pyodbc.lowercase = False - - def test_row_description(self): - """ - Ensure Cursor.description is accessible as Row.cursor_description. - """ - self.cursor = self.cnxn.cursor() - self.cursor.execute("create table t1(a int, b char(3))") - self.cnxn.commit() - self.cursor.execute("insert into t1 values(1, 'abc')") - - row = self.cursor.execute("select * from t1").fetchone() - - self.assertEqual(self.cursor.description, row.cursor_description) - - - def test_temp_select(self): - # A project was failing to create temporary tables via select into. 
- self.cursor.execute("create table t1(s char(7))") - self.cursor.execute("insert into t1 values(?)", "testing") - v = self.cursor.execute("select * from t1").fetchone()[0] - self.assertEqual(type(v), str) - self.assertEqual(v, "testing") - - self.cursor.execute("select s into t2 from t1") - v = self.cursor.execute("select * from t1").fetchone()[0] - self.assertEqual(type(v), str) - self.assertEqual(v, "testing") - - # Money - # - # The inputs are strings so we don't have to deal with floating point rounding. - - for value in "-1234.56 -1 0 1 1234.56 123456789.21".split(): - name = str(value).replace('.', '_').replace('-', 'neg_') - locals()['test_money_%s' % name] = _simpletest('money', Decimal(str(value))) - - def test_executemany(self): - self.cursor.execute("create table t1(a int, b varchar(10))") - - params = [ (i, str(i)) for i in range(1, 6) ] - - self.cursor.executemany("insert into t1(a, b) values (?,?)", params) - - count = self.cursor.execute("select count(*) from t1").fetchone()[0] - self.assertEqual(count, len(params)) - - self.cursor.execute("select a, b from t1 order by a") - rows = self.cursor.fetchall() - self.assertEqual(count, len(rows)) - - for param, row in zip(params, rows): - self.assertEqual(param[0], row[0]) - self.assertEqual(param[1], row[1]) - - - def test_executemany_one(self): - "Pass executemany a single sequence" - self.cursor.execute("create table t1(a int, b varchar(10))") - - params = [ (1, "test") ] - - self.cursor.executemany("insert into t1(a, b) values (?,?)", params) - - count = self.cursor.execute("select count(*) from t1").fetchone()[0] - self.assertEqual(count, len(params)) - - self.cursor.execute("select a, b from t1 order by a") - rows = self.cursor.fetchall() - self.assertEqual(count, len(rows)) - - for param, row in zip(params, rows): - self.assertEqual(param[0], row[0]) - self.assertEqual(param[1], row[1]) - - def test_executemany_dae_0(self): - """ - DAE for 0-length value - """ - self.cursor.execute("create table 
t1(a nvarchar(max))") - - self.cursor.fast_executemany = True - self.cursor.executemany("insert into t1(a) values(?)", [['']]) - - self.assertEqual(self.cursor.execute("select a from t1").fetchone()[0], '') - - self.cursor.fast_executemany = False - - def test_executemany_failure(self): - """ - Ensure that an exception is raised if one query in an executemany fails. - """ - self.cursor.execute("create table t1(a int, b varchar(10))") - - params = [ (1, 'good'), - ('error', 'not an int'), - (3, 'good') ] - - self.assertRaises(pyodbc.Error, self.cursor.executemany, "insert into t1(a, b) value (?, ?)", params) - - - def test_row_slicing(self): - self.cursor.execute("create table t1(a int, b int, c int, d int)"); - self.cursor.execute("insert into t1 values(1,2,3,4)") - - row = self.cursor.execute("select * from t1").fetchone() - - result = row[:] - self.assertTrue(result is row) - - result = row[:-1] - self.assertEqual(result, (1,2,3)) - - result = row[0:4] - self.assertTrue(result is row) - - - def test_row_repr(self): - self.cursor.execute("create table t1(a int, b int, c int, d varchar(50))"); - self.cursor.execute("insert into t1 values(1,2,3,'four')") - - row = self.cursor.execute("select * from t1").fetchone() - - result = str(row) - self.assertEqual(result, "(1, 2, 3, 'four')") - - result = str(row[:-1]) - self.assertEqual(result, "(1, 2, 3)") - - result = str(row[:1]) - self.assertEqual(result, "(1,)") - - - def test_concatenation(self): - v2 = '0123456789' * 30 - v3 = '9876543210' * 30 - - self.cursor.execute("create table t1(c1 int identity(1, 1), c2 varchar(300), c3 varchar(300))") - self.cursor.execute("insert into t1(c2, c3) values (?,?)", v2, v3) - - row = self.cursor.execute("select c2, c3, c2 + c3 as both from t1").fetchone() - - self.assertEqual(row.both, v2 + v3) - - def test_view_select(self): - # Reported in forum: Can't select from a view? I think I do this a lot, but another test never hurts. 
- - # Create a table (t1) with 3 rows and a view (t2) into it. - self.cursor.execute("create table t1(c1 int identity(1, 1), c2 varchar(50))") - for i in range(3): - self.cursor.execute("insert into t1(c2) values (?)", "string%s" % i) - self.cursor.execute("create view t2 as select * from t1") - - # Select from the view - self.cursor.execute("select * from t2") - rows = self.cursor.fetchall() - self.assertTrue(rows is not None) - self.assertTrue(len(rows) == 3) - - def test_autocommit(self): - self.assertEqual(self.cnxn.autocommit, False) - othercnxn = pyodbc.connect(self.connection_string, autocommit=True) - self.assertEqual(othercnxn.autocommit, True) - othercnxn.autocommit = False - self.assertEqual(othercnxn.autocommit, False) - - def test_sqlserver_callproc(self): - try: - self.cursor.execute("drop procedure pyodbctest") - self.cnxn.commit() - except: - pass - - self.cursor.execute("create table t1(s varchar(10))") - self.cursor.execute("insert into t1 values(?)", "testing") - - self.cursor.execute(""" - create procedure pyodbctest @var1 varchar(32) - as - begin - select s - from t1 - return - end - """) - self.cnxn.commit() - - # for row in self.cursor.procedureColumns('pyodbctest'): - # print row.procedure_name, row.column_name, row.column_type, row.type_name - - self.cursor.execute("exec pyodbctest 'hi'") - - # print self.cursor.description - # for row in self.cursor: - # print row.s - - def test_skip(self): - # Insert 1, 2, and 3. Fetch 1, skip 2, fetch 3. 
- - self.cursor.execute("create table t1(id int)"); - for i in range(1, 5): - self.cursor.execute("insert into t1 values(?)", i) - self.cursor.execute("select id from t1 order by id") - self.assertEqual(self.cursor.fetchone()[0], 1) - self.cursor.skip(2) - self.assertEqual(self.cursor.fetchone()[0], 4) - - def test_timeout(self): - self.assertEqual(self.cnxn.timeout, 0) # defaults to zero (off) - - self.cnxn.timeout = 30 - self.assertEqual(self.cnxn.timeout, 30) - - self.cnxn.timeout = 0 - self.assertEqual(self.cnxn.timeout, 0) - - def test_sets_execute(self): - # Only lists and tuples are allowed. - def f(): - self.cursor.execute("create table t1 (word varchar (100))") - words = set (['a']) - self.cursor.execute("insert into t1 (word) VALUES (?)", [words]) - - self.assertRaises(pyodbc.ProgrammingError, f) - - def test_sets_executemany(self): - # Only lists and tuples are allowed. - def f(): - self.cursor.execute("create table t1 (word varchar (100))") - words = set (['a']) - self.cursor.executemany("insert into t1 (word) values (?)", [words]) - - self.assertRaises(TypeError, f) - - def test_row_execute(self): - "Ensure we can use a Row object as a parameter to execute" - self.cursor.execute("create table t1(n int, s varchar(10))") - self.cursor.execute("insert into t1 values (1, 'a')") - row = self.cursor.execute("select n, s from t1").fetchone() - self.assertNotEqual(row, None) - - self.cursor.execute("create table t2(n int, s varchar(10))") - self.cursor.execute("insert into t2 values (?, ?)", row) - - def test_row_executemany(self): - "Ensure we can use a Row object as a parameter to executemany" - self.cursor.execute("create table t1(n int, s varchar(10))") - - for i in range(3): - self.cursor.execute("insert into t1 values (?, ?)", i, chr(ord('a')+i)) - - rows = self.cursor.execute("select n, s from t1").fetchall() - self.assertNotEqual(len(rows), 0) - - self.cursor.execute("create table t2(n int, s varchar(10))") - self.cursor.executemany("insert into t2 
values (?, ?)", rows) - - def test_description(self): - "Ensure cursor.description is correct" - - self.cursor.execute("create table t1(n int, s varchar(8), d decimal(5,2))") - self.cursor.execute("insert into t1 values (1, 'abc', '1.23')") - self.cursor.execute("select * from t1") - - # (I'm not sure the precision of an int is constant across different versions, bits, so I'm hand checking the - # items I do know. - - # int - t = self.cursor.description[0] - self.assertEqual(t[0], 'n') - self.assertEqual(t[1], int) - self.assertEqual(t[5], 0) # scale - self.assertEqual(t[6], True) # nullable - - # varchar(8) - t = self.cursor.description[1] - self.assertEqual(t[0], 's') - self.assertEqual(t[1], str) - self.assertEqual(t[4], 8) # precision - self.assertEqual(t[5], 0) # scale - self.assertEqual(t[6], True) # nullable - - # decimal(5, 2) - t = self.cursor.description[2] - self.assertEqual(t[0], 'd') - self.assertEqual(t[1], Decimal) - self.assertEqual(t[4], 5) # precision - self.assertEqual(t[5], 2) # scale - self.assertEqual(t[6], True) # nullable - - def test_cursor_messages_with_print(self): - """ - Ensure the Cursor.messages attribute is handled correctly with a simple PRINT statement. 
- """ - # self.cursor is used in setUp, hence is not brand new at this point - brand_new_cursor = self.cnxn.cursor() - self.assertIsNone(brand_new_cursor.messages) - - # SQL Server PRINT statements are never more than 8000 characters - # https://docs.microsoft.com/en-us/sql/t-sql/language-elements/print-transact-sql#remarks - for msg in ('hello world', 'ABCDEFGHIJ' * 800): - self.cursor.execute("PRINT '{}'".format(msg)) - messages = self.cursor.messages - self.assertTrue(type(messages) is list) - self.assertEqual(len(messages), 1) - self.assertTrue(type(messages[0]) is tuple) - self.assertEqual(len(messages[0]), 2) - self.assertTrue(type(messages[0][0]) is str) - self.assertTrue(type(messages[0][1]) is str) - self.assertEqual('[01000] (0)', messages[0][0]) - self.assertTrue(messages[0][1].endswith(msg)) - - def test_cursor_messages_with_stored_proc(self): - """ - Complex scenario to test the Cursor.messages attribute. - """ - self.cursor.execute(""" - CREATE OR ALTER PROCEDURE test_cursor_messages AS - BEGIN - SET NOCOUNT ON; - PRINT 'Message 1a'; - PRINT 'Message 1b'; - SELECT N'Field 1a' AS F UNION ALL SELECT N'Field 1b'; - SELECT N'Field 2a' AS F UNION ALL SELECT N'Field 2b'; - PRINT 'Message 2a'; - PRINT 'Message 2b'; - END - """) - # result set 1 - self.cursor.execute("EXEC test_cursor_messages") - rows = [tuple(r) for r in self.cursor.fetchall()] # convert pyodbc.Row objects for ease of use - self.assertEqual(len(rows), 2) - self.assertSequenceEqual(rows, [('Field 1a', ), ('Field 1b', )]) - self.assertEqual(len(self.cursor.messages), 2) - self.assertTrue(self.cursor.messages[0][1].endswith('Message 1a')) - self.assertTrue(self.cursor.messages[1][1].endswith('Message 1b')) - # result set 2 - self.assertTrue(self.cursor.nextset()) - rows = [tuple(r) for r in self.cursor.fetchall()] # convert pyodbc.Row objects for ease of use - self.assertEqual(len(rows), 2) - self.assertSequenceEqual(rows, [('Field 2a', ), ('Field 2b', )]) - 
self.assertEqual(self.cursor.messages, []) - # result set 3 - self.assertTrue(self.cursor.nextset()) - with self.assertRaises(pyodbc.ProgrammingError): - self.cursor.fetchall() - self.assertEqual(len(self.cursor.messages), 2) - self.assertTrue(self.cursor.messages[0][1].endswith('Message 2a')) - self.assertTrue(self.cursor.messages[1][1].endswith('Message 2b')) - # result set 4 (which shouldn't exist) - self.assertFalse(self.cursor.nextset()) - with self.assertRaises(pyodbc.ProgrammingError): - self.cursor.fetchall() - self.assertEqual(self.cursor.messages, []) - - def test_none_param(self): - "Ensure None can be used for params other than the first" - # Some driver/db versions would fail if NULL was not the first parameter because SQLDescribeParam (only used - # with NULL) could not be used after the first call to SQLBindParameter. This means None always worked for the - # first column, but did not work for later columns. - # - # If SQLDescribeParam doesn't work, pyodbc would use VARCHAR which almost always worked. However, - # binary/varbinary won't allow an implicit conversion. - - self.handle_known_issues_for('freetds', print_reminder=True) - - self.cursor.execute("create table t1(n int, blob varbinary(max))") - self.cursor.execute("insert into t1 values (1, newid())") - row = self.cursor.execute("select * from t1").fetchone() - self.assertEqual(row.n, 1) - self.assertEqual(type(row.blob), bytes) - - sql = "update t1 set n=?, blob=?" - try: - self.cursor.execute(sql, 2, None) - except pyodbc.DataError: - if self.handle_known_issues_for('freetds'): - # FREETDS_KNOWN_ISSUE - # - # cnxn.getinfo(pyodbc.SQL_DESCRIBE_PARAMETER) returns False for FreeTDS, so - # pyodbc can't call SQLDescribeParam to get the correct parameter type. - # This can lead to errors being returned from SQL Server when sp_prepexec is called, - # e.g., "Implicit conversion from data type varchar to varbinary(max) is not allowed." 
- # - # So at least verify that the user can manually specify the parameter type - self.cursor.setinputsizes([(), (pyodbc.SQL_VARBINARY, None, None)]) - self.cursor.execute(sql, 2, None) - else: - raise - row = self.cursor.execute("select * from t1").fetchone() - self.assertEqual(row.n, 2) - self.assertEqual(row.blob, None) - - - def test_output_conversion(self): - def convert1(value): - # The value is the raw bytes (as a bytes object) read from the - # database. We'll simply add an X at the beginning at the end. - return 'X' + value.decode('latin1') + 'X' - - def convert2(value): - # Same as above, but add a Y at the beginning at the end. - return 'Y' + value.decode('latin1') + 'Y' - - self.cursor.execute("create table t1(n int, v varchar(10))") - self.cursor.execute("insert into t1 values (1, '123.45')") - - self.cnxn.add_output_converter(pyodbc.SQL_VARCHAR, convert1) - value = self.cursor.execute("select v from t1").fetchone()[0] - self.assertEqual(value, 'X123.45X') - - # Clear all conversions and try again. There should be no Xs this time. - self.cnxn.clear_output_converters() - value = self.cursor.execute("select v from t1").fetchone()[0] - self.assertEqual(value, '123.45') - - # Same but clear using remove_output_converter. - self.cnxn.add_output_converter(pyodbc.SQL_VARCHAR, convert1) - value = self.cursor.execute("select v from t1").fetchone()[0] - self.assertEqual(value, 'X123.45X') - - self.cnxn.remove_output_converter(pyodbc.SQL_VARCHAR) - value = self.cursor.execute("select v from t1").fetchone()[0] - self.assertEqual(value, '123.45') - - # Clear via add_output_converter, passing None for the converter function. 
- self.cnxn.add_output_converter(pyodbc.SQL_VARCHAR, convert1) - value = self.cursor.execute("select v from t1").fetchone()[0] - self.assertEqual(value, 'X123.45X') - - self.cnxn.add_output_converter(pyodbc.SQL_VARCHAR, None) - value = self.cursor.execute("select v from t1").fetchone()[0] - self.assertEqual(value, '123.45') - - # retrieve and temporarily replace converter (get_output_converter) - # - # case_1: converter already registered - self.cnxn.add_output_converter(pyodbc.SQL_VARCHAR, convert1) - value = self.cursor.execute("select v from t1").fetchone()[0] - self.assertEqual(value, 'X123.45X') - prev_converter = self.cnxn.get_output_converter(pyodbc.SQL_VARCHAR) - self.assertNotEqual(prev_converter, None) - self.cnxn.add_output_converter(pyodbc.SQL_VARCHAR, convert2) - value = self.cursor.execute("select v from t1").fetchone()[0] - self.assertEqual(value, 'Y123.45Y') - self.cnxn.add_output_converter(pyodbc.SQL_VARCHAR, prev_converter) - value = self.cursor.execute("select v from t1").fetchone()[0] - self.assertEqual(value, 'X123.45X') - # - # case_2: no converter already registered - self.cnxn.clear_output_converters() - value = self.cursor.execute("select v from t1").fetchone()[0] - self.assertEqual(value, '123.45') - prev_converter = self.cnxn.get_output_converter(pyodbc.SQL_VARCHAR) - self.assertEqual(prev_converter, None) - self.cnxn.add_output_converter(pyodbc.SQL_VARCHAR, convert2) - value = self.cursor.execute("select v from t1").fetchone()[0] - self.assertEqual(value, 'Y123.45Y') - self.cnxn.add_output_converter(pyodbc.SQL_VARCHAR, prev_converter) - value = self.cursor.execute("select v from t1").fetchone()[0] - self.assertEqual(value, '123.45') - - - def test_too_large(self): - """Ensure error raised if insert fails due to truncation""" - value = 'x' * 1000 - self.cursor.execute("create table t1(s varchar(800))") - def test(): - self.cursor.execute("insert into t1 values (?)", value) - # different versions of SQL Server generate different errors - 
self.assertRaises((pyodbc.DataError, pyodbc.ProgrammingError), test) - - def test_geometry_null_insert(self): - def convert(value): - return value - - self.cnxn.add_output_converter(-151, convert) # -151 is SQL Server's geometry - self.cursor.execute("create table t1(n int, v geometry)") - self.cursor.execute("insert into t1 values (?, ?)", 1, None) - value = self.cursor.execute("select v from t1").fetchone()[0] - self.assertEqual(value, None) - self.cnxn.clear_output_converters() - - def test_login_timeout(self): - # This can only test setting since there isn't a way to cause it to block on the server side. - cnxns = pyodbc.connect(self.connection_string, timeout=2) - - def test_row_equal(self): - self.cursor.execute("create table t1(n int, s varchar(20))") - self.cursor.execute("insert into t1 values (1, 'test')") - row1 = self.cursor.execute("select n, s from t1").fetchone() - row2 = self.cursor.execute("select n, s from t1").fetchone() - b = (row1 == row2) - self.assertEqual(b, True) - - def test_row_gtlt(self): - self.cursor.execute("create table t1(n int, s varchar(20))") - self.cursor.execute("insert into t1 values (1, 'test1')") - self.cursor.execute("insert into t1 values (1, 'test2')") - rows = self.cursor.execute("select n, s from t1 order by s").fetchall() - self.assertTrue(rows[0] < rows[1]) - self.assertTrue(rows[0] <= rows[1]) - self.assertTrue(rows[1] > rows[0]) - self.assertTrue(rows[1] >= rows[0]) - self.assertTrue(rows[0] != rows[1]) - - rows = list(rows) - rows.sort() # uses < - - def test_context_manager_success(self): - "Ensure `with` commits if an exception is not raised" - self.cursor.execute("create table t1(n int)") - self.cnxn.commit() - - with self.cnxn: - self.cursor.execute("insert into t1 values (1)") - - rows = self.cursor.execute("select n from t1").fetchall() - self.assertEqual(len(rows), 1) - self.assertEqual(rows[0][0], 1) - - def test_context_manager_failure(self): - "Ensure `with` rolls back if an exception is raised" - # We'll 
insert a row and commit it. Then we'll insert another row followed by an - # exception. - - self.cursor.execute("create table t1(n int)") - self.cursor.execute("insert into t1 values (1)") - self.cnxn.commit() - - def _fail(): - with self.cnxn: - self.cursor.execute("insert into t1 values (2)") - self.cursor.execute("delete from bogus") - - self.assertRaises(pyodbc.Error, _fail) - - self.cursor.execute("select max(n) from t1") - val = self.cursor.fetchval() - self.assertEqual(val, 1) - - - def test_untyped_none(self): - # From issue 129 - value = self.cursor.execute("select ?", None).fetchone()[0] - self.assertEqual(value, None) - - def test_large_update_nodata(self): - self.cursor.execute('create table t1(a varbinary(max))') - hundredkb = b'x'*100*1024 - self.cursor.execute('update t1 set a=? where 1=0', (hundredkb,)) - - def test_func_param(self): - self.cursor.execute(''' - create function func1 (@testparam varchar(4)) - returns @rettest table (param varchar(4)) - as - begin - insert @rettest - select @testparam - return - end - ''') - self.cnxn.commit() - value = self.cursor.execute("select * from func1(?)", 'test').fetchone()[0] - self.assertEqual(value, 'test') - - def test_no_fetch(self): - # Issue 89 with FreeTDS: Multiple selects (or catalog functions that issue selects) without fetches seem to - # confuse the driver. - self.cursor.execute('select 1') - self.cursor.execute('select 1') - self.cursor.execute('select 1') - - def test_drivers(self): - drivers = pyodbc.drivers() - self.assertEqual(list, type(drivers)) - self.assertTrue(len(drivers) > 0) - - m = re.search('DRIVER={?([^}]+?)}?;', self.connection_string, re.IGNORECASE) - if m: # issue #1000 - may be testing with DSN= connection - current = m.group(1) - self.assertTrue(current in drivers) - - def test_decode_meta(self): - """ - Ensure column names with non-ASCII characters are converted using the configured encodings. 
- """ - # This is from GitHub issue #190 - self.cursor.execute("create table t1(a int)") - self.cursor.execute("insert into t1 values (1)") - self.cursor.execute('select a as "Tipología" from t1') - self.assertEqual(self.cursor.description[0][0], "Tipología") - - def test_exc_integrity(self): - "Make sure an IntegretyError is raised" - # This is really making sure we are properly encoding and comparing the SQLSTATEs. - self.cursor.execute("create table t1(s1 varchar(10) primary key)") - self.cursor.execute("insert into t1 values ('one')") - self.assertRaises(pyodbc.IntegrityError, self.cursor.execute, "insert into t1 values ('one')") - - def test_columns(self): - # When using aiohttp, `await cursor.primaryKeys('t1')` was raising the error - # - # Error: TypeError: argument 2 must be str, not None - # - # I'm not sure why, but PyArg_ParseTupleAndKeywords fails if you use "|s" for an - # optional string keyword when calling indirectly. - - self.cursor.execute("create table t1(a int, b varchar(3), xΏz varchar(4))") - - self.cursor.columns('t1') - results = {row.column_name: row for row in self.cursor} - row = results['a'] - assert row.type_name == 'int', row.type_name - row = results['b'] - assert row.type_name == 'varchar' - assert row.column_size == 3 - - # Now do the same, but specifically pass in None to one of the keywords. Old versions - # were parsing arguments incorrectly and would raise an error. (This crops up when - # calling indirectly like columns(*args, **kwargs) which aiodbc does.) 
- - self.cursor.columns('t1', schema=None, catalog=None) - results = {row.column_name: row for row in self.cursor} - row = results['a'] - assert row.type_name == 'int', row.type_name - row = results['b'] - assert row.type_name == 'varchar' - assert row.column_size == 3 - row = results['xΏz'] - assert row.type_name == 'varchar' - assert row.column_size == 4, row.column_size - - # - for i in range(8, 16): - table_name = 'pyodbc_89abcdef'[:i] - - self.cursor.execute("""\ - IF OBJECT_ID (N'{0}', N'U') IS NOT NULL DROP TABLE {0}; - CREATE TABLE {0} (id INT PRIMARY KEY); - """.format(table_name)) - - col_count = len([col.column_name for col in self.cursor.columns(table_name)]) - # print('table [{}] ({} characters): {} columns{}'.format(table_name, i, col_count, ' <-' if col_count == 0 else '')) - self.assertEqual(col_count, 1) - - self.cursor.execute("DROP TABLE {};".format(table_name)) - # - - def test_cancel(self): - # I'm not sure how to reliably cause a hang to cancel, so for now we'll settle with - # making sure SQLCancel is called correctly. - self.cursor.execute("select 1") - self.cursor.cancel() - - def test_emoticons_as_parameter(self): - # https://github.com/mkleehammer/pyodbc/issues/423 - # - # When sending a varchar parameter, pyodbc is supposed to set ColumnSize to the number - # of characters. Ensure it works even with 4-byte characters. 
- # - # http://www.fileformat.info/info/unicode/char/1f31c/index.htm - - v = "x \U0001F31C z" - - self.cursor.execute("create table t1(s nvarchar(100))") - self.cursor.execute("insert into t1 values (?)", v) - - result = self.cursor.execute("select s from t1").fetchone()[0] - - self.assertEqual(result, v) - - def test_emoticons_as_literal(self): - # similar to `test_emoticons_as_parameter`, above, except for Unicode literal - # - # http://www.fileformat.info/info/unicode/char/1f31c/index.htm - - # FreeTDS ODBC issue fixed in version 1.1.23 - # https://github.com/FreeTDS/freetds/issues/317 - - v = "x \U0001F31C z" - - self.cursor.execute("create table t1(s nvarchar(100))") - self.cursor.execute("insert into t1 values (N'%s')" % v) - - result = self.cursor.execute("select s from t1").fetchone()[0] - - self.assertEqual(result, v) - - def _test_tvp(self, diff_schema): - # https://github.com/mkleehammer/pyodbc/issues/290 - # - # pyodbc supports queries with table valued parameters in sql server - # - - if self.handle_known_issues_for('freetds', print_reminder=True): - warn('FREETDS_KNOWN_ISSUE - test_tvp: test cancelled.') - return - - procname = 'SelectTVP' - typename = 'TestTVP' - - if diff_schema: - schemaname = 'myschema' - procname = schemaname + '.' + procname - typenameonly = typename - typename = schemaname + '.' + typename - - # (Don't use "if exists" since older SQL Servers don't support it.) 
- try: - self.cursor.execute("drop procedure " + procname) - except: - pass - try: - self.cursor.execute("drop type " + typename) - except: - pass - if diff_schema: - try: - self.cursor.execute("drop schema " + schemaname) - except: - pass - self.cursor.commit() - - if diff_schema: - self.cursor.execute("CREATE SCHEMA myschema") - self.cursor.commit() - - query = "CREATE TYPE %s AS TABLE("\ - "c01 VARCHAR(255),"\ - "c02 VARCHAR(MAX),"\ - "c03 VARBINARY(255),"\ - "c04 VARBINARY(MAX),"\ - "c05 BIT,"\ - "c06 DATE,"\ - "c07 TIME,"\ - "c08 DATETIME2(5),"\ - "c09 BIGINT,"\ - "c10 FLOAT,"\ - "c11 NUMERIC(38, 24),"\ - "c12 UNIQUEIDENTIFIER)" % typename - - self.cursor.execute(query) - self.cursor.commit() - self.cursor.execute("CREATE PROCEDURE %s @TVP %s READONLY AS SELECT * FROM @TVP;" % (procname, typename)) - self.cursor.commit() - - long_string = '' - long_bytearray = [] - for i in range(255): - long_string += chr((i % 95) + 32) - long_bytearray.append(i % 255) - - very_long_string = '' - very_long_bytearray = [] - for i in range(2000000): - very_long_string += chr((i % 95) + 32) - very_long_bytearray.append(i % 255) - - c01 = ['abc', '', long_string] - - c02 = ['abc', '', very_long_string] - - c03 = [bytearray([0xD1, 0xCE, 0xFA, 0xCE]), - bytearray([0x00, 0x01, 0x02, 0x03, 0x04]), - bytearray(long_bytearray)] - - c04 = [bytearray([0x0F, 0xF1, 0xCE, 0xCA, 0xFE]), - bytearray([0x00, 0x01, 0x02, 0x03, 0x04, 0x05]), - bytearray(very_long_bytearray)] - - c05 = [1, 0, 1] - - c06 = [date(1997, 8, 29), - date(1, 1, 1), - date(9999, 12, 31)] - - c07 = [time(9, 13, 39), - time(0, 0, 0), - time(23, 59, 59)] - - c08 = [datetime(2018, 11, 13, 13, 33, 26, 298420), - datetime(1, 1, 1, 0, 0, 0, 0), - datetime(9999, 12, 31, 23, 59, 59, 999990)] - - c09 = [1234567, -9223372036854775808, 9223372036854775807] - - c10 = [3.14, -1.79E+308, 1.79E+308] - - c11 = [Decimal('31234567890123.141243449787580175325274'), - Decimal( '0.000000000000000000000001'), - 
Decimal('99999999999999.999999999999999999999999')] - - c12 = ['4FE34A93-E574-04CC-200A-353F0D1770B1', - '33F7504C-2BAC-1B83-01D1-7434A7BA6A17', - 'FFFFFFFF-FFFF-FFFF-FFFF-FFFFFFFFFFFF'] - - param_array = [] - - for i in range (3): - param_array.append([c01[i], c02[i], c03[i], c04[i], c05[i], c06[i], c07[i], c08[i], c09[i], c10[i], c11[i], c12[i]]) - - success = True - - try: - p1 = [param_array] - if diff_schema: - p1 = [ [ typenameonly, schemaname ] + param_array ] - result_array = self.cursor.execute("exec %s ?" % procname, p1).fetchall() - except Exception as ex: - print("Failed to execute SelectTVP") - print("Exception: [" + type(ex).__name__ + "]" , ex.args) - - success = False - else: - for r in range(len(result_array)): - for c in range(len(result_array[r])): - if(result_array[r][c] != param_array[r][c]): - print("Mismatch at row " + str(r+1) + ", column " + str(c+1) + "; expected:", param_array[r][c] , " received:", result_array[r][c]) - success = False - - try: - p1 = [[]] - if diff_schema: - p1 = [ [ typenameonly, schemaname ] + [] ] - result_array = self.cursor.execute("exec %s ?" 
% procname, p1).fetchall() - self.assertEqual(result_array, []) - except Exception as ex: - print("Failed to execute SelectTVP") - print("Exception: [" + type(ex).__name__ + "]", ex.args) - success = False - - self.assertEqual(success, True) - - def test_columns(self): - self.cursor.execute( - """ - create table t1(n int, d datetime, c nvarchar(100)) - """) - - self.cursor.columns(table='t1') - names = {row.column_name for row in self.cursor.fetchall()} - assert names == {'n', 'd', 'c'}, 'names=%r' % names - - self.cursor.columns(table='t1', column='c') - row = self.cursor.fetchone() - assert row.column_name == 'c' - - def test_tvp(self): - self._test_tvp(False) - - def test_tvp_diffschema(self): - self._test_tvp(True) - -def main(): - from argparse import ArgumentParser - parser = ArgumentParser(usage=usage) - parser.add_argument("-v", "--verbose", action="count", default=0, help="increment test verbosity (can be used multiple times)") - parser.add_argument("-d", "--debug", action="store_true", default=False, help="print debugging items") - parser.add_argument("-t", "--test", help="run only the named test") - parser.add_argument("--sqlserver", nargs='*', help="connection string(s) for SQL Server") - # typically, the connection string is provided as the only parameter, so handle this case - parser.add_argument('conn_str', nargs='*', help="connection string for SQL Server") - args = parser.parse_args() - - if len(args.conn_str) > 1: - parser.error('Only one positional argument is allowed. 
Do you need quotes around the connection string?') - - if args.sqlserver is not None: - connection_strings = args.sqlserver - elif len(args.conn_str) == 1 and args.conn_str[0]: - connection_strings = [args.conn_str[0]] - else: - config_conn_string = testutils.load_setup_connection_string('sqlservertests') - if config_conn_string is None: - parser.print_help() - return True # no connection string, therefore nothing to do - else: - connection_strings = [config_conn_string] - - if args.verbose: - cnxn = pyodbc.connect(connection_strings[0]) - testutils.print_library_info(cnxn) - cnxn.close() - - overall_result = True - for connection_string in connection_strings: - print(f'Running tests with connection string: {connection_string}') - suite = testutils.load_tests(SqlServerTestCase, args.test, connection_string) - testRunner = unittest.TextTestRunner(verbosity=args.verbose) - result = testRunner.run(suite) - if not result.wasSuccessful(): - overall_result = False - - return overall_result - - -if __name__ == '__main__': - - # add the build directory to the Python path so we're testing the latest - # build, not the pip-installed version - testutils.add_to_path() - - # only after setting the Python path, import the pyodbc module - import pyodbc - - # run the tests - sys.exit(0 if main() else 1) diff --git a/tests3/test.py b/tests3/test.py deleted file mode 100644 index a4ae2b33..00000000 --- a/tests3/test.py +++ /dev/null @@ -1,17 +0,0 @@ - -from testutils import * -add_to_path() - -import pyodbc - -cnxn = pyodbc.connect("DRIVER={SQL Server Native Client 10.0};SERVER=localhost;DATABASE=test;Trusted_Connection=yes") -print('cnxn:', cnxn) - -cursor = cnxn.cursor() -print('cursor:', cursor) - -cursor.execute("select 1") -row = cursor.fetchone() -print('row:', row) - - diff --git a/tests3/testbase.py b/tests3/testbase.py deleted file mode 100644 index 6c57b9fe..00000000 --- a/tests3/testbase.py +++ /dev/null @@ -1,25 +0,0 @@ - -import unittest - -_TESTSTR = 
'0123456789-abcdefghijklmnopqrstuvwxyz-' - -def _generate_test_string(length): - """ - Returns a string of `length` characters, constructed by repeating _TESTSTR as necessary. - - To enhance performance, there are 3 ways data is read, based on the length of the value, so most data types are - tested with 3 lengths. This function helps us generate the test data. - - We use a recognizable data set instead of a single character to make it less likely that "overlap" errors will - be hidden and to help us manually identify where a break occurs. - """ - if length <= len(_TESTSTR): - return _TESTSTR[:length] - - c = (length + len(_TESTSTR)-1) / len(_TESTSTR) - v = _TESTSTR * c - return v[:length] - -class TestBase(unittest.TestCase): - - diff --git a/tests3/testutils.py b/tests3/testutils.py deleted file mode 100644 index 0f946340..00000000 --- a/tests3/testutils.py +++ /dev/null @@ -1,122 +0,0 @@ -from datetime import datetime -import importlib.machinery -import os -from os.path import join, dirname, abspath -import platform -import sys -import unittest - - -def add_to_path(): - """ - Prepends the build directory to the path so that newly built pyodbc libraries are - used, allowing it to be tested without pip-installing it. - """ - # look for the suffixes used in the build filenames, e.g. ".cp38-win_amd64.pyd", - # ".cpython-38-darwin.so", ".cpython-38-x86_64-linux-gnu.so", etc. 
- library_exts = [ext for ext in importlib.machinery.EXTENSION_SUFFIXES if ext != '.pyd'] - # generate the name of the pyodbc build file(s) - library_names = ['pyodbc%s' % ext for ext in library_exts] - - # the build directory is assumed to be one directory up from this file - build_dir = join(dirname(dirname(abspath(__file__))), 'build') - - # find all the relevant pyodbc build files, and get their modified dates - file_info = [ - (os.path.getmtime(join(dirpath, file)), join(dirpath, file)) - for dirpath, dirs, files in os.walk(build_dir) - for file in files - if file in library_names - ] - if file_info: - file_info.sort() # put them in chronological order - library_modified_dt, library_path = file_info[-1] # use the latest one - # add the build directory to the Python path - sys.path.insert(0, dirname(library_path)) - print('Library: {} (last modified {})'.format(library_path, datetime.fromtimestamp(library_modified_dt))) - else: - print('Did not find the pyodbc library in the build directory. 
Will use the installed version.') - - -def print_library_info(cnxn): - import pyodbc - print('python: %s' % sys.version) - print('pyodbc: %s %s' % (pyodbc.version, os.path.abspath(pyodbc.__file__))) - print('odbc: %s' % cnxn.getinfo(pyodbc.SQL_ODBC_VER)) - print('driver: %s %s' % (cnxn.getinfo(pyodbc.SQL_DRIVER_NAME), cnxn.getinfo(pyodbc.SQL_DRIVER_VER))) - print(' supports ODBC version %s' % cnxn.getinfo(pyodbc.SQL_DRIVER_ODBC_VER)) - print('os: %s' % platform.system()) - print('unicode: Py_Unicode=%s SQLWCHAR=%s' % (pyodbc.UNICODE_SIZE, pyodbc.SQLWCHAR_SIZE)) - - cursor = cnxn.cursor() - for typename in ['VARCHAR', 'WVARCHAR', 'BINARY']: - t = getattr(pyodbc, 'SQL_' + typename) - cursor.getTypeInfo(t) - row = cursor.fetchone() - print('Max %s = %s' % (typename, row and row[2] or '(not supported)')) - - if platform.system() == 'Windows': - print(' %s' % ' '.join([s for s in platform.win32_ver() if s])) - - -def discover_and_run(top_level_dir='.', start_dir='.', pattern='test*.py', verbosity=0): - """Finds all the test cases in the start directory and runs them""" - tests = unittest.defaultTestLoader.discover(top_level_dir=top_level_dir, start_dir=start_dir, pattern=pattern) - runner = unittest.TextTestRunner(verbosity=verbosity) - result = runner.run(tests) - return result - - -def load_tests(testclass, name, *args): - """ - Returns a TestSuite for tests in `testclass`. - - name - Optional test name if you only want to run 1 test. If not provided all tests in `testclass` will be loaded. - - args - Arguments for the test class constructor. These will be passed after the test method name. 
- """ - if name: - if not name.startswith('test_'): - name = 'test_%s' % name - names = [ name ] - - else: - names = [ method for method in dir(testclass) if method.startswith('test_') ] - - return unittest.TestSuite([ testclass(name, *args) for name in names ]) - - -def load_setup_connection_string(section): - """ - Attempts to read the default connection string from the setup.cfg file. - - If the file does not exist or if it exists but does not contain the connection string, None is returned. If the - file exists but cannot be parsed, an exception is raised. - """ - from os.path import exists, join, dirname - from configparser import ConfigParser - - FILENAME = 'setup.cfg' - KEY = 'connection-string' - - path = dirname(abspath(__file__)) - while True: - fqn = join(path, 'tmp', FILENAME) - if exists(fqn): - break - parent = dirname(path) - print('{} --> {}'.format(path, parent)) - if parent == path: - return None - path = parent - - try: - p = ConfigParser() - p.read(fqn) - except: - raise SystemExit('Unable to parse %s: %s' % (path, sys.exc_info()[1])) - - if p.has_option(section, KEY): - return p.get(section, KEY)