From 53db7b51dd684703a290bc70209aef559319ee7b Mon Sep 17 00:00:00 2001 From: Michael Kleehammer Date: Sat, 19 Nov 2022 01:13:35 -0600 Subject: [PATCH 01/39] WIP: Started porting build and PostgreSQL tests to py3 --- README.md | 9 +- appveyor/after_test.cmd | 13 - appveyor/compile.cmd | 80 - appveyor/test_connect.py | 4 - setup.py | 285 +-- {tests3 => tests}/__init__.py | 0 {tests3 => tests}/accesstests.py | 0 {tests2 => tests}/dbapi20.py | 0 .../dbapi_SQLParamData_memory__test.py | 0 ...QLParamData_memory__test__requirements.txt | 0 {tests3 => tests}/dbapitests.py | 0 {tests2 => tests}/empty.accdb | Bin {tests2 => tests}/empty.mdb | Bin {tests3 => tests}/exceltests.py | 0 {tests3 => tests}/informixtests.py | 0 {tests3 => tests}/issue802.py | 0 {tests3 => tests}/issue998.py | 0 {tests3 => tests}/mysqltests.py | 0 {tests3 => tests}/run_tests.py | 0 {tests3 => tests}/sparktests.py | 0 {tests3 => tests}/sqldwtests.py | 0 {tests3 => tests}/sqlitetests.py | 0 {tests3 => tests}/sqlservertests.py | 0 {tests3 => tests}/test.py | 0 tests/test_postgresql.py | 591 +++++ {tests2 => tests}/testbase.py | 0 {tests3 => tests}/testutils.py | 0 tests2/accesstests.py | 671 ------ tests2/dbapitests.py | 45 - tests2/exceltests.py | 143 -- tests2/informixtests.py | 1275 ----------- tests2/mysqltests.py | 762 ------ tests2/pgtests.py | 615 ----- tests2/sqldwtests.py | 1499 ------------ tests2/sqlite.db | Bin 2048 -> 0 bytes tests2/sqlitetests.py | 722 ------ tests2/sqlservertests.py | 2036 ----------------- tests2/test.py | 41 - tests2/test.xls | Bin 17920 -> 0 bytes tests2/testutils.py | 119 - tests3/dbapi20.py | 850 ------- tests3/empty.accdb | Bin 311296 -> 0 bytes tests3/empty.mdb | Bin 188416 -> 0 bytes tests3/testbase.py | 25 - 44 files changed, 633 insertions(+), 9152 deletions(-) delete mode 100644 appveyor/after_test.cmd delete mode 100644 appveyor/compile.cmd delete mode 100644 appveyor/test_connect.py rename {tests3 => tests}/__init__.py (100%) rename {tests3 => tests}/accesstests.py (100%) rename {tests2 => tests}/dbapi20.py (100%) mode change 100755 => 100644 rename {tests3 => tests}/dbapi_SQLParamData_memory__test.py (100%) rename {tests3 => tests}/dbapi_SQLParamData_memory__test__requirements.txt (100%) rename {tests3 => tests}/dbapitests.py (100%) rename {tests2 => tests}/empty.accdb (100%) rename {tests2 => tests}/empty.mdb (100%) rename {tests3 => tests}/exceltests.py (100%) rename {tests3 => tests}/informixtests.py (100%) rename {tests3 => tests}/issue802.py (100%) rename {tests3 => tests}/issue998.py (100%) rename {tests3 => tests}/mysqltests.py (100%) rename {tests3 => tests}/run_tests.py (100%) rename {tests3 => tests}/sparktests.py (100%) rename {tests3 => tests}/sqldwtests.py (100%) rename {tests3 => tests}/sqlitetests.py (100%) rename {tests3 => tests}/sqlservertests.py (100%) rename {tests3 => tests}/test.py (100%) create mode 100644 tests/test_postgresql.py rename {tests2 => tests}/testbase.py (100%) mode change 100755 => 100644 rename {tests3 => tests}/testutils.py (100%) delete mode 100755 tests2/accesstests.py delete mode 100755 tests2/dbapitests.py delete mode 100755 tests2/exceltests.py delete mode 100755 tests2/informixtests.py delete mode 100755 tests2/mysqltests.py delete mode 100755 tests2/pgtests.py delete mode 100644 tests2/sqldwtests.py delete mode 100644 tests2/sqlite.db delete mode 100755 tests2/sqlitetests.py delete mode 100755 tests2/sqlservertests.py delete mode 100755 tests2/test.py delete mode 100644 tests2/test.xls delete mode 100755 tests2/testutils.py delete mode 
100644 tests3/dbapi20.py delete mode 100644 tests3/empty.accdb delete mode 100644 tests3/empty.mdb delete mode 100644 tests3/testbase.py diff --git a/README.md b/README.md index 53540f37..8079eb2d 100644 --- a/README.md +++ b/README.md @@ -1,12 +1,10 @@ # pyodbc -[![AppVeyor](https://ci.appveyor.com/api/projects/status/github/mkleehammer/pyodbc?branch=master&svg=true&passingText=Windows%20build&failingText=Windows%20build)](https://ci.appveyor.com/project/mkleehammer/pyodbc) -[![Github Actions - Ubuntu Build](https://github.com/mkleehammer/pyodbc/actions/workflows/ubuntu_build.yml/badge.svg?branch=master)](https://github.com/mkleehammer/pyodbc/actions/workflows/ubuntu_build.yml) +[![Ubuntu build](https://github.com/mkleehammer/pyodbc/actions/workflows/ubuntu_build.yml/badge.svg)](https://github.com/mkleehammer/pyodbc/actions/workflows/ubuntu_build.yml) [![PyPI](https://img.shields.io/pypi/v/pyodbc?color=brightgreen)](https://pypi.org/project/pyodbc/) pyodbc is an open source Python module that makes accessing ODBC databases simple. It -implements the [DB API 2.0](https://www.python.org/dev/peps/pep-0249) specification but is -packed with even more Pythonic convenience. +implements the [DB API 2.0](https://www.python.org/dev/peps/pep-0249) specification but is packed with even more Pythonic convenience. The easiest way to install pyodbc is to use pip: @@ -31,6 +29,3 @@ compiler. See the [docs](https://github.com/mkleehammer/pyodbc/wiki/Install) fo [Documentation](https://github.com/mkleehammer/pyodbc/wiki) [Release Notes](https://github.com/mkleehammer/pyodbc/releases) - -IMPORTANT: Python 2.7 support is being ended. The pyodbc 4.x versions will be the last to -support Python 2.7. The pyodbc 5.x versions will support only Python 3.7 and above. diff --git a/appveyor/after_test.cmd b/appveyor/after_test.cmd deleted file mode 100644 index 69bd132e..00000000 --- a/appveyor/after_test.cmd +++ /dev/null @@ -1,13 +0,0 @@ -IF "%APVYR_GENERATE_WHEELS%" == "true" ( - ECHO *** pip install the "wheel" module - "%PYTHON_HOME%\python" -m pip install wheel --quiet --no-warn-script-location - ECHO. - ECHO *** Generate the wheel file - %WITH_COMPILER% "%PYTHON_HOME%\python" setup.py bdist_wheel - ECHO. - ECHO *** \dist directory listing: - DIR /B dist -) ELSE ( - ECHO *** Skipping generation of the wheel file - ECHO. -) diff --git a/appveyor/compile.cmd b/appveyor/compile.cmd deleted file mode 100644 index d88ded94..00000000 --- a/appveyor/compile.cmd +++ /dev/null @@ -1,80 +0,0 @@ -:: To build extensions for 64 bit Python 2, we need to configure environment -:: variables to use the MSVC 2008 C++ compilers from GRMSDKX_EN_DVD.iso of: -:: MS Windows SDK for Windows 7 and .NET Framework 3.5 (SDK v7.0) -:: -:: To build extensions for 64 bit Python 3, we need to configure environment -:: variables to use the MSVC 2010 C++ compilers from GRMSDKX_EN_DVD.iso of: -:: MS Windows SDK for Windows 7 and .NET Framework 4 (SDK v7.1) -:: -:: 32 bit builds, and 64-bit builds for 3.5 and beyond, do not require specific -:: environment configurations. 
-:: -:: Note: this script needs to be run with the /E:ON and /V:ON flags for the -:: cmd interpreter, at least for (SDK v7.0) -:: -:: More details at: -:: https://github.com/cython/cython/wiki/64BitCythonExtensionsOnWindows -:: http://stackoverflow.com/a/13751649/163740 -:: -:: Author: Olivier Grisel -:: License: CC0 1.0 Universal: http://creativecommons.org/publicdomain/zero/1.0/ -:: -:: The repeated CALL commands at the end of this file look redundant, but -:: if you move them outside the IF clauses, they do not run properly in -:: the SET_SDK_64==Y case, I don't know why. -@ECHO OFF - -SET COMMAND_TO_RUN=%* -SET WIN_SDK_ROOT=C:\Program Files\Microsoft SDKs\Windows -SET WIN_WDK=C:\Program Files (x86)\Windows Kits\10\Include\wdf - -:: Extract the major and minor versions of the current Python interpreter, and bitness -FOR /F "tokens=* USEBACKQ" %%F IN (`%PYTHON_HOME%\python -c "import sys; sys.stdout.write(str(sys.version_info.major))"`) DO ( -SET PYTHON_MAJOR_VERSION=%%F -) -FOR /F "tokens=* USEBACKQ" %%F IN (`%PYTHON_HOME%\python -c "import sys; sys.stdout.write(str(sys.version_info.minor))"`) DO ( -SET PYTHON_MINOR_VERSION=%%F -) -FOR /F "tokens=* USEBACKQ" %%F IN (`%PYTHON_HOME%\python -c "import sys; sys.stdout.write('64' if sys.maxsize > 2**32 else '32')"`) DO ( -SET PYTHON_ARCH=%%F -) -ECHO Inferred Python version (major, minor, arch): %PYTHON_MAJOR_VERSION% %PYTHON_MINOR_VERSION% %PYTHON_ARCH% - -:: Based on the Python version, determine what SDK version to use, and whether -:: to set the SDK for 64-bit. -IF %PYTHON_MAJOR_VERSION% EQU 2 ( - SET WINDOWS_SDK_VERSION="v7.0" - SET SET_SDK_64=Y -) ELSE ( - IF %PYTHON_MAJOR_VERSION% EQU 3 ( - SET WINDOWS_SDK_VERSION="v7.1" - SET SET_SDK_64=N - IF EXIST "%WIN_WDK%" ( - :: See: https://connect.microsoft.com/VisualStudio/feedback/details/1610302/ - REN "%WIN_WDK%" 0wdf - ) - ) ELSE ( - ECHO Unsupported Python version: "%PYTHON_MAJOR_VERSION%" - EXIT 1 - ) -) - -IF %PYTHON_ARCH% EQU 64 ( - IF %SET_SDK_64% == Y ( - ECHO Configuring Windows SDK %WINDOWS_SDK_VERSION% for Python %PYTHON_MAJOR_VERSION% on a 64 bit architecture - SET DISTUTILS_USE_SDK=1 - SET MSSdk=1 - "%WIN_SDK_ROOT%\%WINDOWS_SDK_VERSION%\Setup\WindowsSdkVer.exe" -q -version:%WINDOWS_SDK_VERSION% - "%WIN_SDK_ROOT%\%WINDOWS_SDK_VERSION%\Bin\SetEnv.cmd" /x64 /release - ECHO Executing: %COMMAND_TO_RUN% - CALL %COMMAND_TO_RUN% || EXIT 1 - ) ELSE ( - ECHO Using default MSVC build environment for 64 bit architecture - ECHO Executing: %COMMAND_TO_RUN% - CALL %COMMAND_TO_RUN% || EXIT 1 - ) -) ELSE ( - ECHO Using default MSVC build environment for 32 bit architecture - ECHO Executing: %COMMAND_TO_RUN% - CALL %COMMAND_TO_RUN% || EXIT 1 -) diff --git a/appveyor/test_connect.py b/appveyor/test_connect.py deleted file mode 100644 index 1acf7ebb..00000000 --- a/appveyor/test_connect.py +++ /dev/null @@ -1,4 +0,0 @@ -import sys -import pyodbc -c = pyodbc.connect(sys.argv[1]) -c.close() diff --git a/setup.py b/setup.py index 3f150612..e8d3b3d3 100755 --- a/setup.py +++ b/setup.py @@ -1,158 +1,69 @@ #!/usr/bin/env python +VERSION = '5.0.0' + import sys, os, re, shlex from os.path import exists, abspath, dirname, join, isdir, relpath, expanduser +from inspect import cleandoc -try: - # Allow use of setuptools so eggs can be built. 
- from setuptools import setup, Command -except ImportError: - from distutils.core import setup, Command - -from distutils.extension import Extension -from distutils.errors import * - -if sys.hexversion >= 0x03000000: - from configparser import ConfigParser -else: - from ConfigParser import ConfigParser - -OFFICIAL_BUILD = 9999 - -# This version identifier should refer to the NEXT release, not the -# current one. After each release, the version should be incremented. -VERSION = '4.0.39' - - -def _print(s): - # Python 2/3 compatibility - sys.stdout.write(s + '\n') - - -class VersionCommand(Command): - - description = "prints the pyodbc version, determined from git" - - user_options = [] - - def initialize_options(self): - self.verbose = 0 - - def finalize_options(self): - pass - - def run(self): - version_str, _version = get_version() - sys.stdout.write(version_str + '\n') - - -class TagsCommand(Command): - - description = 'runs etags' - - user_options = [] - - def initialize_options(self): - pass - - def finalize_options(self): - pass - - def run(self): - # Windows versions of etag do not seem to expand wildcards (which Unix shells normally do for Unix utilities), - # so find all of the files ourselves. - files = [ join('src', f) for f in os.listdir('src') if f.endswith(('.h', '.cpp')) ] - cmd = 'etags %s' % ' '.join(files) - return os.system(cmd) +from setuptools import setup, Command +from setuptools.extension import Extension +from setuptools.errors import * +from configparser import ConfigParser def main(): - - version_str, version = get_version() - - with open(join(dirname(abspath(__file__)), 'README.md')) as f: - long_description = f.read() - - settings = get_compiler_settings(version_str) + settings = get_compiler_settings() files = [ relpath(join('src', f)) for f in os.listdir('src') if f.endswith('.cpp') ] if exists('MANIFEST'): os.remove('MANIFEST') - kwargs = { - 'name': "pyodbc", - 'version': version_str, - 'description': "DB API Module for ODBC", - - 'long_description': long_description, - 'long_description_content_type': 'text/markdown', - - 'maintainer': "Michael Kleehammer", - 'maintainer_email': "michael@kleehammer.com", - - 'ext_modules': [Extension('pyodbc', sorted(files), **settings)], - - 'packages': [''], - 'package_dir': {'': 'src'}, - 'package_data': {'': ['pyodbc.pyi']}, # places pyodbc.pyi alongside pyodbc.{platform}.{pyd|so} in site-packages - - 'license': 'MIT', - - 'python_requires': '>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*', - - 'classifiers': ['Development Status :: 5 - Production/Stable', - 'Intended Audience :: Developers', - 'Intended Audience :: System Administrators', - 'License :: OSI Approved :: MIT License', - 'Operating System :: Microsoft :: Windows', - 'Operating System :: POSIX', - 'Programming Language :: Python', - 'Programming Language :: Python :: 2', - 'Programming Language :: Python :: 2.7', - 'Programming Language :: Python :: 3', - 'Programming Language :: Python :: 3.6', - 'Programming Language :: Python :: 3.7', - 'Programming Language :: Python :: 3.8', - 'Programming Language :: Python :: 3.9', - 'Programming Language :: Python :: 3.10', - 'Programming Language :: Python :: 3.11', - 'Topic :: Database', - ], - - 'url': 'https://github.com/mkleehammer/pyodbc', - 'cmdclass': { 'version' : VersionCommand, - 'tags' : TagsCommand } - } - - if sys.hexversion >= 0x02060000: - kwargs['options'] = { + setup( + name="pyodbc", + version=VERSION, + description="DB API Module for ODBC", + long_description=cleandoc(""" + pyodbc 
is an open source Python module that makes accessing ODBC databases simple. + It implements the [DB API 2.0](https://www.python.org/dev/peps/pep-0249) + specification but is packed with even more Pythonic convenience."""), + maintainer= "Michael Kleehammer", + maintainer_email="michael@kleehammer.com", + url='https://github.com/mkleehammer/pyodbc', + ext_modules=[Extension('pyodbc', sorted(files), **settings)], + data_files=[ + ('', ['src/pyodbc.pyi']) # places pyodbc.pyi alongside pyodbc.py in site-packages + ], + license='MIT', + python_requires='>=3.7', + classifiers=['Development Status :: 5 - Production/Stable', + 'Intended Audience :: Developers', + 'Intended Audience :: System Administrators', + 'License :: OSI Approved :: MIT License', + 'Operating System :: Microsoft :: Windows', + 'Operating System :: POSIX', + 'Programming Language :: Python', + 'Programming Language :: Python :: 3', + 'Topic :: Database', + ], + options={ 'bdist_wininst': {'user_access_control' : 'auto'} - } - - setup(**kwargs) + } + ) -def get_compiler_settings(version_str): +def get_compiler_settings(): settings = { 'extra_compile_args' : [], 'extra_link_args': [], 'libraries': [], 'include_dirs': [], - 'define_macros' : [ ('PYODBC_VERSION', version_str) ] + 'define_macros' : [ ('PYODBC_VERSION', VERSION) ] } - # This isn't the best or right way to do this, but I don't see how someone is supposed to sanely subclass the build - # command. - for option in ['assert', 'trace', 'leak-check']: - try: - sys.argv.remove('--%s' % option) - settings['define_macros'].append(('PYODBC_%s' % option.replace('-', '_').upper(), 1)) - except ValueError: - pass - if os.name == 'nt': settings['extra_compile_args'].extend([ '/Wall', @@ -251,121 +162,5 @@ def get_compiler_settings(version_str): return settings -def get_version(): - """ - Returns the version of the product as (description, [major,minor,micro,beta]). - - If the release is official, `beta` will be 9999 (OFFICIAL_BUILD). - - 1. If in a git repository, use the latest tag (git describe). - 2. If in an unzipped source directory (from setup.py sdist), - read the version from the PKG-INFO file. - 3. Use 4.0.0.dev0 and complain a lot. - """ - # My goal is to (1) provide accurate tags for official releases but (2) not have to manage tags for every test - # release. - # - # Official versions are tagged using 3 numbers: major, minor, micro. A build of a tagged version should produce - # the version using just these pieces, such as 2.1.4. - # - # Unofficial versions are "working towards" the next version. So the next unofficial build after 2.1.4 would be a - # beta for 2.1.5. Using 'git describe' we can find out how many changes have been made after 2.1.4 and we'll use - # this count as the beta id (beta1, beta2, etc.) - # - # Since the 4 numbers are put into the Windows DLL, we want to make sure the beta versions sort *before* the - # official, so we set the official build number to 9999, but we don't show it. - - name = None # branch/feature name. Should be None for official builds. - numbers = None # The 4 integers that make up the version. - - # If we are in the CICD pipeline, use the VERSION. There is no tagging information available - # because Github Actions fetches the repo with the options --no-tags and --depth=1. - - # CI providers (Github Actions / Travis / CircleCI / AppVeyor / etc.) typically set CI to "true", but - # in cibuildwheel linux containers, the usual CI env vars are not available, only CIBUILDWHEEL. 
- if os.getenv('CI', 'false').lower() == 'true' or 'CIBUILDWHEEL' in os.environ: - name = VERSION - numbers = [int(p) for p in VERSION.split('.')] - return name, numbers - - # If this is a source release the version will have already been assigned and be in the PKG-INFO file. - - name, numbers = _get_version_pkginfo() - - # If not a source release, we should be in a git repository. Look for the latest tag. - - if not numbers: - name, numbers = _get_version_git() - - if not numbers: - _print('WARNING: Unable to determine version. Using 4.0.0.0') - name, numbers = '4.0.dev0', [4,0,0,0] - - return name, numbers - - -def _get_version_pkginfo(): - filename = join(dirname(abspath(__file__)), 'PKG-INFO') - if exists(filename): - re_ver = re.compile(r'^Version: \s+ (\d+)\.(\d+)\.(\d+) (?: b(\d+))?', re.VERBOSE) - for line in open(filename): - match = re_ver.search(line) - if match: - name = line.split(':', 1)[1].strip() - numbers = [int(n or 0) for n in match.groups()[:3]] - numbers.append(int(match.group(4) or OFFICIAL_BUILD)) # don't use 0 as a default for build - return name, numbers - - return None, None - - -def _get_version_git(): - """ - If this is a git repo, returns the version as text and the version as a list of 4 subparts: - ("4.0.33", [4, 0, 33, 9999]). - - If this is not a git repo, (None, None) is returned. - """ - n, result = getoutput("git describe --tags --match [0-9]*") - if n: - _print('WARNING: git describe failed with: %s %s' % (n, result)) - return None, None - match = re.match(r'(\d+).(\d+).(\d+) (?: -(\d+)-g[0-9a-z]+)?', result, re.VERBOSE) - if not match: - return None, None - - numbers = [int(n or OFFICIAL_BUILD) for n in match.groups()] - if numbers[-1] == OFFICIAL_BUILD: - name = '%s.%s.%s' % tuple(numbers[:3]) - if numbers[-1] != OFFICIAL_BUILD: - # This is a beta of the next micro release, so increment the micro number to reflect this. - numbers[-2] += 1 - name = '%s.%s.%sb%d' % tuple(numbers) - - n, result = getoutput('git rev-parse --abbrev-ref HEAD') - - if result == 'HEAD': - # We are not on a branch. In the past we would add "+commitHHHH" to it, but this - # interferes with the CI system which checks out by tag name. The goal of the version - # numbers is to be reproducible, so we may want to put this back if we detect the - # current commit is not on the master branch. 
- - # n, result = getoutput('git rev-parse --short HEAD') - # name = name + '+commit' + result - - pass - else: - if result != 'master' and not re.match(r'^v\d+$', result): - name = name + '+' + result.replace('-', '') - - return name, numbers - - -def getoutput(cmd): - pipe = os.popen(cmd, 'r') - text = pipe.read().rstrip('\n') - status = pipe.close() or 0 - return status, text - if __name__ == '__main__': main() diff --git a/tests3/__init__.py b/tests/__init__.py similarity index 100% rename from tests3/__init__.py rename to tests/__init__.py diff --git a/tests3/accesstests.py b/tests/accesstests.py similarity index 100% rename from tests3/accesstests.py rename to tests/accesstests.py diff --git a/tests2/dbapi20.py b/tests/dbapi20.py old mode 100755 new mode 100644 similarity index 100% rename from tests2/dbapi20.py rename to tests/dbapi20.py diff --git a/tests3/dbapi_SQLParamData_memory__test.py b/tests/dbapi_SQLParamData_memory__test.py similarity index 100% rename from tests3/dbapi_SQLParamData_memory__test.py rename to tests/dbapi_SQLParamData_memory__test.py diff --git a/tests3/dbapi_SQLParamData_memory__test__requirements.txt b/tests/dbapi_SQLParamData_memory__test__requirements.txt similarity index 100% rename from tests3/dbapi_SQLParamData_memory__test__requirements.txt rename to tests/dbapi_SQLParamData_memory__test__requirements.txt diff --git a/tests3/dbapitests.py b/tests/dbapitests.py similarity index 100% rename from tests3/dbapitests.py rename to tests/dbapitests.py diff --git a/tests2/empty.accdb b/tests/empty.accdb similarity index 100% rename from tests2/empty.accdb rename to tests/empty.accdb diff --git a/tests2/empty.mdb b/tests/empty.mdb similarity index 100% rename from tests2/empty.mdb rename to tests/empty.mdb diff --git a/tests3/exceltests.py b/tests/exceltests.py similarity index 100% rename from tests3/exceltests.py rename to tests/exceltests.py diff --git a/tests3/informixtests.py b/tests/informixtests.py similarity index 100% rename from tests3/informixtests.py rename to tests/informixtests.py diff --git a/tests3/issue802.py b/tests/issue802.py similarity index 100% rename from tests3/issue802.py rename to tests/issue802.py diff --git a/tests3/issue998.py b/tests/issue998.py similarity index 100% rename from tests3/issue998.py rename to tests/issue998.py diff --git a/tests3/mysqltests.py b/tests/mysqltests.py similarity index 100% rename from tests3/mysqltests.py rename to tests/mysqltests.py diff --git a/tests3/run_tests.py b/tests/run_tests.py similarity index 100% rename from tests3/run_tests.py rename to tests/run_tests.py diff --git a/tests3/sparktests.py b/tests/sparktests.py similarity index 100% rename from tests3/sparktests.py rename to tests/sparktests.py diff --git a/tests3/sqldwtests.py b/tests/sqldwtests.py similarity index 100% rename from tests3/sqldwtests.py rename to tests/sqldwtests.py diff --git a/tests3/sqlitetests.py b/tests/sqlitetests.py similarity index 100% rename from tests3/sqlitetests.py rename to tests/sqlitetests.py diff --git a/tests3/sqlservertests.py b/tests/sqlservertests.py similarity index 100% rename from tests3/sqlservertests.py rename to tests/sqlservertests.py diff --git a/tests3/test.py b/tests/test.py similarity index 100% rename from tests3/test.py rename to tests/test.py diff --git a/tests/test_postgresql.py b/tests/test_postgresql.py new file mode 100644 index 00000000..4146ffaa --- /dev/null +++ b/tests/test_postgresql.py @@ -0,0 +1,591 @@ +""" +Unit tests for PostgreSQL +""" +# -*- coding: utf-8 -*- + +import 
uuid +from decimal import Decimal + +import pyodbc, pytest + + +def connect(autocommit=False, attrs_before=None): + return pyodbc.connect('DSN=pgtest', autocommit=autocommit, attrs_before=attrs_before) + + +@pytest.fixture +def cursor(): + cnxn = connect() + cur = cnxn.cursor() + + cur.execute("drop table if exists t1") + cur.execute("drop table if exists t2") + cur.execute("drop table if exists t3") + cnxn.commit() + + yield cur + + if not cnxn.closed: + cur.close() + cnxn.close() + + +def _generate_str(length, encoding=None): + """ + Returns either a string or bytes, depending on whether encoding is provided, + that is `length` elements long. + + If length is None, None is returned. This simplifies the tests by letting us put None into + an array of other lengths and pass them here, moving the special case check into one place. + """ + if length is None: + return None + + seed = '0123456789-abcdefghijklmnopqrstuvwxyz-' + + if length <= len(seed): + v = seed + else: + c = (length + len(seed) - 1 // len(seed)) + v = seed * c + + v = v[:length] + if encoding: + v = v.encode(encoding) + + return v + + +def test_text(cursor): + cursor.execute("create table t1(col text)") + + # Two different read code paths exist based on the length. Using 100 and 4000 will ensure + # both are tested. + for length in [None, 0, 100, 1000, 4000]: + cursor.execute("truncate table t1") + param = _generate_str(length) + cursor.execute("insert into t1 values (?)", param) + result = cursor.execute("select col from t1").fetchval() + assert result == param + + +def test_text_many(cursor): + + # This shouldn't make a difference, but we'll ensure we can read and write from multiple + # columns at the same time. + + cursor.execute("create table t1(col1 text, col2 text, col3 text)") + + v1 = 'ABCDEFGHIJ' * 30 + v2 = '0123456789' * 30 + v3 = '9876543210' * 30 + + cursor.execute("insert into t1(col1, col2, col3) values (?,?,?)", v1, v2, v3) + row = cursor.execute("select col1, col2, col3 from t1").fetchone() + + assert v1 == row.col1 + assert v2 == row.col2 + assert v3 == row.col3 + + +def test_chinese(cursor): + v = '我的' + row = cursor.execute("SELECT N'我的' AS name").fetchone() + assert row[0] == v + + rows = cursor.execute("SELECT N'我的' AS name").fetchall() + assert rows[0][0] == v + + +def test_bytea(cursor): + cursor.execute("create table t1(col bytea)") + + for length in [None, 0, 100, 1000, 4000]: + cursor.execute("truncate table t1") + param = _generate_str(length, 'utf8') + cursor.execute("insert into t1 values (?)", param) + result = cursor.execute("select col from t1").fetchval() + assert result == param + + +def test_bytearray(cursor): + """ + We will accept a bytearray and treat it like bytes, but when reading we'll still + get bytes back. + """ + cursor.execute("create table t1(col bytea)") + + # Two different read code paths exist based on the length. Using 100 and 4000 will ensure + # both are tested. 
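+    # None is omitted from the lengths below because a bytearray cannot be constructed from
+    # None; the NULL case is already covered by test_bytea above.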
+ for length in [0, 100, 1000, 4000]: + cursor.execute("truncate table t1") + bytes = _generate_str(length, 'utf8') + param = bytearray(bytes) + cursor.execute("insert into t1 values (?)", param) + result = cursor.execute("select col from t1").fetchval() + assert result == bytes + + +def test_int(cursor): + cursor.execute("create table t1(col int)") + for param in [None, -1, 0, 1, 0x7FFFFFFF]: + cursor.execute("truncate table t1") + cursor.execute("insert into t1 values (?)", param) + result = cursor.execute("select col from t1").fetchval() + assert result == param + + +def test_bigint(cursor): + cursor.execute("create table t1(col bigint)") + for param in [None, -1, 0, 1, 0x7FFFFFFF, 0xFFFFFFFF, 0x123456789]: + cursor.execute("truncate table t1") + cursor.execute("insert into t1 values (?)", param) + result = cursor.execute("select col from t1").fetchval() + assert result == param + + +def test_float(cursor): + cursor.execute("create table t1(col float)") + for param in [None, -1, 0, 1, -200, 20000]: + cursor.execute("truncate table t1") + cursor.execute("insert into t1 values (?)", param) + result = cursor.execute("select col from t1").fetchval() + assert result == param + + +def test_decimal(cursor): + cursor.execute("create table t1(col decimal(20,6))") + + # Note: Use strings to initialize the decimals to eliminate floating point rounding. + # + # Also, the ODBC docs show the value 100010 in the C struct, so I've included it here, + # along with a couple of shifted versions. + params = [Decimal(n) for n in "-1000.10 -1234.56 -1 0 1 1000.10 1234.56 100010 123456789.21".split()] + params.append(None) + + for param in params: + cursor.execute("truncate table t1") + cursor.execute("insert into t1 values (?)", param) + result = cursor.execute("select col from t1").fetchval() + assert result == param + + +def test_numeric(cursor): + cursor.execute("create table t1(col numeric(20,6))") + + # Note: Use strings to initialize the decimals to eliminate floating point rounding. + params = [Decimal(n) for n in "-1234.56 -1 0 1 1234.56 123456789.21".split()] + params.append(None) + + for param in params: + cursor.execute("truncate table t1") + cursor.execute("insert into t1 values (?)", param) + result = cursor.execute("select col from t1").fetchval() + assert result == param + + +def test_maxwrite(cursor): + # If we write more than `maxwrite` bytes, pyodbc will switch from binding the data all at + # once to providing it at execute time with SQLPutData. The default maxwrite is 1GB so + # this is rarely needed in PostgreSQL but I need to test the functionality somewhere. + cursor.connection.maxwrite = 300 + + cursor.execute("create table t1(col text)") + param = _generate_str(400) + cursor.execute("insert into t1 values (?)", param) + result = cursor.execute("select col from t1").fetchval() + assert result == param + + +def test_nonnative_uuid(cursor): + pyodbc.native_uuid = False + + param = uuid.uuid4() + cursor.execute("create table t1(n uuid)") + cursor.execute("insert into t1 values (?)", param) + + result = cursor.execute("select n from t1").fetchval() + assert isinstance(result, str) + assert result == str(param).upper() + + +def test_native_uuid(cursor): + pyodbc.native_uuid = True + # When true, we should return a uuid.UUID object. 
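+    # native_uuid is a module-wide pyodbc setting, and test_nonnative_uuid above switches it
+    # off, so it is turned back on here before round-tripping the value.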
+ + param = uuid.uuid4() + cursor.execute("create table t1(n uuid)") + cursor.execute("insert into t1 values (?)", param) + + result = cursor.execute("select n from t1").fetchval() + assert isinstance(result, uuid.UUID) + assert param == result + + +def test_close_cnxn(cursor): + """Make sure using a Cursor after closing its connection doesn't crash.""" + + cursor.execute("create table t1(id integer, s varchar(20))") + cursor.execute("insert into t1 values (?,?)", 1, 'test') + cursor.execute("select * from t1") + + cursor.connection.close() + + # Now that the connection is closed, we expect an exception. (If the code attempts to use + # the HSTMT, we'll get an access violation instead.) + + with pytest.raises(pyodbc.ProgrammingError): + cursor.execute("select * from t1") + + +def test_version(): + assert len(pyodbc.version.split('.')) == 3 + + +def test_rowcount(cursor): + assert cursor.rowcount == -1 + # The spec says it should be -1 when not in use. + + cursor.execute("create table t1(col int)") + count = 4 + for i in range(count): + cursor.execute("insert into t1 values (?)", i) + + cursor.execute("select * from t1") + assert cursor.rowcount == count + + cursor.execute("update t1 set col=col+1") + assert cursor.rowcount == count + + cursor.execute("delete from t1") + assert cursor.rowcount == count + + # This is a different code path - the value internally is SQL_NO_DATA instead of an empty + # result set. Just make sure it doesn't crash. + cursor.execute("delete from t1") + assert cursor.rowcount == 0 + + # IMPORTANT: The ODBC spec says it should be -1 after the create table, but the PostgreSQL + # driver is telling pyodbc the rowcount is 0. Since we have no way of knowing when to + # override it, we'll just update the test to ensure it is consistently zero. + + cursor.execute("create table t2(i int)") + assert cursor.rowcount == 0 + + +def test_row_description(cursor): + """ + Ensure Cursor.description is accessible as Row.cursor_description. + """ + cursor.execute("create table t1(col1 int, col2 char(3))") + cursor.execute("insert into t1 values(1, 'abc')") + + row = cursor.execute("select col1, col2 from t1").fetchone() + + assert row.cursor_description == cursor.description + + +def test_lower_case(cursor): + "Ensure pyodbc.lowercase forces returned column names to lowercase." + + try: + pyodbc.lowercase = True + + cursor.execute("create table t1(Abc int, dEf int)") + cursor.execute("select * from t1") + + names = {t[0] for t in cursor.description} + assert names == {'abc', 'def'} + finally: + pyodbc.lowercase = False + + +def test_executemany(cursor): + + cursor.execute("create table t1(col1 int, col2 varchar(10))") + params = [(i, str(i)) for i in range(1, 6)] + + # Without fast_executemany + + cursor.executemany("insert into t1(col1, col2) values (?,?)", params) + cursor.execute("select col1, col2 from t1 order by col1") + results = [tuple(row) for row in cursor] + assert results == params + + # With fast_executemany + + try: + pyodbc.fast_executemany = True + cursor.execute("truncate table t1") + cursor.executemany("insert into t1(col1, col2) values (?,?)", params) + cursor.execute("select col1, col2 from t1 order by col1") + results = [tuple(row) for row in cursor] + assert results == params + finally: + pyodbc.fast_executemany = False + + +def test_executemany_failure(cursor): + """ + Ensure that an exception is raised if one query in an executemany fails. 
+ """ + cursor.execute("create table t1(a int, b varchar(10))") + + params = [ (1, 'good'), + ('error', 'not an int'), + (3, 'good') ] + + with pytest.raises(pyodbc.Error): + cursor.executemany("insert into t1(a, b) value (?, ?)", params) + + +def test_row_slicing(cursor): + cursor.execute("create table t1(a int, b int, c int, d int)") + cursor.execute("insert into t1 values(1,2,3,4)") + + row = cursor.execute("select * from t1").fetchone() + + result = row[:] + assert result is row # returned as is + + result = row[:-1] + assert result == (1, 2, 3) # returned as tuple + + result = row[0:4] + assert result is row + + +def test_drivers(): + p = pyodbc.drivers() + assert isinstance(p, list) + + +def test_datasources(): + p = pyodbc.dataSources() + assert isinstance(p, dict) + + +def test_getinfo_string(cursor): + value = cursor.connection.getinfo(pyodbc.SQL_CATALOG_NAME_SEPARATOR) + assert isinstance(value, str) + + +def test_getinfo_bool(cursor): + value = cursor.connection.getinfo(pyodbc.SQL_ACCESSIBLE_TABLES) + assert isinstance(value, bool) + + +def test_getinfo_int(cursor): + value = cursor.connection.getinfo(pyodbc.SQL_DEFAULT_TXN_ISOLATION) + assert isinstance(value, int) + + +def test_getinfo_smallint(cursor): + value = cursor.connection.getinfo(pyodbc.SQL_CONCAT_NULL_BEHAVIOR) + assert isinstance(value, int) + +def test_cnxn_execute_error(cursor): + """ + Make sure that Connection.execute (not Cursor) errors are not "eaten". + + GitHub issue #74 + """ + cursor.execute("create table t1(a int primary key)") + cursor.execute("insert into t1 values (1)") + with pytest.raises(pyodbc.Error): + cursor.connection.execute("insert into t1 values (1)") + +def test_row_repr(cursor): + cursor.execute("create table t1(a int, b int, c int, d int)") + cursor.execute("insert into t1 values(1,2,3,4)") + + row = cursor.execute("select * from t1").fetchone() + + result = str(row) + assert result == "(1, 2, 3, 4)" + + result = str(row[:-1]) + assert result == "(1, 2, 3)" + + result = str(row[:1]) + assert result == "(1,)" + + +def test_autocommit(cursor): + assert cursor.connection.autocommit is False + othercnxn = connect(autocommit=True) + assert othercnxn.autocommit is True + othercnxn.autocommit = False + assert othercnxn.autocommit is False + +def test_exc_integrity(cursor): + "Make sure an IntegretyError is raised" + # This is really making sure we are properly encoding and comparing the SQLSTATEs. + cursor.execute("create table t1(s1 varchar(10) primary key)") + cursor.execute("insert into t1 values ('one')") + with pytest.raises(pyodbc.IntegrityError): + cursor.execute("insert into t1 values ('one')") + + +def test_cnxn_set_attr_before(): + # I don't have a getattr right now since I don't have a table telling me what kind of + # value to expect. For now just make sure it doesn't crash. + # From the unixODBC sqlext.h header file. + SQL_ATTR_PACKET_SIZE = 112 + _cnxn = connect(attrs_before={ SQL_ATTR_PACKET_SIZE : 1024 * 32 }) + + +def test_cnxn_set_attr(cursor): + # I don't have a getattr right now since I don't have a table telling me what kind of + # value to expect. For now just make sure it doesn't crash. + # From the unixODBC sqlext.h header file. 
+ SQL_ATTR_ACCESS_MODE = 101 + SQL_MODE_READ_ONLY = 1 + cursor.connection.set_attr(SQL_ATTR_ACCESS_MODE, SQL_MODE_READ_ONLY) + + +def test_columns(cursor): + driver_version = tuple( + int(x) for x in cursor.connection.getinfo(pyodbc.SQL_DRIVER_VER).split(".") + ) + + def _get_column_size(row): + # the driver changed the name of the returned columns in version 13.02. + # see https://odbc.postgresql.org/docs/release.html, release 13.02.0000, change 6. + return row.column_size if driver_version >= (13, 2, 0) else row.precision + + # When using aiohttp, `await cursor.primaryKeys('t1')` was raising the error + # + # Error: TypeError: argument 2 must be str, not None + # + # I'm not sure why, but PyArg_ParseTupleAndKeywords fails if you use "|s" for an + # optional string keyword when calling indirectly. + + cursor.execute("create table t1(a int, b varchar(3), xΏz varchar(4))") + + cursor.columns('t1') + results = {row.column_name: row for row in cursor} + row = results['a'] + assert row.type_name == 'int4', row.type_name + row = results['b'] + assert row.type_name == 'varchar' + assert _get_column_size(row) == 3, _get_column_size(row) + row = results['xΏz'] + assert row.type_name == 'varchar' + assert _get_column_size(row) == 4, _get_column_size(row) + + # Now do the same, but specifically pass in None to one of the keywords. Old versions + # were parsing arguments incorrectly and would raise an error. (This crops up when + # calling indirectly like columns(*args, **kwargs) which aiodbc does.) + + cursor.columns('t1', schema=None, catalog=None) + results = {row.column_name: row for row in cursor} + row = results['a'] + assert row.type_name == 'int4', row.type_name + row = results['b'] + assert row.type_name == 'varchar' + assert _get_column_size(row) == 3 + +def test_cancel(cursor): + # I'm not sure how to reliably cause a hang to cancel, so for now we'll settle with + # making sure SQLCancel is called correctly. + cursor.execute("select 1") + cursor.cancel() + +def test_emoticons_as_parameter(cursor): + # https://github.com/mkleehammer/pyodbc/issues/423 + # + # When sending a varchar parameter, pyodbc is supposed to set ColumnSize to the number + # of characters. Ensure it works even with 4-byte characters. + # + # http://www.fileformat.info/info/unicode/char/1f31c/index.htm + + v = "x \U0001F31C z" + + cursor.execute("CREATE TABLE t1(s varchar(100))") + cursor.execute("insert into t1 values (?)", v) + + result = cursor.execute("select s from t1").fetchone()[0] + + assert result == v + +def test_emoticons_as_literal(cursor): + # https://github.com/mkleehammer/pyodbc/issues/630 + + v = "x \U0001F31C z" + + cursor.execute("CREATE TABLE t1(s varchar(100))") + cursor.execute(f"insert into t1 values ('{v}')") + + result = cursor.execute("select s from t1").fetchone()[0] + + assert result == v + + +def test_cursor_messages(cursor): + """ + Test the Cursor.messages attribute. + """ + # Using INFO message level because they are always sent to the client regardless of + + # client_min_messages: https://www.postgresql.org/docs/11/runtime-config-client.html + for msg in ('hello world', 'ABCDEFGHIJ' * 800): + cursor.execute(f""" + CREATE OR REPLACE PROCEDURE test_cursor_messages() + LANGUAGE plpgsql + AS $$ + BEGIN + RAISE INFO '{msg}' USING ERRCODE = '01000'; + END; + $$; + """) + cursor.execute("CALL test_cursor_messages();") + messages = cursor.messages + + # There is a maximum size for these so the second msg will actually generate a bunch of + # messages. 
To make it easier to compare, we'll stitch them back together. + + if len(messages) > 1: + concat = ''.join(t[1] for t in messages) + messages = [(messages[0][0], concat)] + + assert messages == [('[01000] (-1)', f'INFO: {msg}')] + + +def test_output_conversion(cursor): + # Note the use of SQL_WVARCHAR, not SQL_VARCHAR. + + def convert(value): + # The value is the raw bytes (as a bytes object) read from the + # database. We'll simply add an X at the beginning at the end. + return 'X' + value.decode('latin1') + 'X' + + cursor.execute("create table t1(n int, v varchar(10))") + cursor.execute("insert into t1 values (1, '123.45')") + + cursor.connection.add_output_converter(pyodbc.SQL_WVARCHAR, convert) + value = cursor.execute("select v from t1").fetchone()[0] + assert value == 'X123.45X' + + # Clear all conversions and try again. There should be no Xs this time. + cursor.connection.clear_output_converters() + value = cursor.execute("select v from t1").fetchone()[0] + assert value == '123.45' + + # Same but clear using remove_output_converter. + cursor.connection.add_output_converter(pyodbc.SQL_WVARCHAR, convert) + value = cursor.execute("select v from t1").fetchone()[0] + assert value == 'X123.45X' + + cursor.connection.remove_output_converter(pyodbc.SQL_WVARCHAR) + value = cursor.execute("select v from t1").fetchone()[0] + assert value == '123.45' + + # And lastly, clear by passing None for the converter. + cursor.connection.add_output_converter(pyodbc.SQL_WVARCHAR, convert) + value = cursor.execute("select v from t1").fetchone()[0] + assert value == 'X123.45X' + + cursor.connection.add_output_converter(pyodbc.SQL_WVARCHAR, None) + value = cursor.execute("select v from t1").fetchone()[0] + assert value == '123.45' diff --git a/tests2/testbase.py b/tests/testbase.py old mode 100755 new mode 100644 similarity index 100% rename from tests2/testbase.py rename to tests/testbase.py diff --git a/tests3/testutils.py b/tests/testutils.py similarity index 100% rename from tests3/testutils.py rename to tests/testutils.py diff --git a/tests2/accesstests.py b/tests2/accesstests.py deleted file mode 100755 index 2cd0fc55..00000000 --- a/tests2/accesstests.py +++ /dev/null @@ -1,671 +0,0 @@ -#!/usr/bin/python - -usage="""\ -usage: %prog [options] filename - -Unit tests for Microsoft Access - -These run using the version from the 'build' directory, not the version -installed into the Python directories. You must run python setup.py build -before running the tests. - -To run, pass the file EXTENSION of an Access database on the command line: - - accesstests accdb - -An empty Access 2000 database (empty.mdb) or an empty Access 2007 database -(empty.accdb), are automatically created for the tests. 
- -To run a single test, use the -t option: - - accesstests -t unicode_null accdb - -If you want to report an error, it would be helpful to include the driver information -by using the verbose flag and redirecting the output to a file: - - accesstests -v accdb >& results.txt - -You can pass the verbose flag twice for more verbose output: - - accesstests -vv accdb -""" - -# Access SQL data types: http://msdn2.microsoft.com/en-us/library/bb208866.aspx - -import sys, os, re -import unittest -from decimal import Decimal -from datetime import datetime, date, time -from os.path import abspath, dirname, join -import shutil -from testutils import * - -CNXNSTRING = None - -_TESTSTR = '0123456789-abcdefghijklmnopqrstuvwxyz-' - -def _generate_test_string(length): - """ - Returns a string of composed of `seed` to make a string `length` characters long. - - To enhance performance, there are 3 ways data is read, based on the length of the value, so most data types are - tested with 3 lengths. This function helps us generate the test data. - - We use a recognizable data set instead of a single character to make it less likely that "overlap" errors will - be hidden and to help us manually identify where a break occurs. - """ - if length <= len(_TESTSTR): - return _TESTSTR[:length] - - c = (length + len(_TESTSTR)-1) / len(_TESTSTR) - v = _TESTSTR * c - return v[:length] - - -class AccessTestCase(unittest.TestCase): - - SMALL_FENCEPOST_SIZES = [ 0, 1, 254, 255 ] # text fields <= 255 - LARGE_FENCEPOST_SIZES = [ 256, 270, 304, 508, 510, 511, 512, 1023, 1024, 2047, 2048, 4000, 4095, 4096, 4097, 10 * 1024, 20 * 1024 ] - - ANSI_FENCEPOSTS = [ _generate_test_string(size) for size in SMALL_FENCEPOST_SIZES ] - UNICODE_FENCEPOSTS = [ unicode(s) for s in ANSI_FENCEPOSTS ] - IMAGE_FENCEPOSTS = ANSI_FENCEPOSTS + [ _generate_test_string(size) for size in LARGE_FENCEPOST_SIZES ] - - def __init__(self, method_name): - unittest.TestCase.__init__(self, method_name) - - def setUp(self): - self.cnxn = pyodbc.connect(CNXNSTRING) - self.cursor = self.cnxn.cursor() - - # https://docs.microsoft.com/en-us/sql/odbc/microsoft/desktop-database-driver-performance-issues?view=sql-server-2017 - # - # As of the 4.0 drivers, you have to send as Unicode? - self.cnxn.setencoding(str, encoding='utf-16le') - - for i in range(3): - try: - self.cursor.execute("drop table t%d" % i) - self.cnxn.commit() - except: - pass - - self.cnxn.rollback() - - def tearDown(self): - try: - self.cursor.close() - self.cnxn.close() - except: - # If we've already closed the cursor or connection, exceptions are thrown. 
- pass - - def test_multiple_bindings(self): - "More than one bind and select on a cursor" - self.cursor.execute("create table t1(n int)") - self.cursor.execute("insert into t1 values (?)", 1) - self.cursor.execute("insert into t1 values (?)", 2) - self.cursor.execute("insert into t1 values (?)", 3) - for i in range(3): - self.cursor.execute("select n from t1 where n < ?", 10) - self.cursor.execute("select n from t1 where n < 3") - - - def test_different_bindings(self): - self.cursor.execute("create table t1(n int)") - self.cursor.execute("create table t2(d datetime)") - self.cursor.execute("insert into t1 values (?)", 1) - self.cursor.execute("insert into t2 values (?)", datetime.now()) - - def test_drivers(self): - p = pyodbc.drivers() - self.assertTrue(isinstance(p, list)) - - def test_datasources(self): - p = pyodbc.dataSources() - self.assertTrue(isinstance(p, dict)) - - def test_getinfo_string(self): - value = self.cnxn.getinfo(pyodbc.SQL_CATALOG_NAME_SEPARATOR) - self.assertTrue(isinstance(value, str)) - - def test_getinfo_bool(self): - value = self.cnxn.getinfo(pyodbc.SQL_ACCESSIBLE_TABLES) - self.assertTrue(isinstance(value, bool)) - - def test_getinfo_int(self): - value = self.cnxn.getinfo(pyodbc.SQL_DEFAULT_TXN_ISOLATION) - self.assertTrue(isinstance(value, (int, long))) - - def test_getinfo_smallint(self): - value = self.cnxn.getinfo(pyodbc.SQL_CONCAT_NULL_BEHAVIOR) - self.assertTrue(isinstance(value, int)) - - def _test_strtype(self, sqltype, value, resulttype=None, colsize=None): - """ - The implementation for string, Unicode, and binary tests. - """ - assert colsize is None or (value is None or colsize >= len(value)), 'colsize=%s value=%s' % (colsize, (value is None) and 'none' or len(value)) - - if colsize: - sql = "create table t1(n1 int not null, s1 %s(%s), s2 %s(%s))" % (sqltype, colsize, sqltype, colsize) - else: - sql = "create table t1(n1 int not null, s1 %s, s2 %s)" % (sqltype, sqltype) - - if resulttype is None: - # Access only uses Unicode, but strings might have been passed in to see if they can be written. When we - # read them back, they'll be unicode, so compare our results to a Unicode version of `value`. - if type(value) is str: - resulttype = unicode - else: - resulttype = type(value) - - self.cursor.execute(sql) - self.cursor.execute("insert into t1 values(1, ?, ?)", (value, value)) - v = self.cursor.execute("select s1, s2 from t1").fetchone()[0] - - if type(value) is not resulttype: - # To allow buffer --> db --> bytearray tests, always convert the input to the expected result type before - # comparing. - value = resulttype(value) - - self.assertEqual(type(v), resulttype) - - if value is not None: - self.assertEqual(len(v), len(value)) - - self.assertEqual(v, value) - - # - # unicode - # - - def test_unicode_null(self): - self._test_strtype('varchar', None, colsize=255) - - # Generate a test for each fencepost size: test_varchar_0, etc. - def _maketest(value): - def t(self): - self._test_strtype('varchar', value, colsize=len(value)) - t.__doc__ = 'unicode %s' % len(value) - return t - for value in UNICODE_FENCEPOSTS: - locals()['test_unicode_%s' % len(value)] = _maketest(value) - - # - # ansi -> varchar - # - - # Access only stores Unicode text but it should accept ASCII text. - - # Generate a test for each fencepost size: test_varchar_0, etc. 
- def _maketest(value): - def t(self): - self._test_strtype('varchar', value, colsize=len(value)) - t.__doc__ = 'ansi %s' % len(value) - return t - for value in ANSI_FENCEPOSTS: - locals()['test_ansivarchar_%s' % len(value)] = _maketest(value) - - # - # binary - # - - # Generate a test for each fencepost size: test_varchar_0, etc. - def _maketest(value): - def t(self): - self._test_strtype('varbinary', buffer(value), colsize=len(value), resulttype=pyodbc.BINARY) - t.__doc__ = 'binary %s' % len(value) - return t - for value in ANSI_FENCEPOSTS: - locals()['test_binary_%s' % len(value)] = _maketest(value) - - - # - # image - # - - def test_null_image(self): - self._test_strtype('image', None) - - # Generate a test for each fencepost size: test_varchar_0, etc. - def _maketest(value): - def t(self): - self._test_strtype('image', buffer(value), resulttype=pyodbc.BINARY) - t.__doc__ = 'image %s' % len(value) - return t - for value in IMAGE_FENCEPOSTS: - locals()['test_image_%s' % len(value)] = _maketest(value) - - # - # memo - # - - def test_null_memo(self): - self._test_strtype('memo', None) - - # Generate a test for each fencepost size: test_varchar_0, etc. - def _maketest(value): - def t(self): - self._test_strtype('memo', unicode(value)) - t.__doc__ = 'Unicode to memo %s' % len(value) - return t - for value in IMAGE_FENCEPOSTS: - locals()['test_memo_%s' % len(value)] = _maketest(value) - - # ansi -> memo - def _maketest(value): - def t(self): - self._test_strtype('memo', value) - t.__doc__ = 'ANSI to memo %s' % len(value) - return t - for value in IMAGE_FENCEPOSTS: - locals()['test_ansimemo_%s' % len(value)] = _maketest(value) - - def test_subquery_params(self): - """Ensure parameter markers work in a subquery""" - self.cursor.execute("create table t1(id integer, s varchar(20))") - self.cursor.execute("insert into t1 values (?,?)", 1, 'test') - row = self.cursor.execute(""" - select x.id - from ( - select id - from t1 - where s = ? - and id between ? and ? - ) x - """, 'test', 1, 10).fetchone() - self.assertNotEqual(row, None) - self.assertEqual(row[0], 1) - - def _exec(self): - self.cursor.execute(self.sql) - - def test_close_cnxn(self): - """Make sure using a Cursor after closing its connection doesn't crash.""" - - self.cursor.execute("create table t1(id integer, s varchar(20))") - self.cursor.execute("insert into t1 values (?,?)", 1, 'test') - self.cursor.execute("select * from t1") - - self.cnxn.close() - - # Now that the connection is closed, we expect an exception. (If the code attempts to use - # the HSTMT, we'll get an access violation instead.) - self.sql = "select * from t1" - self.assertRaises(pyodbc.ProgrammingError, self._exec) - - - def test_unicode_query(self): - self.cursor.execute(u"select 1") - - def test_negative_row_index(self): - self.cursor.execute("create table t1(s varchar(20))") - self.cursor.execute("insert into t1 values(?)", "1") - row = self.cursor.execute("select * from t1").fetchone() - self.assertEqual(row[0], "1") - self.assertEqual(row[-1], "1") - - def test_version(self): - self.assertEqual(3, len(pyodbc.version.split('.'))) # 1.3.1 etc. 
- - # - # date, time, datetime - # - - def test_datetime(self): - value = datetime(2007, 1, 15, 3, 4, 5) - - self.cursor.execute("create table t1(dt datetime)") - self.cursor.execute("insert into t1 values (?)", value) - - result = self.cursor.execute("select dt from t1").fetchone()[0] - self.assertEqual(value, result) - - # - # ints and floats - # - - def test_int(self): - value = 1234 - self.cursor.execute("create table t1(n int)") - self.cursor.execute("insert into t1 values (?)", value) - result = self.cursor.execute("select n from t1").fetchone()[0] - self.assertEqual(result, value) - - def test_negative_int(self): - value = -1 - self.cursor.execute("create table t1(n int)") - self.cursor.execute("insert into t1 values (?)", value) - result = self.cursor.execute("select n from t1").fetchone()[0] - self.assertEqual(result, value) - - def test_smallint(self): - value = 32767 - self.cursor.execute("create table t1(n smallint)") - self.cursor.execute("insert into t1 values (?)", value) - result = self.cursor.execute("select n from t1").fetchone()[0] - self.assertEqual(result, value) - - def test_real(self): - value = 1234.5 - self.cursor.execute("create table t1(n real)") - self.cursor.execute("insert into t1 values (?)", value) - result = self.cursor.execute("select n from t1").fetchone()[0] - self.assertEqual(result, value) - - def test_negative_real(self): - value = -200.5 - self.cursor.execute("create table t1(n real)") - self.cursor.execute("insert into t1 values (?)", value) - result = self.cursor.execute("select n from t1").fetchone()[0] - self.assertEqual(value, result) - - def test_float(self): - value = 1234.567 - self.cursor.execute("create table t1(n float)") - self.cursor.execute("insert into t1 values (?)", value) - result = self.cursor.execute("select n from t1").fetchone()[0] - self.assertEqual(result, value) - - def test_negative_float(self): - value = -200.5 - self.cursor.execute("create table t1(n float)") - self.cursor.execute("insert into t1 values (?)", value) - result = self.cursor.execute("select n from t1").fetchone()[0] - self.assertEqual(value, result) - - def test_tinyint(self): - self.cursor.execute("create table t1(n tinyint)") - value = 10 - self.cursor.execute("insert into t1 values (?)", value) - result = self.cursor.execute("select n from t1").fetchone()[0] - self.assertEqual(type(result), type(value)) - self.assertEqual(value, result) - - # - # decimal & money - # - - def test_decimal(self): - value = Decimal('12345.6789') - self.cursor.execute("create table t1(n numeric(10,4))") - self.cursor.execute("insert into t1 values(?)", value) - v = self.cursor.execute("select n from t1").fetchone()[0] - self.assertEqual(type(v), Decimal) - self.assertEqual(v, value) - - def test_money(self): - self.cursor.execute("create table t1(n money)") - value = Decimal('1234.45') - self.cursor.execute("insert into t1 values (?)", value) - result = self.cursor.execute("select n from t1").fetchone()[0] - self.assertEqual(type(result), type(value)) - self.assertEqual(value, result) - - def test_negative_decimal_scale(self): - value = Decimal('-10.0010') - self.cursor.execute("create table t1(d numeric(19,4))") - self.cursor.execute("insert into t1 values(?)", value) - v = self.cursor.execute("select * from t1").fetchone()[0] - self.assertEqual(type(v), Decimal) - self.assertEqual(v, value) - - # - # bit - # - - def test_bit(self): - self.cursor.execute("create table t1(b bit)") - - value = True - self.cursor.execute("insert into t1 values (?)", value) - result = 
self.cursor.execute("select b from t1").fetchone()[0] - self.assertEqual(type(result), bool) - self.assertEqual(value, result) - - def test_bit_null(self): - self.cursor.execute("create table t1(b bit)") - - value = None - self.cursor.execute("insert into t1 values (?)", value) - result = self.cursor.execute("select b from t1").fetchone()[0] - self.assertEqual(type(result), bool) - self.assertEqual(False, result) - - def test_guid(self): - value = u"de2ac9c6-8676-4b0b-b8a6-217a8580cbee" - self.cursor.execute("create table t1(g1 uniqueidentifier)") - self.cursor.execute("insert into t1 values (?)", value) - v = self.cursor.execute("select * from t1").fetchone()[0] - self.assertEqual(type(v), type(value)) - self.assertEqual(len(v), len(value)) - - - # - # rowcount - # - - def test_rowcount_delete(self): - self.assertEqual(self.cursor.rowcount, -1) - self.cursor.execute("create table t1(i int)") - count = 4 - for i in range(count): - self.cursor.execute("insert into t1 values (?)", i) - self.cursor.execute("delete from t1") - self.assertEqual(self.cursor.rowcount, count) - - def test_rowcount_nodata(self): - """ - This represents a different code path than a delete that deleted something. - - The return value is SQL_NO_DATA and code after it was causing an error. We could use SQL_NO_DATA to step over - the code that errors out and drop down to the same SQLRowCount code. On the other hand, we could hardcode a - zero return value. - """ - self.cursor.execute("create table t1(i int)") - # This is a different code path internally. - self.cursor.execute("delete from t1") - self.assertEqual(self.cursor.rowcount, 0) - - def test_rowcount_select(self): - """ - Ensure Cursor.rowcount is set properly after a select statement. - - pyodbc calls SQLRowCount after each execute and sets Cursor.rowcount, but SQL Server 2005 returns -1 after a - select statement, so we'll test for that behavior. This is valid behavior according to the DB API - specification, but people don't seem to like it. - """ - self.cursor.execute("create table t1(i int)") - count = 4 - for i in range(count): - self.cursor.execute("insert into t1 values (?)", i) - self.cursor.execute("select * from t1") - self.assertEqual(self.cursor.rowcount, -1) - - rows = self.cursor.fetchall() - self.assertEqual(len(rows), count) - self.assertEqual(self.cursor.rowcount, -1) - - def test_rowcount_reset(self): - "Ensure rowcount is reset to -1" - - self.cursor.execute("create table t1(i int)") - count = 4 - for i in range(count): - self.cursor.execute("insert into t1 values (?)", i) - self.assertEqual(self.cursor.rowcount, 1) - - self.cursor.execute("create table t2(i int)") - self.assertEqual(self.cursor.rowcount, -1) - - # - # Misc - # - - def test_lower_case(self): - "Ensure pyodbc.lowercase forces returned column names to lowercase." - - # Has to be set before creating the cursor, so we must recreate self.cursor. - - pyodbc.lowercase = True - self.cursor = self.cnxn.cursor() - - self.cursor.execute("create table t1(Abc int, dEf int)") - self.cursor.execute("select * from t1") - - names = [ t[0] for t in self.cursor.description ] - names.sort() - - self.assertEqual(names, [ "abc", "def" ]) - - # Put it back so other tests don't fail. - pyodbc.lowercase = False - - def test_row_description(self): - """ - Ensure Cursor.description is accessible as Row.cursor_description. 
- """ - self.cursor = self.cnxn.cursor() - self.cursor.execute("create table t1(a int, b char(3))") - self.cnxn.commit() - self.cursor.execute("insert into t1 values(1, 'abc')") - - row = self.cursor.execute("select * from t1").fetchone() - self.assertEqual(self.cursor.description, row.cursor_description) - - - def test_executemany(self): - self.cursor.execute("create table t1(a int, b varchar(10))") - - params = [ (i, str(i)) for i in range(1, 6) ] - - self.cursor.executemany("insert into t1(a, b) values (?,?)", params) - - count = self.cursor.execute("select count(*) from t1").fetchone()[0] - self.assertEqual(count, len(params)) - - self.cursor.execute("select a, b from t1 order by a") - rows = self.cursor.fetchall() - self.assertEqual(count, len(rows)) - - for param, row in zip(params, rows): - self.assertEqual(param[0], row[0]) - self.assertEqual(param[1], row[1]) - - - def test_executemany_failure(self): - """ - Ensure that an exception is raised if one query in an executemany fails. - """ - self.cursor.execute("create table t1(a int, b varchar(10))") - - params = [ (1, 'good'), - ('error', 'not an int'), - (3, 'good') ] - - self.assertRaises(pyodbc.Error, self.cursor.executemany, "insert into t1(a, b) value (?, ?)", params) - - - def test_row_slicing(self): - self.cursor.execute("create table t1(a int, b int, c int, d int)"); - self.cursor.execute("insert into t1 values(1,2,3,4)") - - row = self.cursor.execute("select * from t1").fetchone() - - result = row[:] - self.assertTrue(result is row) - - result = row[:-1] - self.assertEqual(result, (1,2,3)) - - result = row[0:4] - self.assertTrue(result is row) - - - def test_row_repr(self): - self.cursor.execute("create table t1(a int, b int, c int, d int)"); - self.cursor.execute("insert into t1 values(1,2,3,4)") - - row = self.cursor.execute("select * from t1").fetchone() - - result = str(row) - self.assertEqual(result, "(1, 2, 3, 4)") - - result = str(row[:-1]) - self.assertEqual(result, "(1, 2, 3)") - - result = str(row[:1]) - self.assertEqual(result, "(1,)") - - - def test_concatenation(self): - v2 = u'0123456789' * 25 - v3 = u'9876543210' * 25 - value = v2 + 'x' + v3 - - self.cursor.execute("create table t1(c2 varchar(250), c3 varchar(250))") - self.cursor.execute("insert into t1(c2, c3) values (?,?)", v2, v3) - - row = self.cursor.execute("select c2 + 'x' + c3 from t1").fetchone() - - self.assertEqual(row[0], value) - - - def test_autocommit(self): - self.assertEqual(self.cnxn.autocommit, False) - - othercnxn = pyodbc.connect(CNXNSTRING, autocommit=True) - self.assertEqual(othercnxn.autocommit, True) - - othercnxn.autocommit = False - self.assertEqual(othercnxn.autocommit, False) - - -def main(): - from optparse import OptionParser - parser = OptionParser(usage=usage) - parser.add_option("-v", "--verbose", default=0, action="count", help="Increment test verbosity (can be used multiple times)") - parser.add_option("-d", "--debug", action="store_true", default=False, help="Print debugging items") - parser.add_option("-t", "--test", help="Run only the named test") - - (options, args) = parser.parse_args() - - if len(args) != 1: - parser.error('dbfile argument required') - - if args[0].endswith('.accdb'): - driver = 'Microsoft Access Driver (*.mdb, *.accdb)' - drvext = 'accdb' - else: - driver = 'Microsoft Access Driver (*.mdb)' - drvext = 'mdb' - - here = dirname(abspath(__file__)) - src = join(here, 'empty.' + drvext) - dest = join(here, 'test.' 
+ drvext) - shutil.copy(src, dest) - - global CNXNSTRING - CNXNSTRING = 'DRIVER={%s};DBQ=%s;ExtendedAnsiSQL=1' % (driver, dest) - print(CNXNSTRING) - - if options.verbose: - cnxn = pyodbc.connect(CNXNSTRING) - print_library_info(cnxn) - cnxn.close() - - suite = load_tests(AccessTestCase, options.test) - - testRunner = unittest.TextTestRunner(verbosity=options.verbose) - result = testRunner.run(suite) - - return result - - -if __name__ == '__main__': - - # Add the build directory to the path so we're testing the latest build, not the installed version. - add_to_path() - import pyodbc - sys.exit(0 if main().wasSuccessful() else 1) diff --git a/tests2/dbapitests.py b/tests2/dbapitests.py deleted file mode 100755 index a2fd8c2e..00000000 --- a/tests2/dbapitests.py +++ /dev/null @@ -1,45 +0,0 @@ -import sys -import unittest -from testutils import * -import dbapi20 - -def main(): - add_to_path() - import pyodbc - - from optparse import OptionParser - parser = OptionParser(usage="usage: %prog [options] connection_string") - parser.add_option("-v", "--verbose", action="count", help="Increment test verbosity (can be used multiple times)") - parser.add_option("-d", "--debug", action="store_true", default=False, help="Print debugging items") - - (options, args) = parser.parse_args() - if len(args) > 1: - parser.error('Only one argument is allowed. Do you need quotes around the connection string?') - - if not args: - connection_string = load_setup_connection_string('dbapitests') - - if not connection_string: - parser.print_help() - raise SystemExit() - else: - connection_string = args[0] - - class test_pyodbc(dbapi20.DatabaseAPI20Test): - driver = pyodbc - connect_args = [ connection_string ] - connect_kw_args = {} - - def test_nextset(self): pass - def test_setoutputsize(self): pass - def test_ExceptionsAsConnectionAttributes(self): pass - - suite = unittest.makeSuite(test_pyodbc, 'test') - testRunner = unittest.TextTestRunner(verbosity=(options.verbose > 1) and 9 or 0) - result = testRunner.run(suite) - - return result - - -if __name__ == '__main__': - sys.exit(0 if main().wasSuccessful() else 1) diff --git a/tests2/exceltests.py b/tests2/exceltests.py deleted file mode 100755 index 10062b0c..00000000 --- a/tests2/exceltests.py +++ /dev/null @@ -1,143 +0,0 @@ -#!/usr/bin/python - -# Tests for reading from Excel files. -# -# I have not been able to successfully create or modify Excel files. - -import sys, os, re -import unittest -from os.path import abspath -from testutils import * - -CNXNSTRING = None - -class ExcelTestCase(unittest.TestCase): - - def __init__(self, method_name): - unittest.TestCase.__init__(self, method_name) - - def setUp(self): - self.cnxn = pyodbc.connect(CNXNSTRING, autocommit=True) - self.cursor = self.cnxn.cursor() - - for i in range(3): - try: - self.cursor.execute("drop table t%d" % i) - self.cnxn.commit() - except: - pass - - self.cnxn.rollback() - - def tearDown(self): - try: - self.cursor.close() - self.cnxn.close() - except: - # If we've already closed the cursor or connection, exceptions are thrown. 
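# --- Editor's note: not part of the patch.  A minimal py3 sketch of the
# exception-tolerant cleanup this tearDown performs, using
# contextlib.suppress instead of bare except clauses; pyodbc.Error is the
# base class of all pyodbc exceptions.
import contextlib
import pyodbc

def close_quietly(cursor, cnxn):
    # Closing an already-closed cursor or connection raises, so swallow
    # pyodbc errors rather than letting tearDown fail.
    with contextlib.suppress(pyodbc.Error):
        cursor.close()
    with contextlib.suppress(pyodbc.Error):
        cnxn.close()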
- pass - - def test_getinfo_string(self): - value = self.cnxn.getinfo(pyodbc.SQL_CATALOG_NAME_SEPARATOR) - self.assertTrue(isinstance(value, str)) - - def test_getinfo_bool(self): - value = self.cnxn.getinfo(pyodbc.SQL_ACCESSIBLE_TABLES) - self.assertTrue(isinstance(value, bool)) - - def test_getinfo_int(self): - value = self.cnxn.getinfo(pyodbc.SQL_DEFAULT_TXN_ISOLATION) - self.assertTrue(isinstance(value, (int, long))) - - def test_getinfo_smallint(self): - value = self.cnxn.getinfo(pyodbc.SQL_CONCAT_NULL_BEHAVIOR) - self.assertTrue(isinstance(value, int)) - - - def test_read_sheet(self): - # The first method of reading data is to access worksheets by name in this format [name$]. - # - # Our second sheet is named Sheet2 and has two columns. The first has values 10, 20, 30, etc. - - rows = self.cursor.execute("select * from [Sheet2$]").fetchall() - self.assertEqual(len(rows), 5) - - for index, row in enumerate(rows): - self.assertEqual(row.s2num, float(index + 1) * 10) - - def test_read_range(self): - # The second method of reading data is to assign a name to a range of cells and access that as a table. - # - # Our first worksheet has a section named Table1. The first column has values 1, 2, 3, etc. - - rows = self.cursor.execute("select * from Table1").fetchall() - self.assertEqual(len(rows), 10) - - for index, row in enumerate(rows): - self.assertEqual(row.num, float(index + 1)) - self.assertEqual(row.val, chr(ord('a') + index)) - - def test_tables(self): - # This is useful for figuring out what is available - tables = [ row.table_name for row in self.cursor.tables() ] - assert 'Sheet2$' in tables, 'tables: %s' % ' '.join(tables) - - - # def test_append(self): - # rows = self.cursor.execute("select s2num, s2val from [Sheet2$]").fetchall() - # - # print rows - # - # nextnum = max([ row.s2num for row in rows ]) + 10 - # - # self.cursor.execute("insert into [Sheet2$](s2num, s2val) values (?, 'z')", nextnum) - # - # row = self.cursor.execute("select s2num, s2val from [Sheet2$] where s2num=?", nextnum).fetchone() - # self.assertTrue(row) - # - # print 'added:', nextnum, len(rows), 'rows' - # - # self.assertEqual(row.s2num, nextnum) - # self.assertEqual(row.s2val, 'z') - # - # self.cnxn.commit() - - -def main(): - from optparse import OptionParser - parser = OptionParser() #usage=usage) - parser.add_option("-v", "--verbose", action="count", help="Increment test verbosity (can be used multiple times)") - parser.add_option("-d", "--debug", action="store_true", default=False, help="Print debugging items") - parser.add_option("-t", "--test", help="Run only the named test") - - (options, args) = parser.parse_args() - - if args: - parser.error('no arguments expected') - - global CNXNSTRING - - path = dirname(abspath(__file__)) - filename = join(path, 'test.xls') - assert os.path.exists(filename) - CNXNSTRING = 'Driver={Microsoft Excel Driver (*.xls)};DBQ=%s;READONLY=FALSE' % filename - - if options.verbose: - cnxn = pyodbc.connect(CNXNSTRING, autocommit=True) - print_library_info(cnxn) - cnxn.close() - - suite = load_tests(ExcelTestCase, options.test) - - testRunner = unittest.TextTestRunner(verbosity=options.verbose) - result = testRunner.run(suite) - - return result - - -if __name__ == '__main__': - - # Add the build directory to the path so we're testing the latest build, not the installed version. 
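# --- Editor's note: not part of the patch.  A minimal sketch of the
# worksheet / named-range access pattern the Excel tests above rely on;
# the file path and sheet name are illustrative assumptions.
import pyodbc

def read_sheet(xls_path):
    cnxn = pyodbc.connect(
        'Driver={Microsoft Excel Driver (*.xls)};DBQ=%s;READONLY=TRUE' % xls_path,
        autocommit=True)
    cursor = cnxn.cursor()
    # Worksheets are addressed as [SheetName$]; named cell ranges by name.
    rows = cursor.execute("select * from [Sheet2$]").fetchall()
    cnxn.close()
    return rows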
- add_to_path() - import pyodbc - sys.exit(0 if main().wasSuccessful() else 1) diff --git a/tests2/informixtests.py b/tests2/informixtests.py deleted file mode 100755 index 36525e90..00000000 --- a/tests2/informixtests.py +++ /dev/null @@ -1,1275 +0,0 @@ -#!/usr/bin/python -# -*- coding: latin-1 -*- - -usage = """\ -usage: %prog [options] connection_string - -Unit tests for Informix DB. To use, pass a connection string as the parameter. -The tests will create and drop tables t1 and t2 as necessary. - -These run using the version from the 'build' directory, not the version -installed into the Python directories. You must run python setup.py build -before running the tests. - -You can also put the connection string into a tmp/setup.cfg file like so: - - [informixtests] - connection-string=DRIVER={IBM INFORMIX ODBC DRIVER (64-bit)};SERVER=localhost;UID=uid;PWD=pwd;DATABASE=db -""" - -import sys, os, re -import unittest -from decimal import Decimal -from datetime import datetime, date, time -from os.path import join, getsize, dirname, abspath -from testutils import * - -_TESTSTR = '0123456789-abcdefghijklmnopqrstuvwxyz-' - -def _generate_test_string(length): - """ - Returns a string of `length` characters, constructed by repeating _TESTSTR as necessary. - - To enhance performance, there are 3 ways data is read, based on the length of the value, so most data types are - tested with 3 lengths. This function helps us generate the test data. - - We use a recognizable data set instead of a single character to make it less likely that "overlap" errors will - be hidden and to help us manually identify where a break occurs. - """ - if length <= len(_TESTSTR): - return _TESTSTR[:length] - - c = (length + len(_TESTSTR)-1) / len(_TESTSTR) - v = _TESTSTR * c - return v[:length] - -class InformixTestCase(unittest.TestCase): - - SMALL_FENCEPOST_SIZES = [ 0, 1, 255, 256, 510, 511, 512, 1023, 1024, 2047, 2048, 4000 ] - LARGE_FENCEPOST_SIZES = [ 4095, 4096, 4097, 10 * 1024, 20 * 1024 ] - - ANSI_FENCEPOSTS = [ _generate_test_string(size) for size in SMALL_FENCEPOST_SIZES ] - UNICODE_FENCEPOSTS = [ unicode(s) for s in ANSI_FENCEPOSTS ] - IMAGE_FENCEPOSTS = ANSI_FENCEPOSTS + [ _generate_test_string(size) for size in LARGE_FENCEPOST_SIZES ] - - def __init__(self, method_name, connection_string): - unittest.TestCase.__init__(self, method_name) - self.connection_string = connection_string - - def setUp(self): - self.cnxn = pyodbc.connect(self.connection_string) - self.cursor = self.cnxn.cursor() - - for i in range(3): - try: - self.cursor.execute("drop table t%d" % i) - self.cnxn.commit() - except: - pass - - for i in range(3): - try: - self.cursor.execute("drop procedure proc%d" % i) - self.cnxn.commit() - except: - pass - - try: - self.cursor.execute('drop function func1') - self.cnxn.commit() - except: - pass - - self.cnxn.rollback() - - def tearDown(self): - try: - self.cursor.close() - self.cnxn.close() - except: - # If we've already closed the cursor or connection, exceptions are thrown. 
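# --- Editor's note: not part of the patch.  The _generate_test_string
# helper above still uses py2 integer division; a minimal py3 sketch of
# the same fencepost-string generator with '//' so the repeat count stays
# an int.
_TESTSTR = '0123456789-abcdefghijklmnopqrstuvwxyz-'

def generate_test_string(length):
    # Repeat a recognizable seed until it is at least `length` long, then
    # trim, so truncation/overlap bugs are easy to spot by eye.
    if length <= len(_TESTSTR):
        return _TESTSTR[:length]
    repeats = (length + len(_TESTSTR) - 1) // len(_TESTSTR)
    return (_TESTSTR * repeats)[:length]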
- pass - - def test_multiple_bindings(self): - "More than one bind and select on a cursor" - self.cursor.execute("create table t1(n int)") - self.cursor.execute("insert into t1 values (?)", 1) - self.cursor.execute("insert into t1 values (?)", 2) - self.cursor.execute("insert into t1 values (?)", 3) - for i in range(3): - self.cursor.execute("select n from t1 where n < ?", 10) - self.cursor.execute("select n from t1 where n < 3") - - - def test_different_bindings(self): - self.cursor.execute("create table t1(n int)") - self.cursor.execute("create table t2(d datetime)") - self.cursor.execute("insert into t1 values (?)", 1) - self.cursor.execute("insert into t2 values (?)", datetime.now()) - - def test_drivers(self): - p = pyodbc.drivers() - self.assertTrue(isinstance(p, list)) - - def test_datasources(self): - p = pyodbc.dataSources() - self.assertTrue(isinstance(p, dict)) - - def test_getinfo_string(self): - value = self.cnxn.getinfo(pyodbc.SQL_CATALOG_NAME_SEPARATOR) - self.assertTrue(isinstance(value, str)) - - def test_getinfo_bool(self): - value = self.cnxn.getinfo(pyodbc.SQL_ACCESSIBLE_TABLES) - self.assertTrue(isinstance(value, bool)) - - def test_getinfo_int(self): - value = self.cnxn.getinfo(pyodbc.SQL_DEFAULT_TXN_ISOLATION) - self.assertTrue(isinstance(value, (int, long))) - - def test_getinfo_smallint(self): - value = self.cnxn.getinfo(pyodbc.SQL_CONCAT_NULL_BEHAVIOR) - self.assertTrue(isinstance(value, int)) - - def test_noscan(self): - self.assertEqual(self.cursor.noscan, False) - self.cursor.noscan = True - self.assertEqual(self.cursor.noscan, True) - - def test_guid(self): - self.cursor.execute("create table t1(g1 uniqueidentifier)") - self.cursor.execute("insert into t1 values (newid())") - v = self.cursor.execute("select * from t1").fetchone()[0] - self.assertEqual(type(v), str) - self.assertEqual(len(v), 36) - - def test_nextset(self): - self.cursor.execute("create table t1(i int)") - for i in range(4): - self.cursor.execute("insert into t1(i) values(?)", i) - - self.cursor.execute("select i from t1 where i < 2 order by i; select i from t1 where i >= 2 order by i") - - for i, row in enumerate(self.cursor): - self.assertEqual(i, row.i) - - self.assertEqual(self.cursor.nextset(), True) - - for i, row in enumerate(self.cursor): - self.assertEqual(i + 2, row.i) - - def test_fixed_unicode(self): - value = u"t\xebsting" - self.cursor.execute("create table t1(s nchar(7))") - self.cursor.execute("insert into t1 values(?)", u"t\xebsting") - v = self.cursor.execute("select * from t1").fetchone()[0] - self.assertEqual(type(v), unicode) - self.assertEqual(len(v), len(value)) # If we alloc'd wrong, the test below might work because of an embedded NULL - self.assertEqual(v, value) - - - def _test_strtype(self, sqltype, value, colsize=None): - """ - The implementation for string, Unicode, and binary tests. - """ - assert colsize is None or (value is None or colsize >= len(value)) - - if colsize: - sql = "create table t1(s %s(%s))" % (sqltype, colsize) - else: - sql = "create table t1(s %s)" % sqltype - - self.cursor.execute(sql) - self.cursor.execute("insert into t1 values(?)", value) - v = self.cursor.execute("select * from t1").fetchone()[0] - self.assertEqual(type(v), type(value)) - - if value is not None: - self.assertEqual(len(v), len(value)) - - self.assertEqual(v, value) - - # Reported by Andy Hochhaus in the pyodbc group: In 2.1.7 and earlier, a hardcoded length of 255 was used to - # determine whether a parameter was bound as a SQL_VARCHAR or SQL_LONGVARCHAR. 
Apparently SQL Server chokes if - # we bind as a SQL_LONGVARCHAR and the target column size is 8000 or less, which is considers just SQL_VARCHAR. - # This means binding a 256 character value would cause problems if compared with a VARCHAR column under - # 8001. We now use SQLGetTypeInfo to determine the time to switch. - # - # [42000] [Microsoft][SQL Server Native Client 10.0][SQL Server]The data types varchar and text are incompatible in the equal to operator. - - self.cursor.execute("select * from t1 where s=?", value) - - - def _test_strliketype(self, sqltype, value, colsize=None): - """ - The implementation for text, image, ntext, and binary. - - These types do not support comparison operators. - """ - assert colsize is None or (value is None or colsize >= len(value)) - - if colsize: - sql = "create table t1(s %s(%s))" % (sqltype, colsize) - else: - sql = "create table t1(s %s)" % sqltype - - self.cursor.execute(sql) - self.cursor.execute("insert into t1 values(?)", value) - v = self.cursor.execute("select * from t1").fetchone()[0] - self.assertEqual(type(v), type(value)) - - if value is not None: - self.assertEqual(len(v), len(value)) - - self.assertEqual(v, value) - - - # - # varchar - # - - def test_varchar_null(self): - self._test_strtype('varchar', None, 100) - - # Generate a test for each fencepost size: test_varchar_0, etc. - def _maketest(value): - def t(self): - self._test_strtype('varchar', value, len(value)) - return t - for value in ANSI_FENCEPOSTS: - locals()['test_varchar_%s' % len(value)] = _maketest(value) - - def test_varchar_many(self): - self.cursor.execute("create table t1(c1 varchar(300), c2 varchar(300), c3 varchar(300))") - - v1 = 'ABCDEFGHIJ' * 30 - v2 = '0123456789' * 30 - v3 = '9876543210' * 30 - - self.cursor.execute("insert into t1(c1, c2, c3) values (?,?,?)", v1, v2, v3); - row = self.cursor.execute("select c1, c2, c3, len(c1) as l1, len(c2) as l2, len(c3) as l3 from t1").fetchone() - - self.assertEqual(v1, row.c1) - self.assertEqual(v2, row.c2) - self.assertEqual(v3, row.c3) - - def test_varchar_upperlatin(self): - self._test_strtype('varchar', '') - - # - # unicode - # - - def test_unicode_null(self): - self._test_strtype('nvarchar', None, 100) - - # Generate a test for each fencepost size: test_unicode_0, etc. - def _maketest(value): - def t(self): - self._test_strtype('nvarchar', value, len(value)) - return t - for value in UNICODE_FENCEPOSTS: - locals()['test_unicode_%s' % len(value)] = _maketest(value) - - def test_unicode_upperlatin(self): - self._test_strtype('varchar', '') - - # - # binary - # - - def test_null_binary(self): - self._test_strtype('varbinary', None, 100) - - def test_large_null_binary(self): - # Bug 1575064 - self._test_strtype('varbinary', None, 4000) - - # Generate a test for each fencepost size: test_unicode_0, etc. - def _maketest(value): - def t(self): - self._test_strtype('varbinary', buffer(value), len(value)) - return t - for value in ANSI_FENCEPOSTS: - locals()['test_binary_%s' % len(value)] = _maketest(value) - - # - # image - # - - def test_image_null(self): - self._test_strliketype('image', None) - - # Generate a test for each fencepost size: test_unicode_0, etc. 
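# --- Editor's note: not part of the patch.  A minimal sketch of the
# test-generation idiom used throughout these files: one closure per
# fencepost length is bound into the class namespace at class-creation
# time, so every size appears as its own named test.  The class below is
# illustrative only.
import unittest

class FencepostExample(unittest.TestCase):
    def _roundtrip(self, value):
        # Placeholder for the real insert/select round trip.
        self.assertEqual(value, value)

    def _maketest(value):
        def t(self):
            self._roundtrip(value)
        return t
    for value in ['', 'x', 'x' * 255, 'x' * 256, 'x' * 4000]:
        locals()['test_varchar_%s' % len(value)] = _maketest(value)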
- def _maketest(value): - def t(self): - self._test_strliketype('image', buffer(value)) - return t - for value in IMAGE_FENCEPOSTS: - locals()['test_image_%s' % len(value)] = _maketest(value) - - def test_image_upperlatin(self): - self._test_strliketype('image', buffer('')) - - # - # text - # - - # def test_empty_text(self): - # self._test_strliketype('text', buffer('')) - - def test_null_text(self): - self._test_strliketype('text', None) - - # Generate a test for each fencepost size: test_unicode_0, etc. - def _maketest(value): - def t(self): - self._test_strliketype('text', value) - return t - for value in ANSI_FENCEPOSTS: - locals()['test_text_%s' % len(value)] = _maketest(value) - - def test_text_upperlatin(self): - self._test_strliketype('text', '') - - # - # bit - # - - def test_bit(self): - value = True - self.cursor.execute("create table t1(b bit)") - self.cursor.execute("insert into t1 values (?)", value) - v = self.cursor.execute("select b from t1").fetchone()[0] - self.assertEqual(type(v), bool) - self.assertEqual(v, value) - - # - # decimal - # - - def _decimal(self, precision, scale, negative): - # From test provided by planders (thanks!) in Issue 91 - - self.cursor.execute("create table t1(d decimal(%s, %s))" % (precision, scale)) - - # Construct a decimal that uses the maximum precision and scale. - decStr = '9' * (precision - scale) - if scale: - decStr = decStr + "." + '9' * scale - if negative: - decStr = "-" + decStr - value = Decimal(decStr) - - self.cursor.execute("insert into t1 values(?)", value) - - v = self.cursor.execute("select d from t1").fetchone()[0] - self.assertEqual(v, value) - - def _maketest(p, s, n): - def t(self): - self._decimal(p, s, n) - return t - for (p, s, n) in [ (1, 0, False), - (1, 0, True), - (6, 0, False), - (6, 2, False), - (6, 4, True), - (6, 6, True), - (38, 0, False), - (38, 10, False), - (38, 38, False), - (38, 0, True), - (38, 10, True), - (38, 38, True) ]: - locals()['test_decimal_%s_%s_%s' % (p, s, n and 'n' or 'p')] = _maketest(p, s, n) - - - def test_decimal_e(self): - """Ensure exponential notation decimals are properly handled""" - value = Decimal((0, (1, 2, 3), 5)) # prints as 1.23E+7 - self.cursor.execute("create table t1(d decimal(10, 2))") - self.cursor.execute("insert into t1 values (?)", value) - result = self.cursor.execute("select * from t1").fetchone()[0] - self.assertEqual(result, value) - - def test_subquery_params(self): - """Ensure parameter markers work in a subquery""" - self.cursor.execute("create table t1(id integer, s varchar(20))") - self.cursor.execute("insert into t1 values (?,?)", 1, 'test') - row = self.cursor.execute(""" - select x.id - from ( - select id - from t1 - where s = ? - and id between ? and ? - ) x - """, 'test', 1, 10).fetchone() - self.assertNotEqual(row, None) - self.assertEqual(row[0], 1) - - def _exec(self): - self.cursor.execute(self.sql) - - def test_close_cnxn(self): - """Make sure using a Cursor after closing its connection doesn't crash.""" - - self.cursor.execute("create table t1(id integer, s varchar(20))") - self.cursor.execute("insert into t1 values (?,?)", 1, 'test') - self.cursor.execute("select * from t1") - - self.cnxn.close() - - # Now that the connection is closed, we expect an exception. (If the code attempts to use - # the HSTMT, we'll get an access violation instead.) 
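# --- Editor's note: not part of the patch.  A tiny sketch of the
# behaviour this test checks: once the connection is closed, using its
# cursor raises pyodbc.ProgrammingError instead of crashing.  The
# connection string is an assumed placeholder.
import pyodbc

def closed_connection_raises(connection_string):
    cnxn = pyodbc.connect(connection_string)
    cursor = cnxn.cursor()
    cnxn.close()
    try:
        cursor.execute("select 1")
    except pyodbc.ProgrammingError:
        return True        # expected: the handle is gone, reported cleanly
    return False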
- self.sql = "select * from t1" - self.assertRaises(pyodbc.ProgrammingError, self._exec) - - def test_empty_string(self): - self.cursor.execute("create table t1(s varchar(20))") - self.cursor.execute("insert into t1 values(?)", "") - - def test_fixed_str(self): - value = "testing" - self.cursor.execute("create table t1(s char(7))") - self.cursor.execute("insert into t1 values(?)", "testing") - v = self.cursor.execute("select * from t1").fetchone()[0] - self.assertEqual(type(v), str) - self.assertEqual(len(v), len(value)) # If we alloc'd wrong, the test below might work because of an embedded NULL - self.assertEqual(v, value) - - def test_empty_unicode(self): - self.cursor.execute("create table t1(s nvarchar(20))") - self.cursor.execute("insert into t1 values(?)", u"") - - def test_unicode_query(self): - self.cursor.execute(u"select 1") - - def test_negative_row_index(self): - self.cursor.execute("create table t1(s varchar(20))") - self.cursor.execute("insert into t1 values(?)", "1") - row = self.cursor.execute("select * from t1").fetchone() - self.assertEqual(row[0], "1") - self.assertEqual(row[-1], "1") - - def test_version(self): - self.assertEqual(3, len(pyodbc.version.split('.'))) # 1.3.1 etc. - - # - # date, time, datetime - # - - def test_datetime(self): - value = datetime(2007, 1, 15, 3, 4, 5) - - self.cursor.execute("create table t1(dt datetime)") - self.cursor.execute("insert into t1 values (?)", value) - - result = self.cursor.execute("select dt from t1").fetchone()[0] - self.assertEqual(type(value), datetime) - self.assertEqual(value, result) - - def test_datetime_fraction(self): - # SQL Server supports milliseconds, but Python's datetime supports nanoseconds, so the most granular datetime - # supported is xxx000. - - value = datetime(2007, 1, 15, 3, 4, 5, 123000) - - self.cursor.execute("create table t1(dt datetime)") - self.cursor.execute("insert into t1 values (?)", value) - - result = self.cursor.execute("select dt from t1").fetchone()[0] - self.assertEqual(type(value), datetime) - self.assertEqual(result, value) - - def test_datetime_fraction_rounded(self): - # SQL Server supports milliseconds, but Python's datetime supports nanoseconds. pyodbc rounds down to what the - # database supports. - - full = datetime(2007, 1, 15, 3, 4, 5, 123456) - rounded = datetime(2007, 1, 15, 3, 4, 5, 123000) - - self.cursor.execute("create table t1(dt datetime)") - self.cursor.execute("insert into t1 values (?)", full) - - result = self.cursor.execute("select dt from t1").fetchone()[0] - self.assertEqual(type(result), datetime) - self.assertEqual(result, rounded) - - def test_date(self): - value = date.today() - - self.cursor.execute("create table t1(d date)") - self.cursor.execute("insert into t1 values (?)", value) - - result = self.cursor.execute("select d from t1").fetchone()[0] - self.assertEqual(type(value), date) - self.assertEqual(value, result) - - def test_time(self): - value = datetime.now().time() - - # We aren't yet writing values using the new extended time type so the value written to the database is only - # down to the second. 
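# --- Editor's note: not part of the patch.  The fraction tests above
# expect microseconds to come back truncated to the milliseconds a
# datetime column stores (123456 -> 123000), and test_time below rounds
# to whole seconds for the same reason.  A tiny self-contained check of
# the expected value.
from datetime import datetime

full = datetime(2007, 1, 15, 3, 4, 5, 123456)
stored = full.replace(microsecond=(full.microsecond // 1000) * 1000)
assert stored == datetime(2007, 1, 15, 3, 4, 5, 123000)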
- value = value.replace(microsecond=0) - - self.cursor.execute("create table t1(t time)") - self.cursor.execute("insert into t1 values (?)", value) - - result = self.cursor.execute("select t from t1").fetchone()[0] - self.assertEqual(type(value), time) - self.assertEqual(value, result) - - def test_datetime2(self): - value = datetime(2007, 1, 15, 3, 4, 5) - - self.cursor.execute("create table t1(dt datetime2)") - self.cursor.execute("insert into t1 values (?)", value) - - result = self.cursor.execute("select dt from t1").fetchone()[0] - self.assertEqual(type(value), datetime) - self.assertEqual(value, result) - - # - # ints and floats - # - - def test_int(self): - value = 1234 - self.cursor.execute("create table t1(n int)") - self.cursor.execute("insert into t1 values (?)", value) - result = self.cursor.execute("select n from t1").fetchone()[0] - self.assertEqual(result, value) - - def test_negative_int(self): - value = -1 - self.cursor.execute("create table t1(n int)") - self.cursor.execute("insert into t1 values (?)", value) - result = self.cursor.execute("select n from t1").fetchone()[0] - self.assertEqual(result, value) - - def test_bigint(self): - input = 3000000000 - self.cursor.execute("create table t1(d bigint)") - self.cursor.execute("insert into t1 values (?)", input) - result = self.cursor.execute("select d from t1").fetchone()[0] - self.assertEqual(result, input) - - def test_float(self): - value = 1234.567 - self.cursor.execute("create table t1(n float)") - self.cursor.execute("insert into t1 values (?)", value) - result = self.cursor.execute("select n from t1").fetchone()[0] - self.assertEqual(result, value) - - def test_negative_float(self): - value = -200 - self.cursor.execute("create table t1(n float)") - self.cursor.execute("insert into t1 values (?)", value) - result = self.cursor.execute("select n from t1").fetchone()[0] - self.assertEqual(value, result) - - - # - # stored procedures - # - - # def test_callproc(self): - # "callproc with a simple input-only stored procedure" - # pass - - def test_sp_results(self): - self.cursor.execute( - """ - Create procedure proc1 - AS - select top 10 name, id, xtype, refdate - from sysobjects - """) - rows = self.cursor.execute("exec proc1").fetchall() - self.assertEqual(type(rows), list) - self.assertEqual(len(rows), 10) # there has to be at least 10 items in sysobjects - self.assertEqual(type(rows[0].refdate), datetime) - - - def test_sp_results_from_temp(self): - - # Note: I've used "set nocount on" so that we don't get the number of rows deleted from #tmptable. - # If you don't do this, you'd need to call nextset() once to skip it. 
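# --- Editor's note: not part of the patch.  A minimal sketch of skipping
# a leading rows-affected result with Cursor.nextset(), which is what the
# "set nocount on" in the comment above avoids.  The procedure name is an
# assumption.
def fetch_proc_rows(cursor):
    cursor.execute("exec proc1")
    if cursor.description is None:
        # First result set is only a row count; advance to the real rows.
        cursor.nextset()
    return cursor.fetchall()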
- - self.cursor.execute( - """ - Create procedure proc1 - AS - set nocount on - select top 10 name, id, xtype, refdate - into #tmptable - from sysobjects - - select * from #tmptable - """) - self.cursor.execute("exec proc1") - self.assertTrue(self.cursor.description is not None) - self.assertTrue(len(self.cursor.description) == 4) - - rows = self.cursor.fetchall() - self.assertEqual(type(rows), list) - self.assertEqual(len(rows), 10) # there has to be at least 10 items in sysobjects - self.assertEqual(type(rows[0].refdate), datetime) - - - def test_sp_results_from_vartbl(self): - self.cursor.execute( - """ - Create procedure proc1 - AS - set nocount on - declare @tmptbl table(name varchar(100), id int, xtype varchar(4), refdate datetime) - - insert into @tmptbl - select top 10 name, id, xtype, refdate - from sysobjects - - select * from @tmptbl - """) - self.cursor.execute("exec proc1") - rows = self.cursor.fetchall() - self.assertEqual(type(rows), list) - self.assertEqual(len(rows), 10) # there has to be at least 10 items in sysobjects - self.assertEqual(type(rows[0].refdate), datetime) - - def test_sp_with_dates(self): - # Reported in the forums that passing two datetimes to a stored procedure doesn't work. - self.cursor.execute( - """ - if exists (select * from dbo.sysobjects where id = object_id(N'[test_sp]') and OBJECTPROPERTY(id, N'IsProcedure') = 1) - drop procedure [dbo].[test_sp] - """) - self.cursor.execute( - """ - create procedure test_sp(@d1 datetime, @d2 datetime) - AS - declare @d as int - set @d = datediff(year, @d1, @d2) - select @d - """) - self.cursor.execute("exec test_sp ?, ?", datetime.now(), datetime.now()) - rows = self.cursor.fetchall() - self.assertTrue(rows is not None) - self.assertTrue(rows[0][0] == 0) # 0 years apart - - def test_sp_with_none(self): - # Reported in the forums that passing None caused an error. - self.cursor.execute( - """ - if exists (select * from dbo.sysobjects where id = object_id(N'[test_sp]') and OBJECTPROPERTY(id, N'IsProcedure') = 1) - drop procedure [dbo].[test_sp] - """) - self.cursor.execute( - """ - create procedure test_sp(@x varchar(20)) - AS - declare @y varchar(20) - set @y = @x - select @y - """) - self.cursor.execute("exec test_sp ?", None) - rows = self.cursor.fetchall() - self.assertTrue(rows is not None) - self.assertTrue(rows[0][0] == None) # 0 years apart - - - # - # rowcount - # - - def test_rowcount_delete(self): - self.assertEqual(self.cursor.rowcount, -1) - self.cursor.execute("create table t1(i int)") - count = 4 - for i in range(count): - self.cursor.execute("insert into t1 values (?)", i) - self.cursor.execute("delete from t1") - self.assertEqual(self.cursor.rowcount, count) - - def test_rowcount_nodata(self): - """ - This represents a different code path than a delete that deleted something. - - The return value is SQL_NO_DATA and code after it was causing an error. We could use SQL_NO_DATA to step over - the code that errors out and drop down to the same SQLRowCount code. On the other hand, we could hardcode a - zero return value. - """ - self.cursor.execute("create table t1(i int)") - # This is a different code path internally. - self.cursor.execute("delete from t1") - self.assertEqual(self.cursor.rowcount, 0) - - def test_rowcount_select(self): - """ - Ensure Cursor.rowcount is set properly after a select statement. - - pyodbc calls SQLRowCount after each execute and sets Cursor.rowcount, but SQL Server 2005 returns -1 after a - select statement, so we'll test for that behavior. 
This is valid behavior according to the DB API - specification, but people don't seem to like it. - """ - self.cursor.execute("create table t1(i int)") - count = 4 - for i in range(count): - self.cursor.execute("insert into t1 values (?)", i) - self.cursor.execute("select * from t1") - self.assertEqual(self.cursor.rowcount, -1) - - rows = self.cursor.fetchall() - self.assertEqual(len(rows), count) - self.assertEqual(self.cursor.rowcount, -1) - - def test_rowcount_reset(self): - "Ensure rowcount is reset to -1" - - self.cursor.execute("create table t1(i int)") - count = 4 - for i in range(count): - self.cursor.execute("insert into t1 values (?)", i) - self.assertEqual(self.cursor.rowcount, 1) - - self.cursor.execute("create table t2(i int)") - self.assertEqual(self.cursor.rowcount, -1) - - # - # always return Cursor - # - - # In the 2.0.x branch, Cursor.execute sometimes returned the cursor and sometimes the rowcount. This proved very - # confusing when things went wrong and added very little value even when things went right since users could always - # use: cursor.execute("...").rowcount - - def test_retcursor_delete(self): - self.cursor.execute("create table t1(i int)") - self.cursor.execute("insert into t1 values (1)") - v = self.cursor.execute("delete from t1") - self.assertEqual(v, self.cursor) - - def test_retcursor_nodata(self): - """ - This represents a different code path than a delete that deleted something. - - The return value is SQL_NO_DATA and code after it was causing an error. We could use SQL_NO_DATA to step over - the code that errors out and drop down to the same SQLRowCount code. - """ - self.cursor.execute("create table t1(i int)") - # This is a different code path internally. - v = self.cursor.execute("delete from t1") - self.assertEqual(v, self.cursor) - - def test_retcursor_select(self): - self.cursor.execute("create table t1(i int)") - self.cursor.execute("insert into t1 values (1)") - v = self.cursor.execute("select * from t1") - self.assertEqual(v, self.cursor) - - # - # misc - # - - def test_lower_case(self): - "Ensure pyodbc.lowercase forces returned column names to lowercase." - - # Has to be set before creating the cursor, so we must recreate self.cursor. - - pyodbc.lowercase = True - self.cursor = self.cnxn.cursor() - - self.cursor.execute("create table t1(Abc int, dEf int)") - self.cursor.execute("select * from t1") - - names = [ t[0] for t in self.cursor.description ] - names.sort() - - self.assertEqual(names, [ "abc", "def" ]) - - # Put it back so other tests don't fail. - pyodbc.lowercase = False - - def test_row_description(self): - """ - Ensure Cursor.description is accessible as Row.cursor_description. - """ - self.cursor = self.cnxn.cursor() - self.cursor.execute("create table t1(a int, b char(3))") - self.cnxn.commit() - self.cursor.execute("insert into t1 values(1, 'abc')") - - row = self.cursor.execute("select * from t1").fetchone() - - self.assertEqual(self.cursor.description, row.cursor_description) - - - def test_temp_select(self): - # A project was failing to create temporary tables via select into. 
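# --- Editor's note: not part of the patch.  A small sketch of the
# rowcount behaviour documented by the rowcount tests earlier in this
# file: DML reports the affected-row count, while many drivers report -1
# after a select, which the DB API permits.  Table name is illustrative.
def show_rowcount(cursor):
    cursor.execute("create table t_rc(i int)")
    cursor.execute("insert into t_rc values (1)")
    assert cursor.rowcount == 1            # one row affected by the insert
    cursor.execute("select * from t_rc")
    assert cursor.rowcount in (-1, 1)      # -1 after a select is valid per DB API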
- self.cursor.execute("create table t1(s char(7))") - self.cursor.execute("insert into t1 values(?)", "testing") - v = self.cursor.execute("select * from t1").fetchone()[0] - self.assertEqual(type(v), str) - self.assertEqual(v, "testing") - - self.cursor.execute("select s into t2 from t1") - v = self.cursor.execute("select * from t1").fetchone()[0] - self.assertEqual(type(v), str) - self.assertEqual(v, "testing") - - - def test_money(self): - d = Decimal('123456.78') - self.cursor.execute("create table t1(i int identity(1,1), m money)") - self.cursor.execute("insert into t1(m) values (?)", d) - v = self.cursor.execute("select m from t1").fetchone()[0] - self.assertEqual(v, d) - - - def test_executemany(self): - self.cursor.execute("create table t1(a int, b varchar(10))") - - params = [ (i, str(i)) for i in range(1, 6) ] - - self.cursor.executemany("insert into t1(a, b) values (?,?)", params) - - count = self.cursor.execute("select count(*) from t1").fetchone()[0] - self.assertEqual(count, len(params)) - - self.cursor.execute("select a, b from t1 order by a") - rows = self.cursor.fetchall() - self.assertEqual(count, len(rows)) - - for param, row in zip(params, rows): - self.assertEqual(param[0], row[0]) - self.assertEqual(param[1], row[1]) - - - def test_executemany_one(self): - "Pass executemany a single sequence" - self.cursor.execute("create table t1(a int, b varchar(10))") - - params = [ (1, "test") ] - - self.cursor.executemany("insert into t1(a, b) values (?,?)", params) - - count = self.cursor.execute("select count(*) from t1").fetchone()[0] - self.assertEqual(count, len(params)) - - self.cursor.execute("select a, b from t1 order by a") - rows = self.cursor.fetchall() - self.assertEqual(count, len(rows)) - - for param, row in zip(params, rows): - self.assertEqual(param[0], row[0]) - self.assertEqual(param[1], row[1]) - - - def test_executemany_failure(self): - """ - Ensure that an exception is raised if one query in an executemany fails. - """ - self.cursor.execute("create table t1(a int, b varchar(10))") - - params = [ (1, 'good'), - ('error', 'not an int'), - (3, 'good') ] - - self.assertRaises(pyodbc.Error, self.cursor.executemany, "insert into t1(a, b) value (?, ?)", params) - - - def test_row_slicing(self): - self.cursor.execute("create table t1(a int, b int, c int, d int)"); - self.cursor.execute("insert into t1 values(1,2,3,4)") - - row = self.cursor.execute("select * from t1").fetchone() - - result = row[:] - self.assertTrue(result is row) - - result = row[:-1] - self.assertEqual(result, (1,2,3)) - - result = row[0:4] - self.assertTrue(result is row) - - - def test_row_repr(self): - self.cursor.execute("create table t1(a int, b int, c int, d int)"); - self.cursor.execute("insert into t1 values(1,2,3,4)") - - row = self.cursor.execute("select * from t1").fetchone() - - result = str(row) - self.assertEqual(result, "(1, 2, 3, 4)") - - result = str(row[:-1]) - self.assertEqual(result, "(1, 2, 3)") - - result = str(row[:1]) - self.assertEqual(result, "(1,)") - - - def test_concatenation(self): - v2 = '0123456789' * 30 - v3 = '9876543210' * 30 - - self.cursor.execute("create table t1(c1 int identity(1, 1), c2 varchar(300), c3 varchar(300))") - self.cursor.execute("insert into t1(c2, c3) values (?,?)", v2, v3) - - row = self.cursor.execute("select c2, c3, c2 + c3 as both from t1").fetchone() - - self.assertEqual(row.both, v2 + v3) - - def test_view_select(self): - # Reported in forum: Can't select from a view? I think I do this a lot, but another test never hurts. 
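# --- Editor's note: not part of the patch.  A minimal sketch of the view
# round trip tested just below: populate a base table, create a view over
# it, then select through the view.  Table and view names are
# illustrative.
def view_roundtrip(cursor):
    cursor.execute("create table t_base(c1 int, c2 varchar(50))")
    for i in range(3):
        cursor.execute("insert into t_base values (?, ?)", i, "string%s" % i)
    cursor.execute("create view v_base as select * from t_base")
    rows = cursor.execute("select * from v_base").fetchall()
    assert len(rows) == 3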
- - # Create a table (t1) with 3 rows and a view (t2) into it. - self.cursor.execute("create table t1(c1 int identity(1, 1), c2 varchar(50))") - for i in range(3): - self.cursor.execute("insert into t1(c2) values (?)", "string%s" % i) - self.cursor.execute("create view t2 as select * from t1") - - # Select from the view - self.cursor.execute("select * from t2") - rows = self.cursor.fetchall() - self.assertTrue(rows is not None) - self.assertTrue(len(rows) == 3) - - def test_autocommit(self): - self.assertEqual(self.cnxn.autocommit, False) - - othercnxn = pyodbc.connect(self.connection_string, autocommit=True) - self.assertEqual(othercnxn.autocommit, True) - - othercnxn.autocommit = False - self.assertEqual(othercnxn.autocommit, False) - - def test_unicode_results(self): - "Ensure unicode_results forces Unicode" - othercnxn = pyodbc.connect(self.connection_string, unicode_results=True) - othercursor = othercnxn.cursor() - - # ANSI data in an ANSI column ... - othercursor.execute("create table t1(s varchar(20))") - othercursor.execute("insert into t1 values(?)", 'test') - - # ... should be returned as Unicode - value = othercursor.execute("select s from t1").fetchone()[0] - self.assertEqual(value, u'test') - - - def test_informix_callproc(self): - try: - self.cursor.execute("drop procedure pyodbctest") - self.cnxn.commit() - except: - pass - - self.cursor.execute("create table t1(s varchar(10))") - self.cursor.execute("insert into t1 values(?)", "testing") - - self.cursor.execute(""" - create procedure pyodbctest @var1 varchar(32) - as - begin - select s - from t1 - return - end - """) - self.cnxn.commit() - - # for row in self.cursor.procedureColumns('pyodbctest'): - # print row.procedure_name, row.column_name, row.column_type, row.type_name - - self.cursor.execute("exec pyodbctest 'hi'") - - # print self.cursor.description - # for row in self.cursor: - # print row.s - - def test_skip(self): - # Insert 1, 2, and 3. Fetch 1, skip 2, fetch 3. - - self.cursor.execute("create table t1(id int)"); - for i in range(1, 5): - self.cursor.execute("insert into t1 values(?)", i) - self.cursor.execute("select id from t1 order by id") - self.assertEqual(self.cursor.fetchone()[0], 1) - self.cursor.skip(2) - self.assertEqual(self.cursor.fetchone()[0], 4) - - def test_timeout(self): - self.assertEqual(self.cnxn.timeout, 0) # defaults to zero (off) - - self.cnxn.timeout = 30 - self.assertEqual(self.cnxn.timeout, 30) - - self.cnxn.timeout = 0 - self.assertEqual(self.cnxn.timeout, 0) - - def test_sets_execute(self): - # Only lists and tuples are allowed. - def f(): - self.cursor.execute("create table t1 (word varchar (100))") - words = set (['a']) - self.cursor.execute("insert into t1 (word) VALUES (?)", [words]) - - self.assertRaises(pyodbc.ProgrammingError, f) - - def test_sets_executemany(self): - # Only lists and tuples are allowed. 
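# --- Editor's note: not part of the patch.  A small sketch of the
# parameter rule these two tests check: parameters must be supplied as a
# list or tuple; a set is rejected (ProgrammingError from execute,
# TypeError from executemany).  Table name is illustrative.
import pyodbc

def set_params_rejected(cursor):
    cursor.execute("create table t_words(word varchar(100))")
    cursor.execute("insert into t_words(word) values (?)", ["a"])    # list: ok
    cursor.execute("insert into t_words(word) values (?)", ("b",))   # tuple: ok
    try:
        cursor.execute("insert into t_words(word) values (?)", [{"c"}])
        return False
    except pyodbc.ProgrammingError:
        return True    # a set parameter is rejected, as asserted above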
- def f(): - self.cursor.execute("create table t1 (word varchar (100))") - words = set (['a']) - self.cursor.executemany("insert into t1 (word) values (?)", [words]) - - self.assertRaises(TypeError, f) - - def test_row_execute(self): - "Ensure we can use a Row object as a parameter to execute" - self.cursor.execute("create table t1(n int, s varchar(10))") - self.cursor.execute("insert into t1 values (1, 'a')") - row = self.cursor.execute("select n, s from t1").fetchone() - self.assertNotEqual(row, None) - - self.cursor.execute("create table t2(n int, s varchar(10))") - self.cursor.execute("insert into t2 values (?, ?)", row) - - def test_row_executemany(self): - "Ensure we can use a Row object as a parameter to executemany" - self.cursor.execute("create table t1(n int, s varchar(10))") - - for i in range(3): - self.cursor.execute("insert into t1 values (?, ?)", i, chr(ord('a')+i)) - - rows = self.cursor.execute("select n, s from t1").fetchall() - self.assertNotEqual(len(rows), 0) - - self.cursor.execute("create table t2(n int, s varchar(10))") - self.cursor.executemany("insert into t2 values (?, ?)", rows) - - def test_description(self): - "Ensure cursor.description is correct" - - self.cursor.execute("create table t1(n int, s varchar(8), d decimal(5,2))") - self.cursor.execute("insert into t1 values (1, 'abc', '1.23')") - self.cursor.execute("select * from t1") - - # (I'm not sure the precision of an int is constant across different versions, bits, so I'm hand checking the - # items I do know. - - # int - t = self.cursor.description[0] - self.assertEqual(t[0], 'n') - self.assertEqual(t[1], int) - self.assertEqual(t[5], 0) # scale - self.assertEqual(t[6], True) # nullable - - # varchar(8) - t = self.cursor.description[1] - self.assertEqual(t[0], 's') - self.assertEqual(t[1], str) - self.assertEqual(t[4], 8) # precision - self.assertEqual(t[5], 0) # scale - self.assertEqual(t[6], True) # nullable - - # decimal(5, 2) - t = self.cursor.description[2] - self.assertEqual(t[0], 'd') - self.assertEqual(t[1], Decimal) - self.assertEqual(t[4], 5) # precision - self.assertEqual(t[5], 2) # scale - self.assertEqual(t[6], True) # nullable - - - def test_none_param(self): - "Ensure None can be used for params other than the first" - # Some driver/db versions would fail if NULL was not the first parameter because SQLDescribeParam (only used - # with NULL) could not be used after the first call to SQLBindParameter. This means None always worked for the - # first column, but did not work for later columns. - # - # If SQLDescribeParam doesn't work, pyodbc would use VARCHAR which almost always worked. However, - # binary/varbinary won't allow an implicit conversion. - - self.cursor.execute("create table t1(n int, blob varbinary(max))") - self.cursor.execute("insert into t1 values (1, newid())") - row = self.cursor.execute("select * from t1").fetchone() - self.assertEqual(row.n, 1) - self.assertEqual(type(row.blob), buffer) - - self.cursor.execute("update t1 set n=?, blob=?", 2, None) - row = self.cursor.execute("select * from t1").fetchone() - self.assertEqual(row.n, 2) - self.assertEqual(row.blob, None) - - - def test_output_conversion(self): - def convert(value): - # `value` will be a string. We'll simply add an X at the beginning at the end. 
- return 'X' + value + 'X' - self.cnxn.add_output_converter(pyodbc.SQL_VARCHAR, convert) - self.cursor.execute("create table t1(n int, v varchar(10))") - self.cursor.execute("insert into t1 values (1, '123.45')") - value = self.cursor.execute("select v from t1").fetchone()[0] - self.assertEqual(value, 'X123.45X') - - # Now clear the conversions and try again. There should be no Xs this time. - self.cnxn.clear_output_converters() - value = self.cursor.execute("select v from t1").fetchone()[0] - self.assertEqual(value, '123.45') - - - def test_too_large(self): - """Ensure error raised if insert fails due to truncation""" - value = 'x' * 1000 - self.cursor.execute("create table t1(s varchar(800))") - def test(): - self.cursor.execute("insert into t1 values (?)", value) - self.assertRaises(pyodbc.DataError, test) - - def test_geometry_null_insert(self): - def convert(value): - return value - - self.cnxn.add_output_converter(-151, convert) # -151 is SQL Server's geometry - self.cursor.execute("create table t1(n int, v geometry)") - self.cursor.execute("insert into t1 values (?, ?)", 1, None) - value = self.cursor.execute("select v from t1").fetchone()[0] - self.assertEqual(value, None) - self.cnxn.clear_output_converters() - - def test_login_timeout(self): - # This can only test setting since there isn't a way to cause it to block on the server side. - cnxns = pyodbc.connect(self.connection_string, timeout=2) - - def test_row_equal(self): - self.cursor.execute("create table t1(n int, s varchar(20))") - self.cursor.execute("insert into t1 values (1, 'test')") - row1 = self.cursor.execute("select n, s from t1").fetchone() - row2 = self.cursor.execute("select n, s from t1").fetchone() - b = (row1 == row2) - self.assertEqual(b, True) - - def test_row_gtlt(self): - self.cursor.execute("create table t1(n int, s varchar(20))") - self.cursor.execute("insert into t1 values (1, 'test1')") - self.cursor.execute("insert into t1 values (1, 'test2')") - rows = self.cursor.execute("select n, s from t1 order by s").fetchall() - self.assertTrue(rows[0] < rows[1]) - self.assertTrue(rows[0] <= rows[1]) - self.assertTrue(rows[1] > rows[0]) - self.assertTrue(rows[1] >= rows[0]) - self.assertTrue(rows[0] != rows[1]) - - rows = list(rows) - rows.sort() # uses < - - def test_context_manager(self): - with pyodbc.connect(self.connection_string) as cnxn: - cnxn.getinfo(pyodbc.SQL_DEFAULT_TXN_ISOLATION) - - # The connection should be closed now. - def test(): - cnxn.getinfo(pyodbc.SQL_DEFAULT_TXN_ISOLATION) - self.assertRaises(pyodbc.ProgrammingError, test) - - def test_untyped_none(self): - # From issue 129 - value = self.cursor.execute("select ?", None).fetchone()[0] - self.assertEqual(value, None) - - def test_large_update_nodata(self): - self.cursor.execute('create table t1(a varbinary(max))') - hundredkb = buffer('x'*100*1024) - self.cursor.execute('update t1 set a=? where 1=0', (hundredkb,)) - - def test_func_param(self): - self.cursor.execute(''' - create function func1 (@testparam varchar(4)) - returns @rettest table (param varchar(4)) - as - begin - insert @rettest - select @testparam - return - end - ''') - self.cnxn.commit() - value = self.cursor.execute("select * from func1(?)", 'test').fetchone()[0] - self.assertEqual(value, 'test') - - def test_no_fetch(self): - # Issue 89 with FreeTDS: Multiple selects (or catalog functions that issue selects) without fetches seem to - # confuse the driver. 
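# --- Editor's note: not part of the patch.  A minimal sketch of the
# output-converter hook exercised by test_output_conversion above:
# register a callable per SQL type code, then clear it to restore the
# default decoding.  Under py3 the converter is handed the raw column
# bytes, so this sketch decodes before wrapping.
import pyodbc

def wrap_varchar(cnxn, cursor):
    def convert(raw):
        return 'X' + raw.decode('utf-8') + 'X'
    cnxn.add_output_converter(pyodbc.SQL_VARCHAR, convert)
    wrapped = cursor.execute("select v from t1").fetchone()[0]   # 'X...X'
    cnxn.clear_output_converters()
    return wrapped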
- self.cursor.execute('select 1') - self.cursor.execute('select 1') - self.cursor.execute('select 1') - - def test_drivers(self): - drivers = pyodbc.drivers() - self.assertEqual(list, type(drivers)) - self.assertTrue(len(drivers) > 1) - - m = re.search('DRIVER={?([^}]+?)}?;', self.connection_string, re.IGNORECASE) - current = m.group(1) - self.assertTrue(current in drivers) - - def test_prepare_cleanup(self): - # When statement is prepared, it is kept in case the next execute uses the same statement. This must be - # removed when a non-execute statement is used that returns results, such as SQLTables. - - self.cursor.execute("select top 1 name from sysobjects where name = ?", "bogus") - self.cursor.fetchone() - - self.cursor.tables("bogus") - - self.cursor.execute("select top 1 name from sysobjects where name = ?", "bogus") - self.cursor.fetchone() - - -def main(): - from optparse import OptionParser - parser = OptionParser(usage=usage) - parser.add_option("-v", "--verbose", action="count", help="Increment test verbosity (can be used multiple times)") - parser.add_option("-d", "--debug", action="store_true", default=False, help="Print debugging items") - parser.add_option("-t", "--test", help="Run only the named test") - - (options, args) = parser.parse_args() - - if len(args) > 1: - parser.error('Only one argument is allowed. Do you need quotes around the connection string?') - - if not args: - connection_string = load_setup_connection_string('informixtests') - - if not connection_string: - parser.print_help() - raise SystemExit() - else: - connection_string = args[0] - - if options.verbose: - cnxn = pyodbc.connect(connection_string) - print_library_info(cnxn) - cnxn.close() - - suite = load_tests(InformixTestCase, options.test, connection_string) - - testRunner = unittest.TextTestRunner(verbosity=options.verbose) - result = testRunner.run(suite) - - return result - - -if __name__ == '__main__': - - # Add the build directory to the path so we're testing the latest build, not the installed version. - - add_to_path() - - import pyodbc - sys.exit(0 if main().wasSuccessful() else 1) diff --git a/tests2/mysqltests.py b/tests2/mysqltests.py deleted file mode 100755 index d756c00c..00000000 --- a/tests2/mysqltests.py +++ /dev/null @@ -1,762 +0,0 @@ -#!/usr/bin/python -# -*- coding: latin-1 -*- - -usage = """\ -usage: %prog [options] connection_string - -Unit tests for MySQL. To use, pass a connection string as the parameter. -The tests will create and drop tables t1 and t2 as necessary. - -These tests use the pyodbc library from the build directory, not the version installed in your -Python directories. You must run `python setup.py build` before running these tests. - -You can also put the connection string into a tmp/setup.cfg file like so: - - [mysqltests] - connection-string=DRIVER=MySQL ODBC 8.0 ANSI Driver;charset=utf8mb4;SERVER=localhost;DATABASE=pyodbc;UID=root;PWD=rootpw - -Note: Use the "ANSI" (not the "Unicode") driver and include charset=utf8mb4 in the connection string so the high-Unicode tests won't fail. -""" - -import sys, os, re -import unittest -from decimal import Decimal -from datetime import datetime, date, time -from os.path import join, getsize, dirname, abspath, basename -from testutils import * - -_TESTSTR = '0123456789-abcdefghijklmnopqrstuvwxyz-' - -def _generate_test_string(length): - """ - Returns a string of composed of `seed` to make a string `length` characters long. 
- - To enhance performance, there are 3 ways data is read, based on the length of the value, so most data types are - tested with 3 lengths. This function helps us generate the test data. - - We use a recognizable data set instead of a single character to make it less likely that "overlap" errors will - be hidden and to help us manually identify where a break occurs. - """ - if length <= len(_TESTSTR): - return _TESTSTR[:length] - - c = (length + len(_TESTSTR)-1) / len(_TESTSTR) - v = _TESTSTR * c - return v[:length] - -class MySqlTestCase(unittest.TestCase): - - SMALL_FENCEPOST_SIZES = [ 0, 1, 255, 256, 510, 511, 512, 1023, 1024, 2047, 2048, 4000 ] - LARGE_FENCEPOST_SIZES = [ 4095, 4096, 4097, 10 * 1024, 20 * 1024 ] - - ANSI_FENCEPOSTS = [ _generate_test_string(size) for size in SMALL_FENCEPOST_SIZES ] - UNICODE_FENCEPOSTS = [ unicode(s) for s in ANSI_FENCEPOSTS ] - BLOB_FENCEPOSTS = ANSI_FENCEPOSTS + [ _generate_test_string(size) for size in LARGE_FENCEPOST_SIZES ] - - def __init__(self, method_name, connection_string): - unittest.TestCase.__init__(self, method_name) - self.connection_string = connection_string - - def setUp(self): - self.cnxn = pyodbc.connect(self.connection_string) - self.cursor = self.cnxn.cursor() - - self.cnxn.setdecoding(pyodbc.SQL_CHAR, encoding='utf-8') - self.cnxn.setdecoding(pyodbc.SQL_WCHAR, encoding='utf-8') - self.cnxn.setencoding(str, encoding='utf-8') - self.cnxn.setencoding(unicode, encoding='utf-8', ctype=pyodbc.SQL_CHAR) - - # As of libmyodbc5w 5.3 SQLGetTypeInfo returns absurdly small sizes - # leading to slow writes. Override them: - self.cnxn.maxwrite = 1024 * 1024 * 1024 - - for i in range(3): - try: - self.cursor.execute("drop table t%d" % i) - self.cnxn.commit() - except: - pass - - for i in range(3): - try: - self.cursor.execute("drop procedure proc%d" % i) - self.cnxn.commit() - except: - pass - - self.cnxn.rollback() - - def tearDown(self): - try: - self.cursor.close() - self.cnxn.close() - except: - # If we've already closed the cursor or connection, exceptions are thrown. 
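# --- Editor's note: not part of the patch.  The setUp above still calls
# setencoding(unicode, ...), which does not exist on py3.  A minimal
# sketch of the equivalent py3 configuration for the ANSI MySQL driver
# with charset=utf8mb4; the connection string is an assumed placeholder.
import pyodbc

def configure_utf8(connection_string):
    cnxn = pyodbc.connect(connection_string)
    cnxn.setdecoding(pyodbc.SQL_CHAR, encoding='utf-8')
    cnxn.setdecoding(pyodbc.SQL_WCHAR, encoding='utf-8')
    cnxn.setencoding(encoding='utf-8')    # py3: one str encoding, no unicode type
    return cnxn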
- pass - - def test_multiple_bindings(self): - "More than one bind and select on a cursor" - self.cursor.execute("create table t1(n int)") - self.cursor.execute("insert into t1 values (?)", 1) - self.cursor.execute("insert into t1 values (?)", 2) - self.cursor.execute("insert into t1 values (?)", 3) - for i in range(3): - self.cursor.execute("select n from t1 where n < ?", 10) - self.cursor.execute("select n from t1 where n < 3") - - - def test_different_bindings(self): - self.cursor.execute("create table t1(n int)") - self.cursor.execute("create table t2(d datetime)") - self.cursor.execute("insert into t1 values (?)", 1) - self.cursor.execute("insert into t2 values (?)", datetime.now()) - - def test_drivers(self): - p = pyodbc.drivers() - self.assertTrue(isinstance(p, list)) - - def test_datasources(self): - p = pyodbc.dataSources() - self.assertTrue(isinstance(p, dict)) - - def test_getinfo_string(self): - value = self.cnxn.getinfo(pyodbc.SQL_CATALOG_NAME_SEPARATOR) - self.assertTrue(isinstance(value, str)) - - def test_getinfo_bool(self): - value = self.cnxn.getinfo(pyodbc.SQL_ACCESSIBLE_TABLES) - self.assertTrue(isinstance(value, bool)) - - def test_getinfo_int(self): - value = self.cnxn.getinfo(pyodbc.SQL_DEFAULT_TXN_ISOLATION) - self.assertTrue(isinstance(value, (int, long))) - - def test_getinfo_smallint(self): - value = self.cnxn.getinfo(pyodbc.SQL_CONCAT_NULL_BEHAVIOR) - self.assertTrue(isinstance(value, int)) - - def _test_strtype(self, sqltype, value, colsize=None): - """ - The implementation for string, Unicode, and binary tests. - """ - assert colsize is None or (value is None or colsize >= len(value)) - - if colsize: - sql = "create table t1(s %s(%s))" % (sqltype, colsize) - else: - sql = "create table t1(s %s)" % sqltype - - try: - self.cursor.execute(sql) - except: - print '>>>>', sql - self.cursor.execute("insert into t1 values(?)", value) - v = self.cursor.execute("select * from t1").fetchone()[0] - - # Removing this check for now until I get the charset working properly. - # If we use latin1, results are 'str' instead of 'unicode', which would be - # correct. Setting charset to ucs-2 causes a crash in SQLGetTypeInfo(SQL_DATETIME). - # self.assertEqual(type(v), type(value)) - - if value is not None: - self.assertEqual(len(v), len(value)) - - self.assertEqual(v, value) - - def test_raw_encoding(self): - # Read something that is valid ANSI and make sure it comes through. - # The database is actually going to send us UTF-8 so don't use extended - # characters. - # - # REVIEW: Is there a good way to write UTF-8 into the database and read - # it out? - self.cnxn.setencoding(str, encoding='raw') - - expected = "testing" - self.cursor.execute("create table t1(s varchar(20))") - self.cursor.execute("insert into t1 values (?)", expected) - result = self.cursor.execute("select * from t1").fetchone()[0] - self.assertEqual(result, expected) - - def test_raw_decoding(self): - # Read something that is valid ANSI and make sure it comes through. - # The database is actually going to send us UTF-8 so don't use extended - # characters. - # - # REVIEW: Is there a good way to write UTF-8 into the database and read - # it out? - self.cnxn.setdecoding(pyodbc.SQL_CHAR, encoding='raw') - self._test_strtype('varchar', _TESTSTR, 100) - - # - # varchar - # - - def test_varchar_null(self): - self._test_strtype('varchar', None, 100) - - # Generate a test for each fencepost size: test_varchar_0, etc. 
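# --- Editor's note: not part of the patch.  The getinfo assertions above
# still reference the py2 `long` type; under py3 every integral getinfo
# value is a plain int.  A small sketch of the same checks.
import pyodbc

def check_getinfo_types(cnxn):
    assert isinstance(cnxn.getinfo(pyodbc.SQL_CATALOG_NAME_SEPARATOR), str)
    assert isinstance(cnxn.getinfo(pyodbc.SQL_ACCESSIBLE_TABLES), bool)
    assert isinstance(cnxn.getinfo(pyodbc.SQL_DEFAULT_TXN_ISOLATION), int)
    assert isinstance(cnxn.getinfo(pyodbc.SQL_CONCAT_NULL_BEHAVIOR), int)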
- def _maketest(value): - def t(self): - self._test_strtype('varchar', value, max(1, len(value))) - return t - for value in ANSI_FENCEPOSTS: - locals()['test_varchar_%s' % len(value)] = _maketest(value) - - # Generate a test using Unicode. - for value in UNICODE_FENCEPOSTS: - locals()['test_wvarchar_%s' % len(value)] = _maketest(value) - - def test_varchar_many(self): - self.cursor.execute("create table t1(c1 varchar(300), c2 varchar(300), c3 varchar(300))") - - v1 = 'ABCDEFGHIJ' * 30 - v2 = '0123456789' * 30 - v3 = '9876543210' * 30 - - self.cursor.execute("insert into t1(c1, c2, c3) values (?,?,?)", v1, v2, v3); - row = self.cursor.execute("select c1, c2, c3 from t1").fetchone() - - self.assertEqual(v1, row.c1) - self.assertEqual(v2, row.c2) - self.assertEqual(v3, row.c3) - - def test_varchar_upperlatin(self): - self._test_strtype('varchar', u'', colsize=3) - - # - # binary - # - - def test_null_binary(self): - self._test_strtype('varbinary', None, 100) - - def test_large_null_binary(self): - # Bug 1575064 - self._test_strtype('varbinary', None, 4000) - - # Generate a test for each fencepost size: test_binary_0, etc. - def _maketest(value): - def t(self): - self._test_strtype('varbinary', bytearray(value), max(1, len(value))) - return t - for value in ANSI_FENCEPOSTS: - locals()['test_binary_%s' % len(value)] = _maketest(value) - - # - # blob - # - - def test_blob_null(self): - self._test_strtype('blob', None) - - # Generate a test for each fencepost size: test_blob_0, etc. - def _maketest(value): - def t(self): - self._test_strtype('blob', bytearray(value)) - return t - for value in BLOB_FENCEPOSTS: - locals()['test_blob_%s' % len(value)] = _maketest(value) - - def test_blob_upperlatin(self): - self._test_strtype('blob', bytearray('')) - - # - # text - # - - def test_null_text(self): - self._test_strtype('text', None) - - # Generate a test for each fencepost size: test_text_0, etc. - def _maketest(value): - def t(self): - self._test_strtype('text', value) - return t - for value in ANSI_FENCEPOSTS: - locals()['test_text_%s' % len(value)] = _maketest(value) - - def test_text_upperlatin(self): - self._test_strtype('text', u'') - - # - # unicode - # - - def test_unicode_query(self): - self.cursor.execute(u"select 1") - - # - # bit - # - - # The MySQL driver maps BIT colums to the ODBC bit data type, but they aren't behaving quite like a Boolean value - # (which is what the ODBC bit data type really represents). The MySQL BOOL data type is just an alias for a small - # integer, so pyodbc can't recognize it and map it back to True/False. - # - # You can use both BIT and BOOL and they will act as you expect if you treat them as integers. You can write 0 and - # 1 to them and they will work. 
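
# A minimal sketch of the integer-style usage described in the comment above, assuming a
# reachable MySQL data source; the DSN name here is hypothetical. BIT/BOOL columns
# round-trip cleanly when written and compared as 0/1 integers rather than Python bools:
import pyodbc

cnxn = pyodbc.connect("DSN=mysqltest")            # hypothetical DSN, adjust as needed
cursor = cnxn.cursor()
cursor.execute("create table t1(b bool)")         # BOOL is an alias for TINYINT(1)
cursor.execute("insert into t1 values (?)", 1)    # write plain 0/1, not True/False
value = cursor.execute("select b from t1").fetchone()[0]
assert int(value) == 1                            # compare as an integer
cursor.execute("drop table t1")
cnxn.commit()
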
- - # def test_bit(self): - # value = True - # self.cursor.execute("create table t1(b bit)") - # self.cursor.execute("insert into t1 values (?)", value) - # v = self.cursor.execute("select b from t1").fetchone()[0] - # self.assertEqual(type(v), bool) - # self.assertEqual(v, value) - # - # def test_bit_string_true(self): - # self.cursor.execute("create table t1(b bit)") - # self.cursor.execute("insert into t1 values (?)", "xyzzy") - # v = self.cursor.execute("select b from t1").fetchone()[0] - # self.assertEqual(type(v), bool) - # self.assertEqual(v, True) - # - # def test_bit_string_false(self): - # self.cursor.execute("create table t1(b bit)") - # self.cursor.execute("insert into t1 values (?)", "") - # v = self.cursor.execute("select b from t1").fetchone()[0] - # self.assertEqual(type(v), bool) - # self.assertEqual(v, False) - - # - # decimal - # - - def test_small_decimal(self): - # value = Decimal('1234567890987654321') - value = Decimal('100010') # (I use this because the ODBC docs tell us how the bytes should look in the C struct) - self.cursor.execute("create table t1(d numeric(19))") - self.cursor.execute("insert into t1 values(?)", value) - v = self.cursor.execute("select * from t1").fetchone()[0] - self.assertEqual(type(v), Decimal) - self.assertEqual(v, value) - - - def test_small_decimal_scale(self): - # The same as small_decimal, except with a different scale. This value exactly matches the ODBC documentation - # example in the C Data Types appendix. - value = '1000.10' - value = Decimal(value) - self.cursor.execute("create table t1(d numeric(20,6))") - self.cursor.execute("insert into t1 values(?)", value) - v = self.cursor.execute("select * from t1").fetchone()[0] - self.assertEqual(type(v), Decimal) - self.assertEqual(v, value) - - - def test_negative_decimal_scale(self): - value = Decimal('-10.0010') - self.cursor.execute("create table t1(d numeric(19,4))") - self.cursor.execute("insert into t1 values(?)", value) - v = self.cursor.execute("select * from t1").fetchone()[0] - self.assertEqual(type(v), Decimal) - self.assertEqual(v, value) - - def test_subquery_params(self): - """Ensure parameter markers work in a subquery""" - self.cursor.execute("create table t1(id integer, s varchar(20))") - self.cursor.execute("insert into t1 values (?,?)", 1, 'test') - row = self.cursor.execute(""" - select x.id - from ( - select id - from t1 - where s = ? - and id between ? and ? - ) x - """, 'test', 1, 10).fetchone() - self.assertNotEqual(row, None) - self.assertEqual(row[0], 1) - - def _exec(self): - self.cursor.execute(self.sql) - - def test_close_cnxn(self): - """Make sure using a Cursor after closing its connection doesn't crash.""" - - self.cursor.execute("create table t1(id integer, s varchar(20))") - self.cursor.execute("insert into t1 values (?,?)", 1, 'test') - self.cursor.execute("select * from t1") - - self.cnxn.close() - - # Now that the connection is closed, we expect an exception. (If the code attempts to use - # the HSTMT, we'll get an access violation instead.) 
- self.sql = "select * from t1" - self.assertRaises(pyodbc.ProgrammingError, self._exec) - - def test_empty_string(self): - self.cursor.execute("create table t1(s varchar(20))") - self.cursor.execute("insert into t1 values(?)", "") - - def test_fixed_str(self): - value = u"testing" - self.cursor.execute("create table t1(s char(7))") - self.cursor.execute("insert into t1 values(?)", "testing") - v = self.cursor.execute("select * from t1").fetchone()[0] - self.assertEqual(v, value) - - def test_negative_row_index(self): - self.cursor.execute("create table t1(s varchar(20))") - self.cursor.execute("insert into t1 values(?)", "1") - row = self.cursor.execute("select * from t1").fetchone() - self.assertEqual(row[0], "1") - self.assertEqual(row[-1], "1") - - def test_version(self): - self.assertEqual(3, len(pyodbc.version.split('.'))) # 1.3.1 etc. - - # - # date, time, datetime - # - - def test_datetime(self): - value = datetime(2007, 1, 15, 3, 4, 5) - - self.cursor.execute("create table t1(dt datetime)") - self.cursor.execute("insert into t1 values (?)", value) - - result = self.cursor.execute("select dt from t1").fetchone()[0] - self.assertEqual(value, result) - - def test_date(self): - value = date(2001, 1, 1) - - self.cursor.execute("create table t1(dt date)") - self.cursor.execute("insert into t1 values (?)", value) - - result = self.cursor.execute("select dt from t1").fetchone()[0] - self.assertEqual(type(result), type(value)) - self.assertEqual(result, value) - - # - # ints and floats - # - - def test_int(self): - value = 1234 - self.cursor.execute("create table t1(n int)") - self.cursor.execute("insert into t1 values (?)", value) - result = self.cursor.execute("select n from t1").fetchone()[0] - self.assertEqual(result, value) - - def test_negative_int(self): - value = -1 - self.cursor.execute("create table t1(n int)") - self.cursor.execute("insert into t1 values (?)", value) - result = self.cursor.execute("select n from t1").fetchone()[0] - self.assertEqual(result, value) - - def test_bigint(self): - - # This fails on 64-bit Fedora with 5.1. - # Should return 0x0123456789 - # Does return 0x0000000000 - # - # Top 4 bytes are returned as 0x00 00 00 00. If the input is high enough, they are returned as 0xFF FF FF FF. - input = 0x123456789 - self.cursor.execute("create table t1(d bigint)") - self.cursor.execute("insert into t1 values (?)", input) - result = self.cursor.execute("select d from t1").fetchone()[0] - self.assertEqual(result, input) - - def test_float(self): - value = 1234.5 - self.cursor.execute("create table t1(n float)") - self.cursor.execute("insert into t1 values (?)", value) - result = self.cursor.execute("select n from t1").fetchone()[0] - self.assertEqual(result, value) - - def test_negative_float(self): - value = -200 - self.cursor.execute("create table t1(n float)") - self.cursor.execute("insert into t1 values (?)", value) - result = self.cursor.execute("select n from t1").fetchone()[0] - self.assertEqual(value, result) - - - def test_date(self): - value = date.today() - - self.cursor.execute("create table t1(d date)") - self.cursor.execute("insert into t1 values (?)", value) - - result = self.cursor.execute("select d from t1").fetchone()[0] - self.assertEqual(value, result) - - - def test_time(self): - value = datetime.now().time() - - # We aren't yet writing values using the new extended time type so the value written to the database is only - # down to the second. 
- value = value.replace(microsecond=0) - - self.cursor.execute("create table t1(t time)") - self.cursor.execute("insert into t1 values (?)", value) - - result = self.cursor.execute("select t from t1").fetchone()[0] - self.assertEqual(value, result) - - # - # misc - # - - def test_rowcount_delete(self): - self.assertEqual(self.cursor.rowcount, -1) - self.cursor.execute("create table t1(i int)") - count = 4 - for i in range(count): - self.cursor.execute("insert into t1 values (?)", i) - self.cursor.execute("delete from t1") - self.assertEqual(self.cursor.rowcount, count) - - def test_rowcount_nodata(self): - """ - This represents a different code path than a delete that deleted something. - - The return value is SQL_NO_DATA and code after it was causing an error. We could use SQL_NO_DATA to step over - the code that errors out and drop down to the same SQLRowCount code. On the other hand, we could hardcode a - zero return value. - """ - self.cursor.execute("create table t1(i int)") - # This is a different code path internally. - self.cursor.execute("delete from t1") - self.assertEqual(self.cursor.rowcount, 0) - - def test_rowcount_select(self): - """ - Ensure Cursor.rowcount is set properly after a select statement. - - pyodbc calls SQLRowCount after each execute and sets Cursor.rowcount. Databases can return the actual rowcount - or they can return -1 if it would help performance. MySQL seems to always return the correct rowcount. - """ - self.cursor.execute("create table t1(i int)") - count = 4 - for i in range(count): - self.cursor.execute("insert into t1 values (?)", i) - self.cursor.execute("select * from t1") - self.assertEqual(self.cursor.rowcount, count) - - rows = self.cursor.fetchall() - self.assertEqual(len(rows), count) - self.assertEqual(self.cursor.rowcount, count) - - def test_rowcount_reset(self): - "Ensure rowcount is reset to -1" - - # The Python DB API says that rowcount should be set to -1 and most ODBC drivers let us know there are no - # records. MySQL always returns 0, however. Without parsing the SQL (which we are not going to do), I'm not - # sure how we can tell the difference and set the value to -1. For now, I'll have this test check for 0. - - self.cursor.execute("create table t1(i int)") - count = 4 - for i in range(count): - self.cursor.execute("insert into t1 values (?)", i) - self.assertEqual(self.cursor.rowcount, 1) - - self.cursor.execute("create table t2(i int)") - self.assertEqual(self.cursor.rowcount, 0) - - def test_lower_case(self): - "Ensure pyodbc.lowercase forces returned column names to lowercase." - - # Has to be set before creating the cursor, so we must recreate self.cursor. - - pyodbc.lowercase = True - self.cursor = self.cnxn.cursor() - - self.cursor.execute("create table t1(Abc int, dEf int)") - self.cursor.execute("select * from t1") - - names = [ t[0] for t in self.cursor.description ] - names.sort() - - self.assertEqual(names, [ "abc", "def" ]) - - # Put it back so other tests don't fail. - pyodbc.lowercase = False - - def test_row_description(self): - """ - Ensure Cursor.description is accessible as Row.cursor_description. 
- """ - self.cursor = self.cnxn.cursor() - self.cursor.execute("create table t1(a int, b char(3))") - self.cnxn.commit() - self.cursor.execute("insert into t1 values(1, 'abc')") - - row = self.cursor.execute("select * from t1").fetchone() - self.assertEqual(self.cursor.description, row.cursor_description) - - - def test_executemany(self): - self.cursor.execute("create table t1(a int, b varchar(10))") - - params = [ (i, str(i)) for i in range(1, 6) ] - - self.cursor.executemany("insert into t1(a, b) values (?,?)", params) - - count = self.cursor.execute("select count(*) from t1").fetchone()[0] - self.assertEqual(count, len(params)) - - self.cursor.execute("select a, b from t1 order by a") - rows = self.cursor.fetchall() - self.assertEqual(count, len(rows)) - - for param, row in zip(params, rows): - self.assertEqual(param[0], row[0]) - self.assertEqual(param[1], row[1]) - - - def test_executemany_one(self): - "Pass executemany a single sequence" - self.cursor.execute("create table t1(a int, b varchar(10))") - - params = [ (1, "test") ] - - self.cursor.executemany("insert into t1(a, b) values (?,?)", params) - - count = self.cursor.execute("select count(*) from t1").fetchone()[0] - self.assertEqual(count, len(params)) - - self.cursor.execute("select a, b from t1 order by a") - rows = self.cursor.fetchall() - self.assertEqual(count, len(rows)) - - for param, row in zip(params, rows): - self.assertEqual(param[0], row[0]) - self.assertEqual(param[1], row[1]) - - - # REVIEW: The following fails. Research. - - # def test_executemany_failure(self): - # """ - # Ensure that an exception is raised if one query in an executemany fails. - # """ - # self.cursor.execute("create table t1(a int, b varchar(10))") - # - # params = [ (1, 'good'), - # ('error', 'not an int'), - # (3, 'good') ] - # - # self.assertRaises(pyodbc.Error, self.cursor.executemany, "insert into t1(a, b) value (?, ?)", params) - - - def test_row_slicing(self): - self.cursor.execute("create table t1(a int, b int, c int, d int)"); - self.cursor.execute("insert into t1 values(1,2,3,4)") - - row = self.cursor.execute("select * from t1").fetchone() - - result = row[:] - self.assertTrue(result is row) - - result = row[:-1] - self.assertEqual(result, (1,2,3)) - - result = row[0:4] - self.assertTrue(result is row) - - - def test_row_repr(self): - self.cursor.execute("create table t1(a int, b int, c int, d int)"); - self.cursor.execute("insert into t1 values(1,2,3,4)") - - row = self.cursor.execute("select * from t1").fetchone() - - result = str(row) - self.assertEqual(result, "(1, 2, 3, 4)") - - result = str(row[:-1]) - self.assertEqual(result, "(1, 2, 3)") - - result = str(row[:1]) - self.assertEqual(result, "(1,)") - - - def test_autocommit(self): - self.assertEqual(self.cnxn.autocommit, False) - - othercnxn = pyodbc.connect(self.connection_string, autocommit=True) - self.assertEqual(othercnxn.autocommit, True) - - othercnxn.autocommit = False - self.assertEqual(othercnxn.autocommit, False) - - def test_emoticons_as_parameter(self): - # https://github.com/mkleehammer/pyodbc/issues/423 - # - # When sending a varchar parameter, pyodbc is supposed to set ColumnSize to the number - # of characters. Ensure it works even with 4-byte characters. 
- # - # http://www.fileformat.info/info/unicode/char/1f31c/index.htm - - v = u"x \U0001F31C z" - - self.cursor.execute("CREATE TABLE t1(s varchar(100)) DEFAULT CHARSET=utf8mb4") - self.cursor.execute("insert into t1 values (?)", v) - - result = self.cursor.execute("select s from t1").fetchone()[0] - - self.assertEqual(result, v) - - def test_emoticons_as_literal(self): - # https://github.com/mkleehammer/pyodbc/issues/630 - - v = u"x \U0001F31C z" - - self.cursor.execute("CREATE TABLE t1(s varchar(100)) DEFAULT CHARSET=utf8mb4") - self.cursor.execute("insert into t1 values ('%s')" % v) - - result = self.cursor.execute("select s from t1").fetchone()[0] - - self.assertEqual(result, v) - - -def main(): - from optparse import OptionParser - parser = OptionParser(usage=usage) - parser.add_option("-v", "--verbose", action="count", help="Increment test verbosity (can be used multiple times)") - parser.add_option("-d", "--debug", action="store_true", default=False, help="Print debugging items") - parser.add_option("-t", "--test", help="Run only the named test") - - (options, args) = parser.parse_args() - - if len(args) > 1: - parser.error('Only one argument is allowed. Do you need quotes around the connection string?') - - if not args: - filename = basename(sys.argv[0]) - assert filename.endswith('.py') - connection_string = load_setup_connection_string(filename[:-3]) - - if not connection_string: - parser.print_help() - raise SystemExit() - else: - connection_string = args[0] - - if options.verbose: - cnxn = pyodbc.connect(connection_string) - print_library_info(cnxn) - cnxn.close() - - suite = load_tests(MySqlTestCase, options.test, connection_string) - - testRunner = unittest.TextTestRunner(verbosity=options.verbose) - result = testRunner.run(suite) - - return result - - -if __name__ == '__main__': - - # Add the build directory to the path so we're testing the latest build, not the installed version. - - add_to_path() - - import pyodbc - sys.exit(0 if main().wasSuccessful() else 1) diff --git a/tests2/pgtests.py b/tests2/pgtests.py deleted file mode 100755 index 471bd46e..00000000 --- a/tests2/pgtests.py +++ /dev/null @@ -1,615 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -usage = """\ -usage: %prog [options] connection_string - -Unit tests for PostgreSQL. To use, pass a connection string as the parameter. -The tests will create and drop tables t1 and t2 as necessary. - -These run using the version from the 'build' directory, not the version -installed into the Python directories. You must run python setup.py build -before running the tests. - -You can also put the connection string into a tmp/setup.cfg file like so: - - [pgtests] - connection-string=DSN=PostgreSQL35W - -Note: Be sure to use the "Unicode" (not the "ANSI") version of the PostgreSQL ODBC driver. -""" - -import sys, os, re -import unittest -from decimal import Decimal -from testutils import * - -_TESTSTR = '0123456789-abcdefghijklmnopqrstuvwxyz-' - -def _generate_test_string(length): - """ - Returns a string of composed of `seed` to make a string `length` characters long. - - To enhance performance, there are 3 ways data is read, based on the length of the value, so most data types are - tested with 3 lengths. This function helps us generate the test data. - - We use a recognizable data set instead of a single character to make it less likely that "overlap" errors will - be hidden and to help us manually identify where a break occurs. 
- """ - if length <= len(_TESTSTR): - return _TESTSTR[:length] - - c = (length + len(_TESTSTR)-1) / len(_TESTSTR) - v = _TESTSTR * c - return v[:length] - -class PGTestCase(unittest.TestCase): - - # These are from the C++ code. Keep them up to date. - - # If we are reading a binary, string, or unicode value and do not know how large it is, we'll try reading 2K into a - # buffer on the stack. We then copy into a new Python object. - SMALL_READ = 100 - - # A read guaranteed not to fit in the MAX_STACK_STACK stack buffer, but small enough to be used for varchar (4K max). - LARGE_READ = 4000 - - SMALL_STRING = _generate_test_string(SMALL_READ) - LARGE_STRING = _generate_test_string(LARGE_READ) - - def __init__(self, connection_string, ansi, unicode_results, method_name): - unittest.TestCase.__init__(self, method_name) - self.connection_string = connection_string - self.ansi = ansi - self.unicode = unicode_results - - def setUp(self): - self.cnxn = pyodbc.connect(self.connection_string, ansi=self.ansi) - self.cursor = self.cnxn.cursor() - - # I've set my test database to use UTF-8 which seems most popular. - self.cnxn.setdecoding(pyodbc.SQL_WCHAR, encoding='utf-8') - self.cnxn.setencoding(str, encoding='utf-8') - self.cnxn.setencoding(unicode, encoding='utf-8') - - # As of psql 9.5.04 SQLGetTypeInfo returns absurdly small sizes leading - # to slow writes. Override them: - self.cnxn.maxwrite = 1024 * 1024 * 1024 - - for i in range(3): - try: - self.cursor.execute("drop table t%d" % i) - self.cnxn.commit() - except: - pass - - self.cnxn.rollback() - - - def tearDown(self): - try: - self.cursor.close() - self.cnxn.close() - except: - # If we've already closed the cursor or connection, exceptions are thrown. - pass - - def test_drivers(self): - p = pyodbc.drivers() - self.assertTrue(isinstance(p, list)) - - def test_datasources(self): - p = pyodbc.dataSources() - self.assertTrue(isinstance(p, dict)) - - def test_getinfo_string(self): - value = self.cnxn.getinfo(pyodbc.SQL_CATALOG_NAME_SEPARATOR) - self.assertTrue(isinstance(value, str)) - - def test_getinfo_bool(self): - value = self.cnxn.getinfo(pyodbc.SQL_ACCESSIBLE_TABLES) - self.assertTrue(isinstance(value, bool)) - - def test_getinfo_int(self): - value = self.cnxn.getinfo(pyodbc.SQL_DEFAULT_TXN_ISOLATION) - self.assertTrue(isinstance(value, (int, long))) - - def test_getinfo_smallint(self): - value = self.cnxn.getinfo(pyodbc.SQL_CONCAT_NULL_BEHAVIOR) - self.assertTrue(isinstance(value, int)) - - - def test_negative_float(self): - value = -200 - self.cursor.execute("create table t1(n float)") - self.cursor.execute("insert into t1 values (?)", value) - result = self.cursor.execute("select n from t1").fetchone()[0] - self.assertEqual(value, result) - - - def _test_strtype(self, sqltype, value, colsize=None, resulttype=None): - """ - The implementation for string, Unicode, and binary tests. 
- """ - assert colsize is None or (value is None or colsize >= len(value)) - - if colsize: - sql = "create table t1(s %s(%s))" % (sqltype, colsize) - else: - sql = "create table t1(s %s)" % sqltype - - self.cursor.execute(sql) - self.cursor.execute("insert into t1 values(?)", value) - - self.cursor.execute("select * from t1") - row = self.cursor.fetchone() - result = row[0] - - if resulttype and type(value) is not resulttype: - value = resulttype(value) - - self.assertEqual(result, value) - - - def test_maxwrite(self): - # If we write more than `maxwrite` bytes, pyodbc will switch from - # binding the data all at once to providing it at execute time with - # SQLPutData. The default maxwrite is 1GB so this is rarely needed in - # PostgreSQL but I need to test the functionality somewhere. - self.cnxn.maxwrite = 300 - self._test_strtype('varchar', unicode(_generate_test_string(400), 'utf-8')) - - # - # varchar - # - - def test_empty_varchar(self): - self._test_strtype('varchar', u'', self.SMALL_READ) - - def test_null_varchar(self): - self._test_strtype('varchar', None, self.SMALL_READ) - - def test_large_null_varchar(self): - # There should not be a difference, but why not find out? - self._test_strtype('varchar', None, self.LARGE_READ) - - def test_small_varchar(self): - self._test_strtype('varchar', unicode(self.SMALL_STRING), self.SMALL_READ) - - def test_large_varchar(self): - self._test_strtype('varchar', unicode(self.LARGE_STRING), self.LARGE_READ) - - def test_varchar_many(self): - self.cursor.execute("create table t1(c1 varchar(300), c2 varchar(300), c3 varchar(300))") - - v1 = 'ABCDEFGHIJ' * 30 - v2 = '0123456789' * 30 - v3 = '9876543210' * 30 - - self.cursor.execute("insert into t1(c1, c2, c3) values (?,?,?)", v1, v2, v3); - row = self.cursor.execute("select c1, c2, c3 from t1").fetchone() - - self.assertEqual(v1, row.c1) - self.assertEqual(v2, row.c2) - self.assertEqual(v3, row.c3) - - def test_varchar_bytes(self): - # Write non-unicode data to a varchar field. - self._test_strtype('varchar', self.SMALL_STRING, self.SMALL_READ) - - - def test_small_decimal(self): - # value = Decimal('1234567890987654321') - value = Decimal('100010') # (I use this because the ODBC docs tell us how the bytes should look in the C struct) - self.cursor.execute("create table t1(d numeric(19))") - self.cursor.execute("insert into t1 values(?)", value) - v = self.cursor.execute("select * from t1").fetchone()[0] - self.assertEqual(type(v), Decimal) - self.assertEqual(v, value) - - - def test_small_decimal_scale(self): - # The same as small_decimal, except with a different scale. This value exactly matches the ODBC documentation - # example in the C Data Types appendix. 
- value = '1000.10' - value = Decimal(value) - self.cursor.execute("create table t1(d numeric(20,6))") - self.cursor.execute("insert into t1 values(?)", value) - v = self.cursor.execute("select * from t1").fetchone()[0] - self.assertEqual(type(v), Decimal) - self.assertEqual(v, value) - - - def test_negative_decimal_scale(self): - value = Decimal('-10.0010') - self.cursor.execute("create table t1(d numeric(19,4))") - self.cursor.execute("insert into t1 values(?)", value) - v = self.cursor.execute("select * from t1").fetchone()[0] - self.assertEqual(type(v), Decimal) - self.assertEqual(v, value) - - - def _exec(self): - self.cursor.execute(self.sql) - - def test_close_cnxn(self): - """Make sure using a Cursor after closing its connection doesn't crash.""" - - self.cursor.execute("create table t1(id integer, s varchar(20))") - self.cursor.execute("insert into t1 values (?,?)", 1, 'test') - self.cursor.execute("select * from t1") - - self.cnxn.close() - - # Now that the connection is closed, we expect an exception. (If the code attempts to use - # the HSTMT, we'll get an access violation instead.) - self.sql = "select * from t1" - self.assertRaises(pyodbc.ProgrammingError, self._exec) - - def test_empty_string(self): - self.cursor.execute("create table t1(s varchar(20))") - self.cursor.execute("insert into t1 values(?)", "") - - def test_fixed_str(self): - value = "testing" - self.cursor.execute("create table t1(s char(7))") - self.cursor.execute("insert into t1 values(?)", "testing") - v = self.cursor.execute("select * from t1").fetchone()[0] - self.assertEqual(v, value) - - def test_raw_encoding(self): - # Read something that is valid ANSI and make sure it comes through. - # The database is actually going to send us UTF-8 so don't use extended - # characters. - # - # REVIEW: Is there a good way to write UTF-8 into the database and read - # it out? - self.cnxn.setencoding(str, encoding='raw') - - expected = "testing" - self.cursor.execute("create table t1(s varchar(20))") - self.cursor.execute("insert into t1 values (?)", expected) - result = self.cursor.execute("select * from t1").fetchone()[0] - self.assertEqual(result, expected) - - def test_raw_decoding(self): - # Read something that is valid ANSI and make sure it comes through. - # The database is actually going to send us UTF-8 so don't use extended - # characters. - # - # REVIEW: Is there a good way to write UTF-8 into the database and read - # it out? - self.cnxn.setdecoding(pyodbc.SQL_CHAR, encoding='raw') - self._test_strtype('varchar', self.SMALL_STRING) - - def test_setdecoding(self): - # Force the result to be a string instead of unicode object. I'm not - # sure how to change the encoding for a single column. (Though I'm - # glad you can't - the communications encoding should not depend on - # per-column encoding like MySQL uses.) 
- self.cnxn.setdecoding(pyodbc.SQL_CHAR, encoding='utf8', to=str) - self.cnxn.setdecoding(pyodbc.SQL_WCHAR, encoding='utf8', to=str) - self._test_strtype('varchar', 'test', self.SMALL_READ) - - def test_unicode_latin(self): - value = u"x-\u00C2-y" # A hat : Â - self.cursor.execute("create table t1(s varchar(20))") - self.cursor.execute("insert into t1 values(?)", value) - result = self.cursor.execute("select * from t1").fetchone()[0] - self.assertEqual(result, value) - - - def test_negative_row_index(self): - self.cursor.execute("create table t1(s varchar(20))") - self.cursor.execute("insert into t1 values(?)", "1") - row = self.cursor.execute("select * from t1").fetchone() - self.assertEqual(row[0], "1") - self.assertEqual(row[-1], "1") - - def test_version(self): - self.assertEqual(3, len(pyodbc.version.split('.'))) # 1.3.1 etc. - - def test_rowcount_delete(self): - self.assertEqual(self.cursor.rowcount, -1) - self.cursor.execute("create table t1(i int)") - count = 4 - for i in range(count): - self.cursor.execute("insert into t1 values (?)", i) - self.cursor.execute("delete from t1") - self.assertEqual(self.cursor.rowcount, count) - - def test_rowcount_nodata(self): - """ - This represents a different code path than a delete that deleted something. - - The return value is SQL_NO_DATA and code after it was causing an error. We could use SQL_NO_DATA to step over - the code that errors out and drop down to the same SQLRowCount code. On the other hand, we could hardcode a - zero return value. - """ - self.cursor.execute("create table t1(i int)") - # This is a different code path internally. - self.cursor.execute("delete from t1") - self.assertEqual(self.cursor.rowcount, 0) - - def test_rowcount_select(self): - self.cursor.execute("create table t1(i int)") - count = 4 - for i in range(count): - self.cursor.execute("insert into t1 values (?)", i) - self.cursor.execute("select * from t1") - self.assertEqual(self.cursor.rowcount, 4) - - # PostgreSQL driver fails here? - # def test_rowcount_reset(self): - # "Ensure rowcount is reset to -1" - # - # self.cursor.execute("create table t1(i int)") - # count = 4 - # for i in range(count): - # self.cursor.execute("insert into t1 values (?)", i) - # self.assertEqual(self.cursor.rowcount, 1) - # - # self.cursor.execute("create table t2(i int)") - # self.assertEqual(self.cursor.rowcount, -1) - - def test_lower_case(self): - "Ensure pyodbc.lowercase forces returned column names to lowercase." - - # Has to be set before creating the cursor, so we must recreate self.cursor. - - pyodbc.lowercase = True - self.cursor = self.cnxn.cursor() - - self.cursor.execute("create table t1(Abc int, dEf int)") - self.cursor.execute("select * from t1") - - names = [ t[0] for t in self.cursor.description ] - names.sort() - - self.assertEqual(names, [ "abc", "def" ]) - - # Put it back so other tests don't fail. - pyodbc.lowercase = False - - def test_row_description(self): - """ - Ensure Cursor.description is accessible as Row.cursor_description. 
- """ - self.cursor = self.cnxn.cursor() - self.cursor.execute("create table t1(a int, b char(3))") - self.cnxn.commit() - self.cursor.execute("insert into t1 values(1, 'abc')") - - row = self.cursor.execute("select * from t1").fetchone() - self.assertEqual(self.cursor.description, row.cursor_description) - - - def test_executemany(self): - self.cursor.execute("create table t1(a int, b varchar(10))") - - params = [ (i, str(i)) for i in range(1, 6) ] - - self.cursor.executemany("insert into t1(a, b) values (?,?)", params) - - # REVIEW: Without the cast, we get the following error: - # [07006] [unixODBC]Received an unsupported type from Postgres.;\nERROR: table "t2" does not exist (14) - - count = self.cursor.execute("select cast(count(*) as int) from t1").fetchone()[0] - self.assertEqual(count, len(params)) - - self.cursor.execute("select a, b from t1 order by a") - rows = self.cursor.fetchall() - self.assertEqual(count, len(rows)) - - for param, row in zip(params, rows): - self.assertEqual(param[0], row[0]) - self.assertEqual(param[1], row[1]) - - - def test_executemany_failure(self): - """ - Ensure that an exception is raised if one query in an executemany fails. - """ - self.cursor.execute("create table t1(a int, b varchar(10))") - - params = [ (1, 'good'), - ('error', 'not an int'), - (3, 'good') ] - - self.assertRaises(pyodbc.Error, self.cursor.executemany, "insert into t1(a, b) value (?, ?)", params) - - - def test_executemany_generator(self): - self.cursor.execute("create table t1(a int)") - - self.cursor.executemany("insert into t1(a) values (?)", ((i,) for i in range(4))) - - row = self.cursor.execute("select min(a) mina, max(a) maxa from t1").fetchone() - - self.assertEqual(row.mina, 0) - self.assertEqual(row.maxa, 3) - - - def test_executemany_iterator(self): - self.cursor.execute("create table t1(a int)") - - values = [ (i,) for i in range(4) ] - - self.cursor.executemany("insert into t1(a) values (?)", iter(values)) - - row = self.cursor.execute("select min(a) mina, max(a) maxa from t1").fetchone() - - self.assertEqual(row.mina, 0) - self.assertEqual(row.maxa, 3) - - - def test_row_slicing(self): - self.cursor.execute("create table t1(a int, b int, c int, d int)"); - self.cursor.execute("insert into t1 values(1,2,3,4)") - - row = self.cursor.execute("select * from t1").fetchone() - - result = row[:] - self.assertTrue(result is row) - - result = row[:-1] - self.assertEqual(result, (1,2,3)) - - result = row[0:4] - self.assertTrue(result is row) - - - def test_row_repr(self): - self.cursor.execute("create table t1(a int, b int, c int, d int)"); - self.cursor.execute("insert into t1 values(1,2,3,4)") - - row = self.cursor.execute("select * from t1").fetchone() - - result = str(row) - self.assertEqual(result, "(1, 2, 3, 4)") - - result = str(row[:-1]) - self.assertEqual(result, "(1, 2, 3)") - - result = str(row[:1]) - self.assertEqual(result, "(1,)") - - - def test_pickling(self): - row = self.cursor.execute("select 1 a, 'two' b").fetchone() - - import pickle - s = pickle.dumps(row) - - other = pickle.loads(s) - - self.assertEqual(row, other) - - - def test_int_limits(self): - values = [ (-sys.maxint - 1), -1, 0, 1, 3230392212, sys.maxint ] - - self.cursor.execute("create table t1(a bigint)") - - for value in values: - self.cursor.execute("delete from t1") - self.cursor.execute("insert into t1 values(?)", value) - v = self.cursor.execute("select a from t1").fetchone()[0] - self.assertEqual(v, value) - - def test_emoticons_as_parameter(self): - # 
https://github.com/mkleehammer/pyodbc/issues/423 - # - # When sending a varchar parameter, pyodbc is supposed to set ColumnSize to the number - # of characters. Ensure it works even with 4-byte characters. - # - # http://www.fileformat.info/info/unicode/char/1f31c/index.htm - - v = "x \U0001F31C z" - - self.cursor.execute("CREATE TABLE t1(s varchar(100))") - self.cursor.execute("insert into t1 values (?)", v) - - result = self.cursor.execute("select s from t1").fetchone()[0] - - self.assertEqual(result, v) - - def test_emoticons_as_literal(self): - # https://github.com/mkleehammer/pyodbc/issues/630 - - v = "x \U0001F31C z" - - self.cursor.execute("CREATE TABLE t1(s varchar(100))") - self.cursor.execute("insert into t1 values ('%s')" % v) - - result = self.cursor.execute("select s from t1").fetchone()[0] - - self.assertEqual(result, v) - - def test_cursor_messages(self): - """ - Test the Cursor.messages attribute. - """ - # self.cursor is used in setUp, hence is not brand new at this point - brand_new_cursor = self.cnxn.cursor() - self.assertIsNone(brand_new_cursor.messages) - - # using INFO message level because they are always sent to the client regardless of - # client_min_messages: https://www.postgresql.org/docs/11/runtime-config-client.html - for msg in ('hello world', 'ABCDEFGHIJ' * 400): - self.cursor.execute(""" - CREATE OR REPLACE PROCEDURE test_cursor_messages() - LANGUAGE plpgsql - AS $$ - BEGIN - RAISE INFO '{}' USING ERRCODE = '01000'; - END; - $$; - """.format(msg)) - self.cursor.execute("CALL test_cursor_messages();") - messages = self.cursor.messages - self.assertTrue(type(messages) is list) - self.assertTrue(len(messages) > 0) - self.assertTrue(all(type(m) is tuple for m in messages)) - self.assertTrue(all(len(m) == 2 for m in messages)) - self.assertTrue(all(type(m[0]) is unicode for m in messages)) - self.assertTrue(all(type(m[1]) is unicode for m in messages)) - self.assertTrue(all(m[0] == '[01000] (-1)' for m in messages)) - self.assertTrue(''.join(m[1] for m in messages).endswith(msg)) - - -def main(): - from optparse import OptionParser - parser = OptionParser(usage="usage: %prog [options] connection_string") - parser.add_option("-v", "--verbose", action="count", help="Increment test verbosity (can be used multiple times)") - parser.add_option("-d", "--debug", action="store_true", default=False, help="Print debugging items") - parser.add_option("-t", "--test", help="Run only the named test") - parser.add_option('-a', '--ansi', help='ANSI only', default=False, action='store_true') - parser.add_option('-u', '--unicode', help='Expect results in Unicode', default=False, action='store_true') - - (options, args) = parser.parse_args() - - if len(args) > 1: - parser.error('Only one argument is allowed. 
Do you need quotes around the connection string?') - - if not args: - connection_string = load_setup_connection_string('pgtests') - - if not connection_string: - parser.print_help() - raise SystemExit() - else: - connection_string = args[0] - - if options.verbose: - cnxn = pyodbc.connect(connection_string, ansi=options.ansi) - print_library_info(cnxn) - cnxn.close() - - if options.test: - # Run a single test - if not options.test.startswith('test_'): - options.test = 'test_%s' % (options.test) - - s = unittest.TestSuite([ PGTestCase(connection_string, options.ansi, options.unicode, options.test) ]) - else: - # Run all tests in the class - - methods = [ m for m in dir(PGTestCase) if m.startswith('test_') ] - methods.sort() - s = unittest.TestSuite([ PGTestCase(connection_string, options.ansi, options.unicode, m) for m in methods ]) - - testRunner = unittest.TextTestRunner(verbosity=options.verbose) - result = testRunner.run(s) - - return result - - -if __name__ == '__main__': - - # Add the build directory to the path so we're testing the latest build, not the installed version. - - add_to_path() - - import pyodbc - sys.exit(0 if main().wasSuccessful() else 1) diff --git a/tests2/sqldwtests.py b/tests2/sqldwtests.py deleted file mode 100644 index 95b50300..00000000 --- a/tests2/sqldwtests.py +++ /dev/null @@ -1,1499 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -from __future__ import print_function - -usage = """\ -usage: %prog [options] connection_string - -Unit tests for Azure SQL DW. To use, pass a connection string as the parameter. -The tests will create and drop tables t1 and t2 as necessary. - -These run using the version from the 'build' directory, not the version -installed into the Python directories. You must run python setup.py build -before running the tests. - -You can also put the connection string into a tmp/setup.cfg file like so: - - [sqldwtests] - connection-string=DRIVER={SQL Server};SERVER=localhost;UID=uid;PWD=pwd;DATABASE=db - -The connection string above will use the 2000/2005 driver, even if SQL Server 2008 -is installed: - - 2000: DRIVER={SQL Server} - 2005: DRIVER={SQL Server} - 2008: DRIVER={SQL Server Native Client 10.0} - -If using FreeTDS ODBC, be sure to use version 1.00.97 or newer. -""" - -import sys, os, re, uuid -import unittest -from decimal import Decimal -from datetime import datetime, date, time -from os.path import join, getsize, dirname, abspath -from warnings import warn -from testutils import * - -_TESTSTR = '0123456789-abcdefghijklmnopqrstuvwxyz-' - -def _generate_test_string(length): - """ - Returns a string of `length` characters, constructed by repeating _TESTSTR as necessary. - - To enhance performance, there are 3 ways data is read, based on the length of the value, so most data types are - tested with 3 lengths. This function helps us generate the test data. - - We use a recognizable data set instead of a single character to make it less likely that "overlap" errors will - be hidden and to help us manually identify where a break occurs. 
- """ - if length <= len(_TESTSTR): - return _TESTSTR[:length] - - c = (length + len(_TESTSTR)-1) / len(_TESTSTR) - v = _TESTSTR * c - return v[:length] - -class SqlServerTestCase(unittest.TestCase): - - SMALL_FENCEPOST_SIZES = [ 0, 1, 255, 256, 510, 511, 512, 1023, 1024, 2047, 2048, 4000 ] - LARGE_FENCEPOST_SIZES = [ 4095, 4096, 4097, 10 * 1024, 20 * 1024 ] - MAX_FENCEPOST_SIZES = [ 5 * 1024 * 1024 ] #, 50 * 1024 * 1024 ] - - ANSI_SMALL_FENCEPOSTS = [ _generate_test_string(size) for size in SMALL_FENCEPOST_SIZES ] - UNICODE_SMALL_FENCEPOSTS = [ unicode(s) for s in ANSI_SMALL_FENCEPOSTS ] - ANSI_LARGE_FENCEPOSTS = ANSI_SMALL_FENCEPOSTS + [ _generate_test_string(size) for size in LARGE_FENCEPOST_SIZES ] - UNICODE_LARGE_FENCEPOSTS = UNICODE_SMALL_FENCEPOSTS + [ unicode(s) for s in [_generate_test_string(size) for size in LARGE_FENCEPOST_SIZES ]] - - ANSI_MAX_FENCEPOSTS = ANSI_LARGE_FENCEPOSTS + [ _generate_test_string(size) for size in MAX_FENCEPOST_SIZES ] - UNICODE_MAX_FENCEPOSTS = UNICODE_LARGE_FENCEPOSTS + [ unicode(s) for s in [_generate_test_string(size) for size in MAX_FENCEPOST_SIZES ]] - - - def __init__(self, method_name, connection_string): - unittest.TestCase.__init__(self, method_name) - self.connection_string = connection_string - - def driver_type_is(self, type_name): - recognized_types = { - 'msodbcsql': '(Microsoft) ODBC Driver xx for SQL Server', - 'freetds': 'FreeTDS ODBC', - } - if not type_name in recognized_types.keys(): - raise KeyError('"{0}" is not a recognized driver type: {1}'.format(type_name, list(recognized_types.keys()))) - driver_name = self.cnxn.getinfo(pyodbc.SQL_DRIVER_NAME).lower() - if type_name == 'msodbcsql': - return ('msodbcsql' in driver_name) or ('sqlncli' in driver_name) or ('sqlsrv32.dll' == driver_name) - elif type_name == 'freetds': - return ('tdsodbc' in driver_name) - - def get_sqlserver_version(self): - """ - Returns the major version: 8-->2000, 9-->2005, 10-->2008 - """ - self.cursor.execute("SELECT CAST(SERVERPROPERTY('ProductVersion') AS VARCHAR(255))") - row = self.cursor.fetchone() - return int(row[0].split('.', 1)[0]) - - def setUp(self): - self.cnxn = pyodbc.connect(self.connection_string) - self.cursor = self.cnxn.cursor() - - for i in range(3): - try: - self.cursor.execute("drop table t%d" % i) - except: - pass - - for i in range(3): - try: - self.cursor.execute("drop procedure proc%d" % i) - except: - pass - - try: - self.cursor.execute('drop function func1') - except: - pass - - - def tearDown(self): - try: - self.cursor.close() - self.cnxn.close() - except: - # If we've already closed the cursor or connection, exceptions are thrown. 
- pass - - def test_binary_type(self): - if sys.hexversion >= 0x02060000: - self.assertTrue(pyodbc.BINARY is bytearray) - else: - self.assertTrue(pyodbc.BINARY is buffer) - - def test_multiple_bindings(self): - "More than one bind and select on a cursor" - self.cursor.execute("create table t1(n int)") - self.cursor.execute("insert into t1 values (?)", 1) - self.cursor.execute("insert into t1 values (?)", 2) - self.cursor.execute("insert into t1 values (?)", 3) - for i in range(3): - self.cursor.execute("select n from t1 where n < ?", 10) - self.cursor.execute("select n from t1 where n < 3") - - - def test_different_bindings(self): - self.cursor.execute("create table t1(n int)") - self.cursor.execute("create table t2(d datetime)") - self.cursor.execute("insert into t1 values (?)", 1) - self.cursor.execute("insert into t2 values (?)", datetime.now()) - - def test_drivers(self): - p = pyodbc.drivers() - self.assertTrue(isinstance(p, list)) - - def test_datasources(self): - p = pyodbc.dataSources() - self.assertTrue(isinstance(p, dict)) - - def test_getinfo_string(self): - value = self.cnxn.getinfo(pyodbc.SQL_CATALOG_NAME_SEPARATOR) - self.assertTrue(isinstance(value, str)) - - def test_getinfo_bool(self): - value = self.cnxn.getinfo(pyodbc.SQL_ACCESSIBLE_TABLES) - self.assertTrue(isinstance(value, bool)) - - def test_getinfo_int(self): - value = self.cnxn.getinfo(pyodbc.SQL_DEFAULT_TXN_ISOLATION) - self.assertTrue(isinstance(value, (int, long))) - - def test_getinfo_smallint(self): - value = self.cnxn.getinfo(pyodbc.SQL_CONCAT_NULL_BEHAVIOR) - self.assertTrue(isinstance(value, int)) - - def test_noscan(self): - self.assertEqual(self.cursor.noscan, False) - self.cursor.noscan = True - self.assertEqual(self.cursor.noscan, True) - - def test_nextset(self): - self.cursor.execute("create table t1(i int)") - for i in range(4): - self.cursor.execute("insert into t1(i) values(?)", i) - - self.cursor.execute("select i from t1 where i < 2 order by i; select i from t1 where i >= 2 order by i") - - for i, row in enumerate(self.cursor): - self.assertEqual(i, row.i) - - self.assertEqual(self.cursor.nextset(), True) - - for i, row in enumerate(self.cursor): - self.assertEqual(i + 2, row.i) - - def test_nextset_with_raiserror(self): - self.cursor.execute("select i = 1; RAISERROR('c', 16, 1);") - row = next(self.cursor) - self.assertEqual(1, row.i) - if self.driver_type_is('freetds'): - warn('FREETDS_KNOWN_ISSUE - test_nextset_with_raiserror: test cancelled.') - # AssertionError: ProgrammingError not raised by nextset - # https://github.com/FreeTDS/freetds/issues/230 - return # for now - self.assertRaises(pyodbc.ProgrammingError, self.cursor.nextset) - - def test_fixed_unicode(self): - value = u"t\xebsting" - self.cursor.execute("create table t1(s nchar(7))") - self.cursor.execute("insert into t1 values(?)", u"t\xebsting") - v = self.cursor.execute("select * from t1").fetchone()[0] - self.assertEqual(type(v), unicode) - self.assertEqual(len(v), len(value)) # If we alloc'd wrong, the test below might work because of an embedded NULL - self.assertEqual(v, value) - - - def _test_strtype(self, sqltype, value, resulttype=None, colsize=None): - """ - The implementation for string, Unicode, and binary tests. 
- """ - assert colsize in (None, 'max') or isinstance(colsize, int), colsize - assert colsize in (None, 'max') or (value is None or colsize >= len(value)) - - if colsize: - sql = "create table t1(s %s(%s)) with (heap)" % (sqltype, colsize) - else: - sql = "create table t1(s %s) with (heap)" % sqltype - self.cursor.execute(sql) - - if resulttype is None: - resulttype = type(value) - - sql = "insert into t1 values(?)" - try: - if colsize == 'max': - if sqltype == 'varbinary': - sqlbind = pyodbc.SQL_VARBINARY - elif sqltype == 'varchar': - sqlbind = pyodbc.SQL_VARCHAR - else: - sqlbind = pyodbc.SQL_WVARCHAR - self.cursor.setinputsizes([(sqlbind, 0, 0)]) - elif (sqltype == 'nvarchar' or sqltype == 'varchar') and colsize != 'max' and colsize > 2000: - self.cursor.setinputsizes([(pyodbc.SQL_WVARCHAR, 0, 0)]) - else: - self.cursor.setinputsizes(None) - self.cursor.execute(sql, value) - except pyodbc.DataError: - if self.driver_type_is('freetds'): - # FREETDS_KNOWN_ISSUE - # - # cnxn.getinfo(pyodbc.SQL_DESCRIBE_PARAMETER) returns False for FreeTDS, so - # pyodbc can't call SQLDescribeParam to get the correct parameter type. - # This can lead to errors being returned from SQL Server when sp_prepexec is called, - # e.g., "Implicit conversion from data type varchar to varbinary is not allowed." - # for test_binary_null - # - # So at least verify that the user can manually specify the parameter type - if sqltype == 'varbinary': - sql_param_type = pyodbc.SQL_VARBINARY - # (add elif blocks for other cases as required) - self.cursor.setinputsizes([(sql_param_type, colsize, 0)]) - self.cursor.execute(sql, value) - else: - raise - v = self.cursor.execute("select * from t1").fetchone()[0] - - # To allow buffer --> db --> bytearray tests, always convert the input to the expected result type before - # comparing. - if type(value) is not resulttype: - value = resulttype(value) - - self.assertEqual(v, value) - - - def _test_strliketype(self, sqltype, value, resulttype=None, colsize=None): - """ - The implementation for text, image, ntext, and binary. - - These types do not support comparison operators. - """ - assert colsize is None or isinstance(colsize, int), colsize - assert colsize is None or (value is None or colsize >= len(value)) - - if colsize: - sql = "create table t1(s %s(%s))" % (sqltype, colsize) - else: - sql = "create table t1(s %s)" % sqltype - - if resulttype is None: - resulttype = type(value) - - self.cursor.execute(sql) - self.cursor.execute("insert into t1 values(?)", value) - result = self.cursor.execute("select * from t1").fetchone()[0] - self.assertEqual(type(result), resulttype) - - # To allow buffer --> db --> bytearray tests, always convert the input to the expected result type before - # comparing. - if type(value) is not resulttype: - value = resulttype(value) - - self.assertEqual(result, value) - - - # - # varchar - # - - def test_varchar_null(self): - self._test_strtype('varchar', None, colsize=100) - - # Generate a test for each fencepost size: test_varchar_0, etc. 
- def _maketest(value): - def t(self): - self._test_strtype('varchar', value, colsize=len(value)) - return t - for value in UNICODE_SMALL_FENCEPOSTS: - locals()['test_varchar_%s' % len(value)] = _maketest(value) - - # Also test varchar(max) - def _maketest(value): - def t(self): - self._test_strtype('varchar', value, colsize='max') - return t - for value in UNICODE_MAX_FENCEPOSTS: - locals()['test_varcharmax_%s' % len(value)] = _maketest(value) - - def test_varchar_many(self): - self.cursor.execute("create table t1(c1 varchar(300), c2 varchar(300), c3 varchar(300))") - - v1 = 'ABCDEFGHIJ' * 30 - v2 = '0123456789' * 30 - v3 = '9876543210' * 30 - - self.cursor.execute("insert into t1(c1, c2, c3) values (?,?,?)", v1, v2, v3); - row = self.cursor.execute("select c1, c2, c3, len(c1) as l1, len(c2) as l2, len(c3) as l3 from t1").fetchone() - - self.assertEqual(v1, row.c1) - self.assertEqual(v2, row.c2) - self.assertEqual(v3, row.c3) - - def test_varchar_upperlatin(self): - self._test_strtype('varchar', u'\u00e5', colsize=1) - - # - # nvarchar - # - - def test_nvarchar_null(self): - self._test_strtype('nvarchar', None, colsize=100) - - # Generate a test for each fencepost size: test_unicode_0, etc. - def _maketest(value): - def t(self): - self._test_strtype('nvarchar', value, colsize=len(value)) - return t - for value in UNICODE_SMALL_FENCEPOSTS: - locals()['test_nvarchar_%s' % len(value)] = _maketest(value) - - # Also test nvarchar(max) - def _maketest(value): - def t(self): - self._test_strtype('nvarchar', value, colsize='max') - return t - for value in UNICODE_MAX_FENCEPOSTS: - locals()['test_nvarcharmax_%s' % len(value)] = _maketest(value) - - def test_unicode_upperlatin(self): - self._test_strtype('nvarchar', u'\u00e5', colsize=1) - - def test_unicode_longmax(self): - # Issue 188: Segfault when fetching NVARCHAR(MAX) data over 511 bytes - - ver = self.get_sqlserver_version() - if ver < 9: # 2005+ - return # so pass / ignore - self.cursor.execute("select cast(replicate(N'x', 512) as nvarchar(max))") - - def test_fast_executemany_to_local_temp_table(self): - if self.driver_type_is('freetds'): - warn('FREETDS_KNOWN_ISSUE - test_fast_executemany_to_local_temp_table: test cancelled.') - return - v = u'Ώπα' - self.cursor.execute("CREATE TABLE #issue295 (id INT, txt NVARCHAR(50))") - sql = "INSERT INTO #issue295 (txt) VALUES (?)" - params = [(v,)] - self.cursor.setinputsizes([(pyodbc.SQL_WVARCHAR, 50, 0)]) - self.cursor.fast_executemany = True - self.cursor.executemany(sql, params) - self.assertEqual(self.cursor.execute("SELECT txt FROM #issue295").fetchval(), v) - - # - # binary - # - - def test_binaryNull_object(self): - self.cursor.execute("create table t1(n varbinary(10))") - self.cursor.execute("insert into t1 values (?)", pyodbc.BinaryNull); - - # buffer - - def _maketest(value): - def t(self): - self._test_strtype('varbinary', buffer(value), resulttype=pyodbc.BINARY, colsize=len(value)) - return t - for value in ANSI_SMALL_FENCEPOSTS: - locals()['test_binary_buffer_%s' % len(value)] = _maketest(value) - - # bytearray - - if sys.hexversion >= 0x02060000: - def _maketest(value): - def t(self): - self._test_strtype('varbinary', bytearray(value), colsize=len(value)) - return t - for value in ANSI_SMALL_FENCEPOSTS: - locals()['test_binary_bytearray_%s' % len(value)] = _maketest(value) - - # varbinary(max) - def _maketest(value): - def t(self): - self._test_strtype('varbinary', buffer(value), resulttype=pyodbc.BINARY, colsize='max') - return t - for value in ANSI_MAX_FENCEPOSTS: - 
locals()['test_binarymax_buffer_%s' % len(value)] = _maketest(value) - - # bytearray - - if sys.hexversion >= 0x02060000: - def _maketest(value): - def t(self): - self._test_strtype('varbinary', bytearray(value), colsize='max') - return t - for value in ANSI_MAX_FENCEPOSTS: - locals()['test_binarymax_bytearray_%s' % len(value)] = _maketest(value) - - # - # image - # - - # - # text - # - - # def test_empty_text(self): - # self._test_strliketype('text', bytearray('')) - - # - # xml - # - - # def test_empty_xml(self): - # self._test_strliketype('xml', bytearray('')) - - # - # bit - # - - def test_bit(self): - value = True - self.cursor.execute("create table t1(b bit)") - self.cursor.execute("insert into t1 values (?)", value) - v = self.cursor.execute("select b from t1").fetchone()[0] - self.assertEqual(type(v), bool) - self.assertEqual(v, value) - - # - # decimal - # - - def _decimal(self, precision, scale, negative): - # From test provided by planders (thanks!) in Issue 91 - - self.cursor.execute("create table t1(d decimal(%s, %s))" % (precision, scale)) - - # Construct a decimal that uses the maximum precision and scale. - decStr = '9' * (precision - scale) - if scale: - decStr = decStr + "." + '9' * scale - if negative: - decStr = "-" + decStr - value = Decimal(decStr) - - self.cursor.execute("insert into t1 values(?)", value) - - v = self.cursor.execute("select d from t1").fetchone()[0] - self.assertEqual(v, value) - - def _maketest(p, s, n): - def t(self): - self._decimal(p, s, n) - return t - for (p, s, n) in [ (1, 0, False), - (1, 0, True), - (6, 0, False), - (6, 2, False), - (6, 4, True), - (6, 6, True), - (38, 0, False), - (38, 10, False), - (38, 38, False), - (38, 0, True), - (38, 10, True), - (38, 38, True) ]: - locals()['test_decimal_%s_%s_%s' % (p, s, n and 'n' or 'p')] = _maketest(p, s, n) - - - def test_decimal_e(self): - """Ensure exponential notation decimals are properly handled""" - value = Decimal((0, (1, 2, 3), 5)) # prints as 1.23E+7 - self.cursor.execute("create table t1(d decimal(10, 2))") - self.cursor.execute("insert into t1 values (?)", value) - result = self.cursor.execute("select * from t1").fetchone()[0] - self.assertEqual(result, value) - - def test_subquery_params(self): - """Ensure parameter markers work in a subquery""" - self.cursor.execute("create table t1(id integer, s varchar(20))") - self.cursor.execute("insert into t1 values (?,?)", 1, 'test') - row = self.cursor.execute(""" - select x.id - from ( - select id - from t1 - where s = ? - and id between ? and ? - ) x - """, 'test', 1, 10).fetchone() - self.assertNotEqual(row, None) - self.assertEqual(row[0], 1) - - def _exec(self): - self.cursor.execute(self.sql) - - def test_close_cnxn(self): - """Make sure using a Cursor after closing its connection doesn't crash.""" - - self.cursor.execute("create table t1(id integer, s varchar(20))") - self.cursor.execute("insert into t1 values (?,?)", 1, 'test') - self.cursor.execute("select * from t1") - - self.cnxn.close() - - # Now that the connection is closed, we expect an exception. (If the code attempts to use - # the HSTMT, we'll get an access violation instead.) 
- self.sql = "select * from t1" - self.assertRaises(pyodbc.ProgrammingError, self._exec) - - def test_empty_string(self): - self.cursor.execute("create table t1(s varchar(20))") - self.cursor.execute("insert into t1 values(?)", "") - - def test_empty_string_encoding(self): - self.cnxn.setdecoding(pyodbc.SQL_CHAR, encoding='shift_jis') - value = "" - self.cursor.execute("create table t1(s varchar(20))") - self.cursor.execute("insert into t1 values(?)", value) - v = self.cursor.execute("select * from t1").fetchone()[0] - self.assertEqual(v, value) - - def test_fixed_char(self): - value = "testing" - self.cursor.execute("create table t1(s char(7))") - self.cursor.execute("insert into t1 values(?)", "testing") - v = self.cursor.execute("select * from t1").fetchone()[0] - self.assertEqual(v, value) - - def test_empty_unicode(self): - self.cursor.execute("create table t1(s nvarchar(20))") - self.cursor.execute("insert into t1 values(?)", u"") - - def test_empty_unicode_encoding(self): - self.cnxn.setdecoding(pyodbc.SQL_CHAR, encoding='shift_jis') - value = "" - self.cursor.execute("create table t1(s nvarchar(20))") - self.cursor.execute("insert into t1 values(?)", value) - v = self.cursor.execute("select * from t1").fetchone()[0] - self.assertEqual(v, value) - - def test_unicode_query(self): - self.cursor.execute(u"select 1") - - # From issue #206 - def _maketest(value): - def t(self): - self._test_strtype('nvarchar', value, colsize=len(value)) - return t - locals()['test_chinese_param'] = _maketest(u'我的') - - def test_chinese(self): - v = u'我的' - self.cursor.execute(u"SELECT N'我的' AS [Name]") - row = self.cursor.fetchone() - self.assertEqual(row[0], v) - - self.cursor.execute(u"SELECT N'我的' AS [Name]") - rows = self.cursor.fetchall() - self.assertEqual(rows[0][0], v) - - def test_negative_row_index(self): - self.cursor.execute("create table t1(s varchar(20))") - self.cursor.execute("insert into t1 values(?)", "1") - row = self.cursor.execute("select * from t1").fetchone() - self.assertEqual(row[0], "1") - self.assertEqual(row[-1], "1") - - def test_version(self): - self.assertEqual(3, len(pyodbc.version.split('.'))) # 1.3.1 etc. - - # - # date, time, datetime - # - - def test_datetime(self): - value = datetime(2007, 1, 15, 3, 4, 5) - - self.cursor.execute("create table t1(dt datetime)") - self.cursor.execute("insert into t1 values (?)", value) - - result = self.cursor.execute("select dt from t1").fetchone()[0] - self.assertEqual(type(result), datetime) - self.assertEqual(value, result) - - def test_datetime_fraction(self): - # SQL Server supports milliseconds, but Python's datetime supports nanoseconds, so the most granular datetime - # supported is xxx000. - - value = datetime(2007, 1, 15, 3, 4, 5, 123000) - - self.cursor.execute("create table t1(dt datetime)") - self.cursor.execute("insert into t1 values (?)", value) - - result = self.cursor.execute("select dt from t1").fetchone()[0] - self.assertEqual(type(result), datetime) - self.assertEqual(value, result) - - def test_datetime_fraction_rounded(self): - # SQL Server supports milliseconds, but Python's datetime supports nanoseconds. pyodbc rounds down to what the - # database supports. 
- - full = datetime(2007, 1, 15, 3, 4, 5, 123456) - rounded = datetime(2007, 1, 15, 3, 4, 5, 123000) - - self.cursor.execute("create table t1(dt datetime)") - self.cursor.execute("insert into t1 values (?)", full) - - result = self.cursor.execute("select dt from t1").fetchone()[0] - self.assertEqual(type(result), datetime) - self.assertEqual(rounded, result) - - def test_date(self): - ver = self.get_sqlserver_version() - if ver < 10: # 2008 only - return # so pass / ignore - - value = date.today() - - self.cursor.execute("create table t1(d date)") - self.cursor.execute("insert into t1 values (?)", value) - - result = self.cursor.execute("select d from t1").fetchone()[0] - self.assertEqual(type(result), date) - self.assertEqual(value, result) - - def test_time(self): - ver = self.get_sqlserver_version() - if ver < 10: # 2008 only - return # so pass / ignore - - value = datetime.now().time() - - # We aren't yet writing values using the new extended time type so the value written to the database is only - # down to the second. - value = value.replace(microsecond=0) - - self.cursor.execute("create table t1(t time)") - self.cursor.execute("insert into t1 values (?)", value) - - result = self.cursor.execute("select t from t1").fetchone()[0] - self.assertEqual(type(result), time) - self.assertEqual(value, result) - - def test_datetime2(self): - value = datetime(2007, 1, 15, 3, 4, 5) - - self.cursor.execute("create table t1(dt datetime2)") - self.cursor.execute("insert into t1 values (?)", value) - - result = self.cursor.execute("select dt from t1").fetchone()[0] - self.assertEqual(type(result), datetime) - self.assertEqual(value, result) - - # - # ints and floats - # - - def test_int(self): - value = 1234 - self.cursor.execute("create table t1(n int)") - self.cursor.execute("insert into t1 values (?)", value) - result = self.cursor.execute("select n from t1").fetchone()[0] - self.assertEqual(result, value) - - def test_negative_int(self): - value = -1 - self.cursor.execute("create table t1(n int)") - self.cursor.execute("insert into t1 values (?)", value) - result = self.cursor.execute("select n from t1").fetchone()[0] - self.assertEqual(result, value) - - def test_bigint(self): - input = 3000000000 - self.cursor.execute("create table t1(d bigint)") - self.cursor.execute("insert into t1 values (?)", input) - result = self.cursor.execute("select d from t1").fetchone()[0] - self.assertEqual(result, input) - - def test_float(self): - value = 1234.567 - self.cursor.execute("create table t1(n float)") - self.cursor.execute("insert into t1 values (?)", value) - result = self.cursor.execute("select n from t1").fetchone()[0] - self.assertEqual(result, value) - - def test_negative_float(self): - value = -200 - self.cursor.execute("create table t1(n float)") - self.cursor.execute("insert into t1 values (?)", value) - result = self.cursor.execute("select n from t1").fetchone()[0] - self.assertEqual(value, result) - - - # - # stored procedures - # - - # def test_callproc(self): - # "callproc with a simple input-only stored procedure" - # pass - - def test_sp_results(self): - self.cursor.execute( - """ - Create procedure proc1 - AS - select top 10 name, id, xtype, refdate - from sysobjects - """) - rows = self.cursor.execute("exec proc1").fetchall() - self.assertEqual(type(rows), list) - self.assertEqual(len(rows), 10) # there has to be at least 10 items in sysobjects - self.assertEqual(type(rows[0].refdate), datetime) - - - def test_sp_results_from_temp(self): - - # Note: I've used "set nocount on" so that 
we don't get the number of rows deleted from #tmptable.
-        # If you don't do this, you'd need to call nextset() once to skip it.
-
-        self.cursor.execute(
-            """
-            Create procedure proc1
-            AS
-            set nocount on
-            select top 10 name, id, xtype, refdate
-            into #tmptable
-            from sysobjects
-
-            select * from #tmptable
-            """)
-        self.cursor.execute("exec proc1")
-        self.assertTrue(self.cursor.description is not None)
-        self.assertTrue(len(self.cursor.description) == 4)
-
-        rows = self.cursor.fetchall()
-        self.assertEqual(type(rows), list)
-        self.assertEqual(len(rows), 10)      # there has to be at least 10 items in sysobjects
-        self.assertEqual(type(rows[0].refdate), datetime)
-
-
-    def test_sp_with_dates(self):
-        # Reported in the forums that passing two datetimes to a stored procedure doesn't work.
-        self.cursor.execute(
-            """
-            if exists (select * from dbo.sysobjects where id = object_id(N'[test_sp]') and OBJECTPROPERTY(id, N'IsProcedure') = 1)
-                drop procedure [dbo].[test_sp]
-            """)
-        self.cursor.execute(
-            """
-            create procedure test_sp(@d1 datetime, @d2 datetime)
-            AS
-                declare @d as int
-                set @d = datediff(year, @d1, @d2)
-                select @d
-            """)
-        self.cursor.execute("exec test_sp ?, ?", datetime.now(), datetime.now())
-        rows = self.cursor.fetchall()
-        self.assertTrue(rows is not None)
-        self.assertTrue(rows[0][0] == 0)   # 0 years apart
-
-    def test_sp_with_none(self):
-        # Reported in the forums that passing None caused an error.
-        self.cursor.execute(
-            """
-            if exists (select * from dbo.sysobjects where id = object_id(N'[test_sp]') and OBJECTPROPERTY(id, N'IsProcedure') = 1)
-                drop procedure [dbo].[test_sp]
-            """)
-        self.cursor.execute(
-            """
-            create procedure test_sp(@x varchar(20))
-            AS
-                declare @y varchar(20)
-                set @y = @x
-                select @y
-            """)
-        self.cursor.execute("exec test_sp ?", None)
-        rows = self.cursor.fetchall()
-        self.assertTrue(rows is not None)
-        self.assertTrue(rows[0][0] == None)   # the None passed in should come back unchanged
-
-
-    #
-    # rowcount
-    #
-
-    def test_rowcount_delete(self):
-        self.assertEqual(self.cursor.rowcount, -1)
-        self.cursor.execute("create table t1(i int)")
-        count = 4
-        for i in range(count):
-            self.cursor.execute("insert into t1 values (?)", i)
-        self.cursor.execute("delete from t1")
-        self.assertEqual(self.cursor.rowcount, count)
-
-    def test_rowcount_nodata(self):
-        """
-        This represents a different code path than a delete that deleted something.
-
-        The return value is SQL_NO_DATA and code after it was causing an error.  We could use SQL_NO_DATA to step over
-        the code that errors out and drop down to the same SQLRowCount code.  On the other hand, we could hardcode a
-        zero return value.
-        """
-        self.cursor.execute("create table t1(i int)")
-        # This is a different code path internally.
-        self.cursor.execute("delete from t1")
-        self.assertEqual(self.cursor.rowcount, 0)
-
-    def test_rowcount_select(self):
-        """
-        Ensure Cursor.rowcount is set properly after a select statement.
-
-        pyodbc calls SQLRowCount after each execute and sets Cursor.rowcount, but SQL Server 2005 returns -1 after a
-        select statement, so we'll test for that behavior.  This is valid behavior according to the DB API
-        specification, but people don't seem to like it.
- """ - self.cursor.execute("create table t1(i int)") - count = 4 - for i in range(count): - self.cursor.execute("insert into t1 values (?)", i) - self.cursor.execute("select * from t1") - self.assertEqual(self.cursor.rowcount, -1) - - rows = self.cursor.fetchall() - self.assertEqual(len(rows), count) - self.assertEqual(self.cursor.rowcount, -1) - - def test_rowcount_reset(self): - "Ensure rowcount is reset after DDL" - - ddl_rowcount = 0 if self.driver_type_is('freetds') else -1 - - self.cursor.execute("create table t1(i int)") - count = 4 - for i in range(count): - self.cursor.execute("insert into t1 values (?)", i) - self.assertEqual(self.cursor.rowcount, 1) - - self.cursor.execute("create table t2(i int)") - self.assertEqual(self.cursor.rowcount, ddl_rowcount) - - # - # always return Cursor - # - - # In the 2.0.x branch, Cursor.execute sometimes returned the cursor and sometimes the rowcount. This proved very - # confusing when things went wrong and added very little value even when things went right since users could always - # use: cursor.execute("...").rowcount - - def test_retcursor_delete(self): - self.cursor.execute("create table t1(i int)") - self.cursor.execute("insert into t1 values (1)") - v = self.cursor.execute("delete from t1") - self.assertEqual(v, self.cursor) - - def test_retcursor_nodata(self): - """ - This represents a different code path than a delete that deleted something. - - The return value is SQL_NO_DATA and code after it was causing an error. We could use SQL_NO_DATA to step over - the code that errors out and drop down to the same SQLRowCount code. - """ - self.cursor.execute("create table t1(i int)") - # This is a different code path internally. - v = self.cursor.execute("delete from t1") - self.assertEqual(v, self.cursor) - - def test_retcursor_select(self): - self.cursor.execute("create table t1(i int)") - self.cursor.execute("insert into t1 values (1)") - v = self.cursor.execute("select * from t1") - self.assertEqual(v, self.cursor) - - # - # misc - # - - def table_with_spaces(self): - "Ensure we can select using [x z] syntax" - - try: - self.cursor.execute("create table [test one](int n)") - self.cursor.execute("insert into [test one] values(1)") - self.cursor.execute("select * from [test one]") - v = self.cursor.fetchone()[0] - self.assertEqual(v, 1) - finally: - self.cnxn.rollback() - - def test_lower_case(self): - "Ensure pyodbc.lowercase forces returned column names to lowercase." - - # Has to be set before creating the cursor, so we must recreate self.cursor. - - pyodbc.lowercase = True - self.cursor = self.cnxn.cursor() - - self.cursor.execute("create table t1(Abc int, dEf int)") - self.cursor.execute("select * from t1") - - names = [ t[0] for t in self.cursor.description ] - names.sort() - - self.assertEqual(names, [ "abc", "def" ]) - - # Put it back so other tests don't fail. - pyodbc.lowercase = False - - def test_row_description(self): - """ - Ensure Cursor.description is accessible as Row.cursor_description. - """ - self.cursor = self.cnxn.cursor() - self.cursor.execute("create table t1(a int, b char(3))") - self.cursor.execute("insert into t1 values(1, 'abc')") - - row = self.cursor.execute("select * from t1").fetchone() - - self.assertEqual(self.cursor.description, row.cursor_description) - - - def test_temp_select(self): - # A project was failing to create temporary tables via select into. 
- self.cursor.execute("create table t1(s char(7))") - self.cursor.execute("insert into t1 values(?)", "testing") - v = self.cursor.execute("select * from t1").fetchone()[0] - self.assertEqual(type(v), unicode) - self.assertEqual(v, "testing") - - self.cursor.execute("select s into t2 from t1") - v = self.cursor.execute("select * from t1").fetchone()[0] - self.assertEqual(type(v), unicode) - self.assertEqual(v, "testing") - - - def test_money(self): - d = Decimal('123456.78') - self.cursor.execute("create table t1(i int identity(1,1), m money)") - self.cursor.execute("insert into t1(m) values (?)", d) - v = self.cursor.execute("select m from t1").fetchone()[0] - self.assertEqual(v, d) - - - def test_executemany(self): - self.cursor.execute("create table t1(a int, b varchar(10))") - - params = [ (i, str(i)) for i in range(1, 6) ] - - self.cursor.executemany("insert into t1(a, b) values (?,?)", params) - - count = self.cursor.execute("select count(*) from t1").fetchone()[0] - self.assertEqual(count, len(params)) - - self.cursor.execute("select a, b from t1 order by a") - rows = self.cursor.fetchall() - self.assertEqual(count, len(rows)) - - for param, row in zip(params, rows): - self.assertEqual(param[0], row[0]) - self.assertEqual(param[1], row[1]) - - - def test_executemany_one(self): - "Pass executemany a single sequence" - self.cursor.execute("create table t1(a int, b varchar(10))") - - params = [ (1, "test") ] - - self.cursor.executemany("insert into t1(a, b) values (?,?)", params) - - count = self.cursor.execute("select count(*) from t1").fetchone()[0] - self.assertEqual(count, len(params)) - - self.cursor.execute("select a, b from t1 order by a") - rows = self.cursor.fetchall() - self.assertEqual(count, len(rows)) - - for param, row in zip(params, rows): - self.assertEqual(param[0], row[0]) - self.assertEqual(param[1], row[1]) - - def test_executemany_dae_0(self): - """ - DAE for 0-length value - """ - self.cursor.execute("create table t1(a nvarchar(max)) with (heap)") - - self.cursor.fast_executemany = True - self.cursor.executemany("insert into t1(a) values(?)", [['']]) - - self.assertEqual(self.cursor.execute("select a from t1").fetchone()[0], '') - - self.cursor.fast_executemany = False - - def test_executemany_failure(self): - """ - Ensure that an exception is raised if one query in an executemany fails. 
- """ - self.cursor.execute("create table t1(a int, b varchar(10))") - - params = [ (1, 'good'), - ('error', 'not an int'), - (3, 'good') ] - - self.assertRaises(pyodbc.Error, self.cursor.executemany, "insert into t1(a, b) value (?, ?)", params) - - - def test_row_slicing(self): - self.cursor.execute("create table t1(a int, b int, c int, d int)"); - self.cursor.execute("insert into t1 values(1,2,3,4)") - - row = self.cursor.execute("select * from t1").fetchone() - - result = row[:] - self.assertTrue(result is row) - - result = row[:-1] - self.assertEqual(result, (1,2,3)) - - result = row[0:4] - self.assertTrue(result is row) - - - def test_row_repr(self): - self.cursor.execute("create table t1(a int, b int, c int, d int)"); - self.cursor.execute("insert into t1 values(1,2,3,4)") - - row = self.cursor.execute("select * from t1").fetchone() - - result = str(row) - self.assertEqual(result, "(1, 2, 3, 4)") - - result = str(row[:-1]) - self.assertEqual(result, "(1, 2, 3)") - - result = str(row[:1]) - self.assertEqual(result, "(1,)") - - - def test_concatenation(self): - v2 = '0123456789' * 30 - v3 = '9876543210' * 30 - - self.cursor.execute("create table t1(c1 int identity(1, 1), c2 varchar(300), c3 varchar(300))") - self.cursor.execute("insert into t1(c2, c3) values (?,?)", v2, v3) - - row = self.cursor.execute("select c2, c3, c2 + c3 as both from t1").fetchone() - - self.assertEqual(row.both, v2 + v3) - - def test_view_select(self): - # Reported in forum: Can't select from a view? I think I do this a lot, but another test never hurts. - - # Create a table (t1) with 3 rows and a view (t2) into it. - self.cursor.execute("create table t1(c1 int identity(1, 1), c2 varchar(50))") - for i in range(3): - self.cursor.execute("insert into t1(c2) values (?)", "string%s" % i) - self.cursor.execute("create view t2 as select * from t1") - - # Select from the view - self.cursor.execute("select * from t2") - rows = self.cursor.fetchall() - self.assertTrue(rows is not None) - self.assertTrue(len(rows) == 3) - self.cursor.execute("drop view t2") - - def test_autocommit(self): - self.assertEqual(self.cnxn.autocommit, False) - - othercnxn = pyodbc.connect(self.connection_string, autocommit=True) - self.assertEqual(othercnxn.autocommit, True) - - othercnxn.autocommit = False - self.assertEqual(othercnxn.autocommit, False) - - def test_cursorcommit(self): - "Ensure cursor.commit works" - othercnxn = pyodbc.connect(self.connection_string, autocommit=True) - othercursor = othercnxn.cursor() - othercnxn = None - - othercursor.execute("create table t1(s varchar(20))") - othercursor.execute("insert into t1 values(?)", 'test') - othercursor.commit() - - value = self.cursor.execute("select s from t1").fetchone()[0] - self.assertEqual(value, 'test') - - - def test_unicode_results(self): - "Ensure unicode_results forces Unicode" - othercnxn = pyodbc.connect(self.connection_string, unicode_results=True, autocommit=True) - othercursor = othercnxn.cursor() - - # ANSI data in an ANSI column ... - othercursor.execute("create table t1(s varchar(20))") - othercursor.execute("insert into t1 values(?)", 'test') - - # ... should be returned as Unicode - value = othercursor.execute("select s from t1").fetchone()[0] - self.assertEqual(value, u'test') - - - - def test_skip(self): - # Insert 1, 2, and 3. Fetch 1, skip 2, fetch 3. 
- - self.cursor.execute("create table t1(id int)"); - for i in range(1, 5): - self.cursor.execute("insert into t1 values(?)", i) - self.cursor.execute("select id from t1 order by id") - self.assertEqual(self.cursor.fetchone()[0], 1) - self.cursor.skip(2) - self.assertEqual(self.cursor.fetchone()[0], 4) - - def test_timeout(self): - self.assertEqual(self.cnxn.timeout, 0) # defaults to zero (off) - - self.cnxn.timeout = 30 - self.assertEqual(self.cnxn.timeout, 30) - - self.cnxn.timeout = 0 - self.assertEqual(self.cnxn.timeout, 0) - - def test_sets_execute(self): - # Only lists and tuples are allowed. - def f(): - self.cursor.execute("create table t1 (word varchar (100))") - words = set (['a']) - self.cursor.execute("insert into t1 (word) VALUES (?)", [words]) - - self.assertRaises(pyodbc.ProgrammingError, f) - - def test_sets_executemany(self): - # Only lists and tuples are allowed. - def f(): - self.cursor.execute("create table t1 (word varchar (100))") - words = set (['a']) - self.cursor.executemany("insert into t1 (word) values (?)", [words]) - - self.assertRaises(TypeError, f) - - def test_row_execute(self): - "Ensure we can use a Row object as a parameter to execute" - self.cursor.execute("create table t1(n int, s varchar(10))") - self.cursor.execute("insert into t1 values (1, 'a')") - row = self.cursor.execute("select n, s from t1").fetchone() - self.assertNotEqual(row, None) - - self.cursor.execute("create table t2(n int, s varchar(10))") - self.cursor.execute("insert into t2 values (?, ?)", row) - - def test_row_executemany(self): - "Ensure we can use a Row object as a parameter to executemany" - self.cursor.execute("create table t1(n int, s varchar(10))") - - for i in range(3): - self.cursor.execute("insert into t1 values (?, ?)", i, chr(ord('a')+i)) - - rows = self.cursor.execute("select n, s from t1").fetchall() - self.assertNotEqual(len(rows), 0) - - self.cursor.execute("create table t2(n int, s varchar(10))") - self.cursor.executemany("insert into t2 values (?, ?)", rows) - - def test_description(self): - "Ensure cursor.description is correct" - - self.cursor.execute("create table t1(n int, s varchar(8), d decimal(5,2))") - self.cursor.execute("insert into t1 values (1, 'abc', '1.23')") - self.cursor.execute("select * from t1") - - # (I'm not sure the precision of an int is constant across different versions, bits, so I'm hand checking the - # items I do know. - - # int - t = self.cursor.description[0] - self.assertEqual(t[0], 'n') - self.assertEqual(t[1], int) - self.assertEqual(t[5], 0) # scale - self.assertEqual(t[6], True) # nullable - - # varchar(8) - t = self.cursor.description[1] - self.assertEqual(t[0], 's') - self.assertEqual(t[1], str) - self.assertEqual(t[4], 8) # precision - self.assertEqual(t[5], 0) # scale - self.assertEqual(t[6], True) # nullable - - # decimal(5, 2) - t = self.cursor.description[2] - self.assertEqual(t[0], 'd') - self.assertEqual(t[1], Decimal) - self.assertEqual(t[4], 5) # precision - self.assertEqual(t[5], 2) # scale - self.assertEqual(t[6], True) # nullable - - - def test_none_param(self): - "Ensure None can be used for params other than the first" - # Some driver/db versions would fail if NULL was not the first parameter because SQLDescribeParam (only used - # with NULL) could not be used after the first call to SQLBindParameter. This means None always worked for the - # first column, but did not work for later columns. - # - # If SQLDescribeParam doesn't work, pyodbc would use VARCHAR which almost always worked. 
However, - # binary/varbinary won't allow an implicit conversion. - - self.cursor.execute("create table t1(n int, blob varbinary(max)) with(heap)") - self.cursor.execute("insert into t1 values (1, 0x1234)") - row = self.cursor.execute("select * from t1").fetchone() - self.assertEqual(row.n, 1) - self.assertEqual(type(row.blob), bytearray) - - sql = "update t1 set n=?, blob=?" - try: - self.cursor.setinputsizes([(), (pyodbc.SQL_VARBINARY, None, None)]) - self.cursor.execute(sql, 2, None) - except pyodbc.DataError: - if self.driver_type_is('freetds'): - # FREETDS_KNOWN_ISSUE - # - # cnxn.getinfo(pyodbc.SQL_DESCRIBE_PARAMETER) returns False for FreeTDS, so - # pyodbc can't call SQLDescribeParam to get the correct parameter type. - # This can lead to errors being returned from SQL Server when sp_prepexec is called, - # e.g., "Implicit conversion from data type varchar to varbinary(max) is not allowed." - # - # So at least verify that the user can manually specify the parameter type - self.cursor.setinputsizes([(), (pyodbc.SQL_VARBINARY, None, None)]) - self.cursor.execute(sql, 2, None) - else: - raise - row = self.cursor.execute("select * from t1").fetchone() - self.assertEqual(row.n, 2) - self.assertEqual(row.blob, None) - - - def test_output_conversion(self): - def convert(value): - # `value` will be a string. We'll simply add an X at the beginning at the end. - return 'X' + value + 'X' - self.cnxn.add_output_converter(pyodbc.SQL_VARCHAR, convert) - self.cursor.execute("create table t1(n int, v varchar(10))") - self.cursor.execute("insert into t1 values (1, '123.45')") - value = self.cursor.execute("select v from t1").fetchone()[0] - self.assertEqual(value, 'X123.45X') - - # Now clear the conversions and try again. There should be no Xs this time. - self.cnxn.clear_output_converters() - value = self.cursor.execute("select v from t1").fetchone()[0] - self.assertEqual(value, '123.45') - - - def test_too_large(self): - """Ensure error raised if insert fails due to truncation""" - value = 'x' * 1000 - self.cursor.execute("create table t1(s varchar(800))") - def test(): - self.cursor.execute("insert into t1 values (?)", value) - self.assertRaises(pyodbc.DataError, test) - - def test_login_timeout(self): - # This can only test setting since there isn't a way to cause it to block on the server side. - cnxns = pyodbc.connect(self.connection_string, timeout=2) - - def test_row_equal(self): - self.cursor.execute("create table t1(n int, s varchar(20))") - self.cursor.execute("insert into t1 values (1, 'test')") - row1 = self.cursor.execute("select n, s from t1").fetchone() - row2 = self.cursor.execute("select n, s from t1").fetchone() - b = (row1 == row2) - self.assertEqual(b, True) - - def test_row_gtlt(self): - self.cursor.execute("create table t1(n int, s varchar(20))") - self.cursor.execute("insert into t1 values (1, 'test1')") - self.cursor.execute("insert into t1 values (1, 'test2')") - rows = self.cursor.execute("select n, s from t1 order by s").fetchall() - self.assertTrue(rows[0] < rows[1]) - self.assertTrue(rows[0] <= rows[1]) - self.assertTrue(rows[1] > rows[0]) - self.assertTrue(rows[1] >= rows[0]) - self.assertTrue(rows[0] != rows[1]) - - rows = list(rows) - rows.sort() # uses < - - def test_context_manager_success(self): - """ - Ensure a successful with statement causes a commit. 
- """ - self.cursor.execute("create table t1(n int)") - - with pyodbc.connect(self.connection_string) as cnxn: - cursor = cnxn.cursor() - cursor.execute("insert into t1 values (1)") - - cnxn = None - cursor = None - - rows = self.cursor.execute("select n from t1").fetchall() - self.assertEqual(len(rows), 1) - self.assertEqual(rows[0][0], 1) - - - def test_context_manager_fail(self): - """ - Ensure an exception in a with statement causes a rollback. - """ - self.cursor.execute("create table t1(n int)") - - try: - with pyodbc.connect(self.connection_string) as cnxn: - cursor = cnxn.cursor() - cursor.execute("insert into t1 values (1)") - raise Exception("Testing failure") - except Exception: - pass - - cnxn = None - cursor = None - - count = self.cursor.execute("select count(*) from t1").fetchone()[0] - self.assertEqual(count, 0) - - - def test_cursor_context_manager_success(self): - """ - Ensure a successful with statement using a cursor causes a commit. - """ - self.cursor.execute("create table t1(n int)") - - with pyodbc.connect(self.connection_string).cursor() as cursor: - cursor.execute("insert into t1 values (1)") - - cursor = None - - rows = self.cursor.execute("select n from t1").fetchall() - self.assertEqual(len(rows), 1) - self.assertEqual(rows[0][0], 1) - - - def test_cursor_context_manager_fail(self): - """ - Ensure an exception in a with statement using a cursor causes a rollback. - """ - self.cursor.execute("create table t1(n int)") - - try: - with pyodbc.connect(self.connection_string).cursor() as cursor: - cursor.execute("insert into t1 values (1)") - raise Exception("Testing failure") - except Exception: - pass - - cursor = None - - count = self.cursor.execute("select count(*) from t1").fetchone()[0] - self.assertEqual(count, 0) - - - def test_untyped_none(self): - # From issue 129 - value = self.cursor.execute("select ?", None).fetchone()[0] - self.assertEqual(value, None) - - def test_large_update_nodata(self): - self.cursor.execute('create table t1(a varbinary(max)) with(heap)') - hundredkb = bytearray('x'*100*1024) - self.cursor.setinputsizes([(pyodbc.SQL_VARBINARY,0,0)]) - self.cursor.execute('update t1 set a=? where 1=0', (hundredkb,)) - - - def test_no_fetch(self): - # Issue 89 with FreeTDS: Multiple selects (or catalog functions that issue selects) without fetches seem to - # confuse the driver. - self.cursor.execute('select 1') - self.cursor.execute('select 1') - self.cursor.execute('select 1') - - def test_drivers(self): - drivers = pyodbc.drivers() - self.assertEqual(list, type(drivers)) - self.assertTrue(len(drivers) > 0) - - m = re.search('DRIVER={?([^}]+?)}?;', self.connection_string, re.IGNORECASE) - current = m.group(1) - self.assertTrue(current in drivers) - - def test_prepare_cleanup(self): - # When statement is prepared, it is kept in case the next execute uses the same statement. This must be - # removed when a non-execute statement is used that returns results, such as SQLTables. - - self.cursor.execute("select top 1 name from sysobjects where name = ?", "bogus") - self.cursor.fetchone() - - self.cursor.tables("bogus") - - self.cursor.execute("select top 1 name from sysobjects where name = ?", "bogus") - self.cursor.fetchone() - - def test_emoticons(self): - # https://github.com/mkleehammer/pyodbc/issues/423 - # - # When sending a varchar parameter, pyodbc is supposed to set ColumnSize to the number - # of characters. Ensure it works even with 4-byte characters. 
- # - # http://www.fileformat.info/info/unicode/char/1f31c/index.htm - - v = "x \U0001F31C z" - - self.cursor.execute("create table t1(s varchar(100))") - self.cursor.execute("insert into t1 values (?)", v) - - result = self.cursor.execute("select s from t1").fetchone()[0] - - self.assertEqual(result, v) - -def main(): - from optparse import OptionParser - parser = OptionParser(usage=usage) - parser.add_option("-v", "--verbose", action="count", help="Increment test verbosity (can be used multiple times)") - parser.add_option("-d", "--debug", action="store_true", default=False, help="Print debugging items") - parser.add_option("-t", "--test", help="Run only the named test") - - (options, args) = parser.parse_args() - - if len(args) > 1: - parser.error('Only one argument is allowed. Do you need quotes around the connection string?') - - if not args: - connection_string = load_setup_connection_string('sqldwtests') - - if not connection_string: - parser.print_help() - raise SystemExit() - else: - connection_string = args[0] - - if options.verbose: - cnxn = pyodbc.connect(connection_string) - print_library_info(cnxn) - cnxn.close() - - suite = load_tests(SqlServerTestCase, options.test, connection_string) - - testRunner = unittest.TextTestRunner(verbosity=options.verbose) - result = testRunner.run(suite) - - return result - - -if __name__ == '__main__': - - # Add the build directory to the path so we're testing the latest build, not the installed version. - - add_to_path() - - import pyodbc - sys.exit(0 if main().wasSuccessful() else 1) diff --git a/tests2/sqlite.db b/tests2/sqlite.db deleted file mode 100644 index ccd00e502da253d894f3ea95a473fe5d0df9441e..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 2048 zcmWFz^vNtqRY=P(%1ta$FlJz3U}R))P*7lCU~mFrCMafv@|8TBrz!`wZyQ*kjXj7)iK0XA;i(i$5o-kP$N+x gGp|HPAxR-QBe6)sSW}alc8{7h8UmvsFtkDd0JzW